github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/drivers/docker/driver_test.go (about)

     1  package docker
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io/ioutil"
     7  	"math/rand"
     8  	"path/filepath"
     9  	"reflect"
    10  	"runtime"
    11  	"runtime/debug"
    12  	"sort"
    13  	"strings"
    14  	"syscall"
    15  	"testing"
    16  	"time"
    17  
    18  	docker "github.com/fsouza/go-dockerclient"
    19  	hclog "github.com/hashicorp/go-hclog"
    20  	"github.com/hashicorp/nomad/client/taskenv"
    21  	"github.com/hashicorp/nomad/client/testutil"
    22  	"github.com/hashicorp/nomad/helper/freeport"
    23  	"github.com/hashicorp/nomad/helper/pluginutils/hclspecutils"
    24  	"github.com/hashicorp/nomad/helper/pluginutils/hclutils"
    25  	"github.com/hashicorp/nomad/helper/pluginutils/loader"
    26  	"github.com/hashicorp/nomad/helper/testlog"
    27  	"github.com/hashicorp/nomad/helper/uuid"
    28  	"github.com/hashicorp/nomad/nomad/structs"
    29  	"github.com/hashicorp/nomad/plugins/base"
    30  	"github.com/hashicorp/nomad/plugins/drivers"
    31  	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
    32  	tu "github.com/hashicorp/nomad/testutil"
    33  	"github.com/stretchr/testify/assert"
    34  	"github.com/stretchr/testify/require"
    35  )
    36  
var (
	// basicResources is the default Resources block shared by tests that do
	// not need custom CPU/memory settings. Note the Nomad-level CpuShares
	// (250) differs from the Linux-level CPUShares (512) — presumably only
	// the Linux values matter to these tests; confirm before relying on it.
	basicResources = &drivers.Resources{
		NomadResources: &structs.AllocatedTaskResources{
			Memory: structs.AllocatedMemoryResources{
				MemoryMB: 256,
			},
			Cpu: structs.AllocatedCpuResources{
				CpuShares: 250,
			},
		},
		LinuxResources: &drivers.LinuxResources{
			CPUShares:        512,
			MemoryLimitBytes: 256 * 1024 * 1024,
		},
	}
)
    53  
    54  func dockerIsRemote(t *testing.T) bool {
    55  	client, err := docker.NewClientFromEnv()
    56  	if err != nil {
    57  		return false
    58  	}
    59  
    60  	// Technically this could be a local tcp socket but for testing purposes
    61  	// we'll just assume that tcp is only used for remote connections.
    62  	if client.Endpoint()[0:3] == "tcp" {
    63  		return true
    64  	}
    65  	return false
    66  }
    67  
var (
	// busyboxLongRunningCmd is a busybox command that runs indefinitely, and
	// ideally responds to SIGINT/SIGTERM.  Sadly, busybox:1.29.3 /bin/sleep doesn't.
	// "nc -l" listens on 127.0.0.1:3000 and blocks until signalled.
	busyboxLongRunningCmd = []string{"nc", "-l", "-p", "3000", "127.0.0.1"}
)
    73  
// Returns a task with a reserved and dynamic port. The ports are returned
// respectively, and should be reclaimed with freeport.Return at the end of a test.
func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
	// Take two free ports: one used as the reserved port, one as dynamic.
	ports := freeport.MustTake(2)
	dockerReserved := ports[0]
	dockerDynamic := ports[1]

	// Driver-level config: busybox image running a long-lived nc listener.
	cfg := newTaskConfig("", busyboxLongRunningCmd)
	task := &drivers.TaskConfig{
		ID:      uuid.Generate(),
		Name:    "redis-demo",
		AllocID: uuid.Generate(),
		Env: map[string]string{
			"test": t.Name(),
		},
		DeviceEnv: make(map[string]string),
		Resources: &drivers.Resources{
			NomadResources: &structs.AllocatedTaskResources{
				Memory: structs.AllocatedMemoryResources{
					MemoryMB: 256,
				},
				Cpu: structs.AllocatedCpuResources{
					CpuShares: 512,
				},
				Networks: []*structs.NetworkResource{
					{
						IP:            "127.0.0.1",
						ReservedPorts: []structs.Port{{Label: "main", Value: dockerReserved}},
						DynamicPorts:  []structs.Port{{Label: "REDIS", Value: dockerDynamic}},
					},
				},
			},
			LinuxResources: &drivers.LinuxResources{
				CPUShares:        512,
				MemoryLimitBytes: 256 * 1024 * 1024,
				PercentTicks:     float64(512) / float64(4096),
			},
		},
	}

	// Serialize the docker-specific config into the generic task config.
	require.NoError(t, task.EncodeConcreteDriverConfig(&cfg))

	return task, &cfg, ports
}
   118  
// dockerSetup does all of the basic setup you need to get a running docker
// process up and running for testing. Use like:
//
//	task := taskTemplate()
//	// do custom task configuration
//	client, driver, handle, cleanup := dockerSetup(t, task, nil)
//	defer cleanup()
//	// do test stuff
//
// If there is a problem during setup this function will abort or skip the test
// and indicate the reason.
func dockerSetup(t *testing.T, task *drivers.TaskConfig, driverCfg map[string]interface{}) (*docker.Client, *dtestutil.DriverHarness, *taskHandle, func()) {
	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, driverCfg)
	cleanup := driver.MkAllocDir(task, true)

	copyImage(t, task.TaskDir(), "busybox.tar")
	_, _, err := driver.StartTask(task)
	require.NoError(t, err)

	// Fetch the driver's handle for the started task so callers can inspect
	// the underlying container directly.
	dockerDriver, ok := driver.Impl().(*Driver)
	require.True(t, ok)
	handle, ok := dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	// The returned cleanup destroys the task before removing the alloc dir.
	return client, driver, handle, func() {
		driver.DestroyTask(task.ID, true)
		cleanup()
	}
}
   149  
   150  // cleanSlate removes the specified docker image, including potentially stopping/removing any
   151  // containers based on that image. This is used to decouple tests that would be coupled
   152  // by using the same container image.
   153  func cleanSlate(client *docker.Client, imageID string) {
   154  	if img, _ := client.InspectImage(imageID); img == nil {
   155  		return
   156  	}
   157  	containers, _ := client.ListContainers(docker.ListContainersOptions{
   158  		All: true,
   159  		Filters: map[string][]string{
   160  			"ancestor": {imageID},
   161  		},
   162  	})
   163  	for _, c := range containers {
   164  		client.RemoveContainer(docker.RemoveContainerOptions{
   165  			Force: true,
   166  			ID:    c.ID,
   167  		})
   168  	}
   169  	client.RemoveImageExtended(imageID, docker.RemoveImageOptions{
   170  		Force: true,
   171  	})
   172  	return
   173  }
   174  
// dockerDriverHarness wires up everything needed to launch a task with a docker driver.
// A driver plugin interface and cleanup function is returned
func dockerDriverHarness(t *testing.T, cfg map[string]interface{}) *dtestutil.DriverHarness {
	logger := testlog.HCLogger(t)
	ctx, cancel := context.WithCancel(context.Background())
	// Cancel the driver's context when the test finishes so background
	// goroutines started by the driver are stopped.
	t.Cleanup(func() { cancel() })
	harness := dtestutil.NewDriverHarness(t, NewDockerDriver(ctx, logger))
	if cfg == nil {
		// Default plugin config: image GC disabled, with a short delay value.
		cfg = map[string]interface{}{
			"gc": map[string]interface{}{
				"image":       false,
				"image_delay": "1s",
			},
		}
	}
	// Register the harness as an internal plugin so it can be dispensed
	// through the regular plugin-loader machinery with the given config.
	plugLoader, err := loader.NewPluginLoader(&loader.PluginLoaderConfig{
		Logger:            logger,
		PluginDir:         "./plugins",
		SupportedVersions: loader.AgentSupportedApiVersions,
		InternalPlugins: map[loader.PluginID]*loader.InternalPluginConfig{
			PluginID: {
				Config: cfg,
				Factory: func(context.Context, hclog.Logger) interface{} {
					return harness
				},
			},
		},
	})

	require.NoError(t, err)
	instance, err := plugLoader.Dispense(pluginName, base.PluginTypeDriver, nil, logger)
	require.NoError(t, err)
	driver, ok := instance.Plugin().(*dtestutil.DriverHarness)
	if !ok {
		t.Fatal("plugin instance is not a driver... wat?")
	}

	return driver
}
   214  
   215  func newTestDockerClient(t *testing.T) *docker.Client {
   216  	t.Helper()
   217  	testutil.DockerCompatible(t)
   218  
   219  	client, err := docker.NewClientFromEnv()
   220  	if err != nil {
   221  		t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
   222  	}
   223  	return client
   224  }
   225  
   226  // Following tests have been removed from this file.
   227  // [TestDockerDriver_Fingerprint, TestDockerDriver_Fingerprint_Bridge, TestDockerDriver_Check_DockerHealthStatus]
   228  // If you want to checkout/revert those tests, please check commit: 41715b1860778aa80513391bd64abd721d768ab0
   229  
// TestDockerDriver_Start_Wait starts a long-running container and asserts the
// wait channel does NOT fire within the timeout (the task keeps running).
func TestDockerDriver_Start_Wait(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "nc-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	// Attempt to wait
	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	// The task runs indefinitely, so receiving an exit result here is a bug.
	select {
	case <-waitCh:
		t.Fatalf("wait channel should not have received an exit result")
	case <-time.After(time.Duration(tu.TestMultiplier()*1) * time.Second):
	}
}
   265  
   266  func TestDockerDriver_Start_WaitFinish(t *testing.T) {
   267  	if !tu.IsCI() {
   268  		t.Parallel()
   269  	}
   270  	testutil.DockerCompatible(t)
   271  
   272  	taskCfg := newTaskConfig("", []string{"echo", "hello"})
   273  	task := &drivers.TaskConfig{
   274  		ID:        uuid.Generate(),
   275  		Name:      "nc-demo",
   276  		AllocID:   uuid.Generate(),
   277  		Resources: basicResources,
   278  	}
   279  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   280  
   281  	d := dockerDriverHarness(t, nil)
   282  	cleanup := d.MkAllocDir(task, true)
   283  	defer cleanup()
   284  	copyImage(t, task.TaskDir(), "busybox.tar")
   285  
   286  	_, _, err := d.StartTask(task)
   287  	require.NoError(t, err)
   288  
   289  	defer d.DestroyTask(task.ID, true)
   290  
   291  	// Attempt to wait
   292  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   293  	require.NoError(t, err)
   294  
   295  	select {
   296  	case res := <-waitCh:
   297  		if !res.Successful() {
   298  			require.Fail(t, "ExitResult should be successful: %v", res)
   299  		}
   300  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   301  		require.Fail(t, "timeout")
   302  	}
   303  }
   304  
// TestDockerDriver_Start_StoppedContainer asserts that Nomad will detect a
// stopped task container, remove it, and start a new container.
//
// See https://github.com/hashicorp/nomad/issues/3419
func TestDockerDriver_Start_StoppedContainer(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	taskCfg := newTaskConfig("", []string{"sleep", "9001"})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "nc-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	client := newTestDockerClient(t)

	var imageID string
	var err error

	// On non-Windows, load the image from the copied tarball via the driver;
	// on Windows the image is expected to already exist in the daemon.
	if runtime.GOOS != "windows" {
		imageID, err = d.Impl().(*Driver).loadImage(task, &taskCfg, client)
	} else {
		image, lErr := client.InspectImage(taskCfg.Image)
		err = lErr
		if image != nil {
			imageID = image.ID
		}
	}
	require.NoError(t, err)
	require.NotEmpty(t, imageID)

	// Create a container of the same name but don't start it. This mimics
	// the case of dockerd getting restarted and stopping containers while
	// Nomad is watching them.
	opts := docker.CreateContainerOptions{
		Name: strings.Replace(task.ID, "/", "_", -1),
		Config: &docker.Config{
			Image: taskCfg.Image,
			Cmd:   []string{"sleep", "9000"},
			Env:   []string{fmt.Sprintf("test=%s", t.Name())},
		},
	}

	if _, err := client.CreateContainer(opts); err != nil {
		t.Fatalf("error creating initial container: %v", err)
	}

	// StartTask must replace the pre-existing stopped container and run.
	_, _, err = d.StartTask(task)
	defer d.DestroyTask(task.ID, true)
	require.NoError(t, err)

	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
	require.NoError(t, d.DestroyTask(task.ID, true))
}
   369  
   370  func TestDockerDriver_Start_LoadImage(t *testing.T) {
   371  	if !tu.IsCI() {
   372  		t.Parallel()
   373  	}
   374  	testutil.DockerCompatible(t)
   375  
   376  	taskCfg := newTaskConfig("", []string{"sh", "-c", "echo hello > $NOMAD_TASK_DIR/output"})
   377  	task := &drivers.TaskConfig{
   378  		ID:        uuid.Generate(),
   379  		Name:      "busybox-demo",
   380  		AllocID:   uuid.Generate(),
   381  		Resources: basicResources,
   382  	}
   383  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   384  
   385  	d := dockerDriverHarness(t, nil)
   386  	cleanup := d.MkAllocDir(task, true)
   387  	defer cleanup()
   388  	copyImage(t, task.TaskDir(), "busybox.tar")
   389  
   390  	_, _, err := d.StartTask(task)
   391  	require.NoError(t, err)
   392  
   393  	defer d.DestroyTask(task.ID, true)
   394  
   395  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   396  	require.NoError(t, err)
   397  	select {
   398  	case res := <-waitCh:
   399  		if !res.Successful() {
   400  			require.Fail(t, "ExitResult should be successful: %v", res)
   401  		}
   402  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   403  		require.Fail(t, "timeout")
   404  	}
   405  
   406  	// Check that data was written to the shared alloc directory.
   407  	outputFile := filepath.Join(task.TaskDir().LocalDir, "output")
   408  	act, err := ioutil.ReadFile(outputFile)
   409  	if err != nil {
   410  		t.Fatalf("Couldn't read expected output: %v", err)
   411  	}
   412  
   413  	exp := "hello"
   414  	if strings.TrimSpace(string(act)) != exp {
   415  		t.Fatalf("Command outputted %v; want %v", act, exp)
   416  	}
   417  
   418  }
   419  
   420  // Tests that starting a task without an image fails
   421  func TestDockerDriver_Start_NoImage(t *testing.T) {
   422  	if !tu.IsCI() {
   423  		t.Parallel()
   424  	}
   425  	testutil.DockerCompatible(t)
   426  
   427  	taskCfg := TaskConfig{
   428  		Command: "echo",
   429  		Args:    []string{"foo"},
   430  	}
   431  	task := &drivers.TaskConfig{
   432  		ID:        uuid.Generate(),
   433  		Name:      "echo",
   434  		AllocID:   uuid.Generate(),
   435  		Resources: basicResources,
   436  	}
   437  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   438  
   439  	d := dockerDriverHarness(t, nil)
   440  	cleanup := d.MkAllocDir(task, false)
   441  	defer cleanup()
   442  
   443  	_, _, err := d.StartTask(task)
   444  	require.Error(t, err)
   445  	require.Contains(t, err.Error(), "image name required")
   446  
   447  	d.DestroyTask(task.ID, true)
   448  }
   449  
   450  func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
   451  	if !tu.IsCI() {
   452  		t.Parallel()
   453  	}
   454  	testutil.DockerCompatible(t)
   455  
   456  	taskCfg := TaskConfig{
   457  		Image:            "127.0.0.1:32121/foo", // bad path
   458  		ImagePullTimeout: "5m",
   459  		Command:          "echo",
   460  		Args: []string{
   461  			"hello",
   462  		},
   463  	}
   464  	task := &drivers.TaskConfig{
   465  		ID:        uuid.Generate(),
   466  		Name:      "busybox-demo",
   467  		AllocID:   uuid.Generate(),
   468  		Resources: basicResources,
   469  	}
   470  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   471  
   472  	d := dockerDriverHarness(t, nil)
   473  	cleanup := d.MkAllocDir(task, true)
   474  	defer cleanup()
   475  
   476  	_, _, err := d.StartTask(task)
   477  	require.Error(t, err)
   478  
   479  	defer d.DestroyTask(task.ID, true)
   480  
   481  	if rerr, ok := err.(*structs.RecoverableError); !ok {
   482  		t.Fatalf("want recoverable error: %+v", err)
   483  	} else if !rerr.IsRecoverable() {
   484  		t.Fatalf("error not recoverable: %+v", err)
   485  	}
   486  }
   487  
// TestDockerDriver_Start_Wait_AllocDir runs a container that writes into the
// shared alloc dir and verifies the bytes arrive on the host side.
func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	// This test requires that the alloc dir be mounted into docker as a volume.
	// Because this cannot happen when docker is run remotely, e.g. when running
	// docker in a VM, we skip this when we detect Docker is being run remotely.
	if !testutil.DockerIsConnected(t) || dockerIsRemote(t) {
		t.Skip("Docker not connected")
	}

	exp := []byte{'w', 'i', 'n'}
	file := "output.txt"

	// Write "win" (no trailing newline) into the alloc dir from inside the
	// container after a short sleep.
	taskCfg := newTaskConfig("", []string{
		"sh",
		"-c",
		fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`,
			string(exp), taskenv.AllocDir, file),
	})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	// Attempt to wait
	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	select {
	case res := <-waitCh:
		if !res.Successful() {
			require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		require.Fail(t, "timeout")
	}

	// Check that data was written to the shared alloc directory.
	outputFile := filepath.Join(task.TaskDir().SharedAllocDir, file)
	act, err := ioutil.ReadFile(outputFile)
	if err != nil {
		t.Fatalf("Couldn't read expected output: %v", err)
	}

	if !reflect.DeepEqual(act, exp) {
		t.Fatalf("Command outputted %v; want %v", act, exp)
	}
}
   550  
   551  func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
   552  	if !tu.IsCI() {
   553  		t.Parallel()
   554  	}
   555  	testutil.DockerCompatible(t)
   556  
   557  	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
   558  	task := &drivers.TaskConfig{
   559  		ID:        uuid.Generate(),
   560  		Name:      "busybox-demo",
   561  		AllocID:   uuid.Generate(),
   562  		Resources: basicResources,
   563  	}
   564  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   565  
   566  	d := dockerDriverHarness(t, nil)
   567  	cleanup := d.MkAllocDir(task, true)
   568  	defer cleanup()
   569  	copyImage(t, task.TaskDir(), "busybox.tar")
   570  
   571  	_, _, err := d.StartTask(task)
   572  	require.NoError(t, err)
   573  
   574  	defer d.DestroyTask(task.ID, true)
   575  
   576  	go func(t *testing.T) {
   577  		time.Sleep(100 * time.Millisecond)
   578  		signal := "SIGINT"
   579  		if runtime.GOOS == "windows" {
   580  			signal = "SIGKILL"
   581  		}
   582  		require.NoError(t, d.StopTask(task.ID, time.Second, signal))
   583  	}(t)
   584  
   585  	// Attempt to wait
   586  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   587  	require.NoError(t, err)
   588  
   589  	select {
   590  	case res := <-waitCh:
   591  		if res.Successful() {
   592  			require.Fail(t, "ExitResult should err: %v", res)
   593  		}
   594  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   595  		require.Fail(t, "timeout")
   596  	}
   597  }
   598  
// TestDockerDriver_Start_KillTimeout asserts that when the stop signal does
// not terminate the task, the driver waits the full kill timeout before
// force-killing the container.
func TestDockerDriver_Start_KillTimeout(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	if runtime.GOOS == "windows" {
		t.Skip("Windows Docker does not support SIGUSR1")
	}

	// Presumably busybox sleep ignores SIGUSR1, forcing the timeout path —
	// TODO confirm.
	timeout := 2 * time.Second
	taskCfg := newTaskConfig("", []string{"sleep", "10"})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "busybox-demo",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	// NOTE(review): killSent is written in the goroutine and read below with
	// no explicit Go-level synchronization; -race may flag this. Verify.
	var killSent time.Time
	go func() {
		time.Sleep(100 * time.Millisecond)
		killSent = time.Now()
		require.NoError(t, d.StopTask(task.ID, timeout, "SIGUSR1"))
	}()

	// Attempt to wait
	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	var killed time.Time
	select {
	case <-waitCh:
		killed = time.Now()
	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		require.Fail(t, "timeout")
	}

	// The task must have outlived the kill timeout, proving it was honored.
	require.True(t, killed.Sub(killSent) > timeout)
}
   650  
// TestDockerDriver_StartN starts several containers concurrently on one
// harness, then stops each and waits for it to exit.
func TestDockerDriver_StartN(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Windows Docker does not support SIGINT")
	}
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)
	require := require.New(t)

	// Three independent tasks, each holding its own pair of free ports.
	task1, _, ports1 := dockerTask(t)
	defer freeport.Return(ports1)

	task2, _, ports2 := dockerTask(t)
	defer freeport.Return(ports2)

	task3, _, ports3 := dockerTask(t)
	defer freeport.Return(ports3)

	taskList := []*drivers.TaskConfig{task1, task2, task3}

	t.Logf("Starting %d tasks", len(taskList))

	d := dockerDriverHarness(t, nil)
	// Let's spin up a bunch of things
	for _, task := range taskList {
		cleanup := d.MkAllocDir(task, true)
		defer cleanup()
		copyImage(t, task.TaskDir(), "busybox.tar")
		_, _, err := d.StartTask(task)
		require.NoError(err)

	}

	defer d.DestroyTask(task3.ID, true)
	defer d.DestroyTask(task2.ID, true)
	defer d.DestroyTask(task1.ID, true)

	t.Log("All tasks are started. Terminating...")
	for _, task := range taskList {
		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))

		// Attempt to wait
		waitCh, err := d.WaitTask(context.Background(), task.ID)
		require.NoError(err)

		select {
		case <-waitCh:
		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
			require.Fail("timeout waiting on task")
		}
	}

	t.Log("Test complete!")
}
   706  
// TestDockerDriver_StartNVersions starts three tasks using three different
// busybox image variants (default, musl, glibc), then stops each one.
func TestDockerDriver_StartNVersions(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("Skipped on windows, we don't have image variants available")
	}
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)
	require := require.New(t)

	// Task 1: default busybox variant.
	task1, cfg1, ports1 := dockerTask(t)
	defer freeport.Return(ports1)
	tcfg1 := newTaskConfig("", []string{"echo", "hello"})
	cfg1.Image = tcfg1.Image
	cfg1.LoadImage = tcfg1.LoadImage
	require.NoError(task1.EncodeConcreteDriverConfig(cfg1))

	// Task 2: musl variant.
	task2, cfg2, ports2 := dockerTask(t)
	defer freeport.Return(ports2)
	tcfg2 := newTaskConfig("musl", []string{"echo", "hello"})
	cfg2.Image = tcfg2.Image
	cfg2.LoadImage = tcfg2.LoadImage
	require.NoError(task2.EncodeConcreteDriverConfig(cfg2))

	// Task 3: glibc variant.
	task3, cfg3, ports3 := dockerTask(t)
	defer freeport.Return(ports3)
	tcfg3 := newTaskConfig("glibc", []string{"echo", "hello"})
	cfg3.Image = tcfg3.Image
	cfg3.LoadImage = tcfg3.LoadImage
	require.NoError(task3.EncodeConcreteDriverConfig(cfg3))

	taskList := []*drivers.TaskConfig{task1, task2, task3}

	t.Logf("Starting %d tasks", len(taskList))
	d := dockerDriverHarness(t, nil)

	// Let's spin up a bunch of things
	for _, task := range taskList {
		cleanup := d.MkAllocDir(task, true)
		defer cleanup()
		// Copy all three tarballs so every task dir can load its variant.
		copyImage(t, task.TaskDir(), "busybox.tar")
		copyImage(t, task.TaskDir(), "busybox_musl.tar")
		copyImage(t, task.TaskDir(), "busybox_glibc.tar")
		_, _, err := d.StartTask(task)
		require.NoError(err)

		require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))
	}

	defer d.DestroyTask(task3.ID, true)
	defer d.DestroyTask(task2.ID, true)
	defer d.DestroyTask(task1.ID, true)

	t.Log("All tasks are started. Terminating...")
	for _, task := range taskList {
		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))

		// Attempt to wait
		waitCh, err := d.WaitTask(context.Background(), task.ID)
		require.NoError(err)

		select {
		case <-waitCh:
		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
			require.Fail("timeout waiting on task")
		}
	}

	t.Log("Test complete!")
}
   777  
   778  func TestDockerDriver_Labels(t *testing.T) {
   779  	if !tu.IsCI() {
   780  		t.Parallel()
   781  	}
   782  	testutil.DockerCompatible(t)
   783  
   784  	task, cfg, ports := dockerTask(t)
   785  	defer freeport.Return(ports)
   786  
   787  	cfg.Labels = map[string]string{
   788  		"label1": "value1",
   789  		"label2": "value2",
   790  	}
   791  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   792  
   793  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   794  	defer cleanup()
   795  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   796  
   797  	container, err := client.InspectContainer(handle.containerID)
   798  	if err != nil {
   799  		t.Fatalf("err: %v", err)
   800  	}
   801  
   802  	// expect to see 1 additional standard labels
   803  	require.Equal(t, len(cfg.Labels)+1, len(container.Config.Labels))
   804  	for k, v := range cfg.Labels {
   805  		require.Equal(t, v, container.Config.Labels[k])
   806  	}
   807  }
   808  
   809  func TestDockerDriver_ForcePull(t *testing.T) {
   810  	if !tu.IsCI() {
   811  		t.Parallel()
   812  	}
   813  	testutil.DockerCompatible(t)
   814  
   815  	task, cfg, ports := dockerTask(t)
   816  	defer freeport.Return(ports)
   817  
   818  	cfg.ForcePull = true
   819  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   820  
   821  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   822  	defer cleanup()
   823  
   824  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   825  
   826  	_, err := client.InspectContainer(handle.containerID)
   827  	if err != nil {
   828  		t.Fatalf("err: %v", err)
   829  	}
   830  }
   831  
// TestDockerDriver_ForcePull_RepoDigest pulls an image pinned by repo digest
// and verifies the running container uses the expected local image ID.
func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("TODO: Skipped digest test on Windows")
	}

	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	// Clear LoadImage so the image really comes from the registry, pinned
	// by digest rather than tag.
	cfg.LoadImage = ""
	cfg.Image = "library/busybox@sha256:58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64"
	// localDigest is presumably the image ID the daemon reports for the
	// digest above — verify if the pinned image ever changes.
	localDigest := "sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7"
	cfg.ForcePull = true
	cfg.Command = busyboxLongRunningCmd[0]
	cfg.Args = busyboxLongRunningCmd[1:]
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)
	require.Equal(t, localDigest, container.Image)
}
   860  
   861  func TestDockerDriver_SecurityOptUnconfined(t *testing.T) {
   862  	if runtime.GOOS == "windows" {
   863  		t.Skip("Windows does not support seccomp")
   864  	}
   865  	if !tu.IsCI() {
   866  		t.Parallel()
   867  	}
   868  	testutil.DockerCompatible(t)
   869  
   870  	task, cfg, ports := dockerTask(t)
   871  	defer freeport.Return(ports)
   872  	cfg.SecurityOpt = []string{"seccomp=unconfined"}
   873  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   874  
   875  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   876  	defer cleanup()
   877  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   878  
   879  	container, err := client.InspectContainer(handle.containerID)
   880  	if err != nil {
   881  		t.Fatalf("err: %v", err)
   882  	}
   883  
   884  	require.Exactly(t, cfg.SecurityOpt, container.HostConfig.SecurityOpt)
   885  }
   886  
   887  func TestDockerDriver_SecurityOptFromFile(t *testing.T) {
   888  
   889  	if runtime.GOOS == "windows" {
   890  		t.Skip("Windows does not support seccomp")
   891  	}
   892  	if !tu.IsCI() {
   893  		t.Parallel()
   894  	}
   895  	testutil.DockerCompatible(t)
   896  
   897  	task, cfg, ports := dockerTask(t)
   898  	defer freeport.Return(ports)
   899  	cfg.SecurityOpt = []string{"seccomp=./test-resources/docker/seccomp.json"}
   900  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   901  
   902  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   903  	defer cleanup()
   904  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   905  
   906  	container, err := client.InspectContainer(handle.containerID)
   907  	require.NoError(t, err)
   908  
   909  	require.Contains(t, container.HostConfig.SecurityOpt[0], "reboot")
   910  }
   911  
   912  func TestDockerDriver_Runtime(t *testing.T) {
   913  	if !tu.IsCI() {
   914  		t.Parallel()
   915  	}
   916  	testutil.DockerCompatible(t)
   917  
   918  	task, cfg, ports := dockerTask(t)
   919  	defer freeport.Return(ports)
   920  	cfg.Runtime = "runc"
   921  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   922  
   923  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   924  	defer cleanup()
   925  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   926  
   927  	container, err := client.InspectContainer(handle.containerID)
   928  	if err != nil {
   929  		t.Fatalf("err: %v", err)
   930  	}
   931  
   932  	require.Exactly(t, cfg.Runtime, container.HostConfig.Runtime)
   933  }
   934  
   935  func TestDockerDriver_CreateContainerConfig(t *testing.T) {
   936  	t.Parallel()
   937  
   938  	task, cfg, ports := dockerTask(t)
   939  	defer freeport.Return(ports)
   940  	opt := map[string]string{"size": "120G"}
   941  
   942  	cfg.StorageOpt = opt
   943  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   944  
   945  	dh := dockerDriverHarness(t, nil)
   946  	driver := dh.Impl().(*Driver)
   947  
   948  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
   949  	require.NoError(t, err)
   950  
   951  	require.Equal(t, "org/repo:0.1", c.Config.Image)
   952  	require.EqualValues(t, opt, c.HostConfig.StorageOpt)
   953  
   954  	// Container name should be /<task_name>-<alloc_id> for backward compat
   955  	containerName := fmt.Sprintf("%s-%s", strings.Replace(task.Name, "/", "_", -1), task.AllocID)
   956  	require.Equal(t, containerName, c.Name)
   957  }
   958  
// TestDockerDriver_CreateContainerConfig_RuntimeConflict checks runtime
// selection when a task requests GPU devices: an explicit runtime matching
// the GPU runtime is accepted, while a different explicit runtime produces a
// "conflicting runtime requests" error.
func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) {
	t.Parallel()

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	// Requesting NVIDIA devices makes the driver want the GPU runtime.
	task.DeviceEnv["NVIDIA_VISIBLE_DEVICES"] = "GPU_UUID_1"

	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)
	driver.gpuRuntime = true

	// Explicitly requesting the GPU runtime itself does not conflict.
	cfg.Runtime = "nvidia"
	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)
	require.Equal(t, "nvidia", c.HostConfig.Runtime)

	// A different explicit runtime conflicts with the GPU runtime and errors.
	cfg.Runtime = "custom"
	_, err = driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.Error(t, err)
	require.Contains(t, err.Error(), "conflicting runtime requests")
}
   983  
   984  func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) {
   985  	t.Parallel()
   986  
   987  	dh := dockerDriverHarness(t, nil)
   988  	driver := dh.Impl().(*Driver)
   989  	driver.gpuRuntime = true
   990  	driver.config.allowRuntimes = map[string]struct{}{
   991  		"runc":   {},
   992  		"custom": {},
   993  	}
   994  
   995  	allowRuntime := []string{
   996  		"", // default always works
   997  		"runc",
   998  		"custom",
   999  	}
  1000  
  1001  	task, cfg, ports := dockerTask(t)
  1002  	defer freeport.Return(ports)
  1003  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1004  
  1005  	for _, runtime := range allowRuntime {
  1006  		t.Run(runtime, func(t *testing.T) {
  1007  			cfg.Runtime = runtime
  1008  			c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1009  			require.NoError(t, err)
  1010  			require.Equal(t, runtime, c.HostConfig.Runtime)
  1011  		})
  1012  	}
  1013  
  1014  	t.Run("not allowed: denied", func(t *testing.T) {
  1015  		cfg.Runtime = "denied"
  1016  		_, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1017  		require.Error(t, err)
  1018  		require.Contains(t, err.Error(), `runtime "denied" is not allowed`)
  1019  	})
  1020  
  1021  }
  1022  
  1023  func TestDockerDriver_CreateContainerConfig_User(t *testing.T) {
  1024  	t.Parallel()
  1025  
  1026  	task, cfg, ports := dockerTask(t)
  1027  	defer freeport.Return(ports)
  1028  	task.User = "random-user-1"
  1029  
  1030  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1031  
  1032  	dh := dockerDriverHarness(t, nil)
  1033  	driver := dh.Impl().(*Driver)
  1034  
  1035  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1036  	require.NoError(t, err)
  1037  
  1038  	require.Equal(t, task.User, c.Config.User)
  1039  }
  1040  
  1041  func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) {
  1042  	t.Parallel()
  1043  
  1044  	task, cfg, ports := dockerTask(t)
  1045  	defer freeport.Return(ports)
  1046  	task.AllocID = uuid.Generate()
  1047  	task.JobName = "redis-demo-job"
  1048  
  1049  	cfg.Labels = map[string]string{
  1050  		"user_label": "user_value",
  1051  
  1052  		// com.hashicorp.nomad. labels are reserved and
  1053  		// cannot be overridden
  1054  		"com.hashicorp.nomad.alloc_id": "bad_value",
  1055  	}
  1056  
  1057  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1058  
  1059  	dh := dockerDriverHarness(t, nil)
  1060  	driver := dh.Impl().(*Driver)
  1061  
  1062  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1063  	require.NoError(t, err)
  1064  
  1065  	expectedLabels := map[string]string{
  1066  		// user provided labels
  1067  		"user_label": "user_value",
  1068  		// default labels
  1069  		"com.hashicorp.nomad.alloc_id": task.AllocID,
  1070  	}
  1071  
  1072  	require.Equal(t, expectedLabels, c.Config.Labels)
  1073  }
  1074  
// TestDockerDriver_CreateContainerConfig_Logging verifies how the task's
// logging stanza maps onto docker's LogConfig: "type" wins over the legacy
// "driver" field, user-supplied config is preserved, and an empty stanza
// defaults to json-file with log rotation options.
func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) {
	t.Parallel()

	cases := []struct {
		name           string
		loggingConfig  DockerLogging
		expectedConfig DockerLogging
	}{
		{
			"simple type",
			DockerLogging{Type: "fluentd"},
			DockerLogging{
				Type:   "fluentd",
				Config: map[string]string{},
			},
		},
		{
			// "driver" is the legacy field name; it maps onto Type.
			"simple driver",
			DockerLogging{Driver: "fluentd"},
			DockerLogging{
				Type:   "fluentd",
				Config: map[string]string{},
			},
		},
		{
			"type takes precedence",
			DockerLogging{
				Type:   "json-file",
				Driver: "fluentd",
			},
			DockerLogging{
				Type:   "json-file",
				Config: map[string]string{},
			},
		},
		{
			"user config takes precedence, even if no type provided",
			DockerLogging{
				Type:   "",
				Config: map[string]string{"max-file": "3", "max-size": "10m"},
			},
			DockerLogging{
				Type:   "",
				Config: map[string]string{"max-file": "3", "max-size": "10m"},
			},
		},
		{
			"defaults to json-file w/ log rotation",
			DockerLogging{
				Type: "",
			},
			DockerLogging{
				Type:   "json-file",
				Config: map[string]string{"max-file": "2", "max-size": "2m"},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			task, cfg, ports := dockerTask(t)
			defer freeport.Return(ports)

			cfg.Logging = c.loggingConfig
			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

			dh := dockerDriverHarness(t, nil)
			driver := dh.Impl().(*Driver)

			cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
			require.NoError(t, err)

			// Only type and the rotation keys are asserted; other config keys
			// are not checked here.
			require.Equal(t, c.expectedConfig.Type, cc.HostConfig.LogConfig.Type)
			require.Equal(t, c.expectedConfig.Config["max-file"], cc.HostConfig.LogConfig.Config["max-file"])
			require.Equal(t, c.expectedConfig.Config["max-size"], cc.HostConfig.LogConfig.Config["max-size"])
		})
	}
}
  1153  
  1154  func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) {
  1155  	t.Parallel()
  1156  
  1157  	task, cfg, ports := dockerTask(t)
  1158  	defer freeport.Return(ports)
  1159  
  1160  	cfg.Mounts = []DockerMount{
  1161  		DockerMount{
  1162  			Type:   "bind",
  1163  			Target: "/map-bind-target",
  1164  			Source: "/map-source",
  1165  		},
  1166  		DockerMount{
  1167  			Type:   "tmpfs",
  1168  			Target: "/map-tmpfs-target",
  1169  		},
  1170  	}
  1171  	cfg.MountsList = []DockerMount{
  1172  		{
  1173  			Type:   "bind",
  1174  			Target: "/list-bind-target",
  1175  			Source: "/list-source",
  1176  		},
  1177  		{
  1178  			Type:   "tmpfs",
  1179  			Target: "/list-tmpfs-target",
  1180  		},
  1181  	}
  1182  
  1183  	expectedSrcPrefix := "/"
  1184  	if runtime.GOOS == "windows" {
  1185  		expectedSrcPrefix = "redis-demo\\"
  1186  	}
  1187  	expected := []docker.HostMount{
  1188  		// from mount map
  1189  		{
  1190  			Type:        "bind",
  1191  			Target:      "/map-bind-target",
  1192  			Source:      expectedSrcPrefix + "map-source",
  1193  			BindOptions: &docker.BindOptions{},
  1194  		},
  1195  		{
  1196  			Type:          "tmpfs",
  1197  			Target:        "/map-tmpfs-target",
  1198  			TempfsOptions: &docker.TempfsOptions{},
  1199  		},
  1200  		// from mount list
  1201  		{
  1202  			Type:        "bind",
  1203  			Target:      "/list-bind-target",
  1204  			Source:      expectedSrcPrefix + "list-source",
  1205  			BindOptions: &docker.BindOptions{},
  1206  		},
  1207  		{
  1208  			Type:          "tmpfs",
  1209  			Target:        "/list-tmpfs-target",
  1210  			TempfsOptions: &docker.TempfsOptions{},
  1211  		},
  1212  	}
  1213  
  1214  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1215  
  1216  	dh := dockerDriverHarness(t, nil)
  1217  	driver := dh.Impl().(*Driver)
  1218  	driver.config.Volumes.Enabled = true
  1219  
  1220  	cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1221  	require.NoError(t, err)
  1222  
  1223  	found := cc.HostConfig.Mounts
  1224  	sort.Slice(found, func(i, j int) bool { return strings.Compare(found[i].Target, found[j].Target) < 0 })
  1225  	sort.Slice(expected, func(i, j int) bool {
  1226  		return strings.Compare(expected[i].Target, expected[j].Target) < 0
  1227  	})
  1228  
  1229  	require.Equal(t, expected, found)
  1230  }
  1231  
// TestDockerDriver_CreateContainerConfigWithRuntimes exercises GPU runtime
// selection: the configured GPU runtime is applied only when the driver
// detected one AND the task actually requests NVIDIA devices; otherwise the
// runtime stays empty or container creation errors.
func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testCases := []struct {
		description           string
		gpuRuntimeSet         bool
		expectToReturnError   bool
		expectedRuntime       string
		nvidiaDevicesProvided bool
	}{
		{
			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 1",
			gpuRuntimeSet:         true,
			expectToReturnError:   false,
			expectedRuntime:       "nvidia",
			nvidiaDevicesProvided: true,
		},
		{
			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 2",
			gpuRuntimeSet:         true,
			expectToReturnError:   false,
			expectedRuntime:       "nvidia-runtime-modified-name",
			nvidiaDevicesProvided: true,
		},
		{
			description:           "no gpu devices provided - no runtime should be set",
			gpuRuntimeSet:         true,
			expectToReturnError:   false,
			expectedRuntime:       "nvidia",
			nvidiaDevicesProvided: false,
		},
		{
			description:           "no gpuRuntime supported by docker driver",
			gpuRuntimeSet:         false,
			expectToReturnError:   true,
			expectedRuntime:       "nvidia",
			nvidiaDevicesProvided: true,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.description, func(t *testing.T) {
			task, cfg, ports := dockerTask(t)
			defer freeport.Return(ports)

			// All runtimes used by the cases must be allowlisted so the test
			// only exercises GPU selection, not the allowlist check.
			dh := dockerDriverHarness(t, map[string]interface{}{
				"allow_runtimes": []string{"runc", "nvidia", "nvidia-runtime-modified-name"},
			})
			driver := dh.Impl().(*Driver)

			driver.gpuRuntime = testCase.gpuRuntimeSet
			driver.config.GPURuntimeName = testCase.expectedRuntime
			if testCase.nvidiaDevicesProvided {
				task.DeviceEnv["NVIDIA_VISIBLE_DEVICES"] = "GPU_UUID_1"
			}

			c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
			if testCase.expectToReturnError {
				require.NotNil(t, err)
			} else {
				require.NoError(t, err)
				if testCase.nvidiaDevicesProvided {
					require.Equal(t, testCase.expectedRuntime, c.HostConfig.Runtime)
				} else {
					// no nvidia devices provided -> no point to use nvidia runtime
					require.Equal(t, "", c.HostConfig.Runtime)
				}
			}
		})
	}
}
  1303  
// TestDockerDriver_Capabilities exercises cap_add/cap_drop against both the
// default capability allowlist and custom allowlists: allowed capabilities
// must reach the container's HostConfig, while forbidden ones must fail the
// task at start with an error naming the offending capability.
func TestDockerDriver_Capabilities(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)
	if runtime.GOOS == "windows" {
		t.Skip("Capabilities not supported on windows")
	}

	testCases := []struct {
		Name       string
		CapAdd     []string
		CapDrop    []string
		Allowlist  string // comma-separated; empty means use the driver default
		StartError string // expected substring of the StartTask error; empty means start must succeed
	}{
		{
			Name:    "default-allowlist-add-allowed",
			CapAdd:  []string{"fowner", "mknod"},
			CapDrop: []string{"all"},
		},
		{
			Name:       "default-allowlist-add-forbidden",
			CapAdd:     []string{"net_admin"},
			StartError: "net_admin",
		},
		{
			Name:    "default-allowlist-drop-existing",
			CapDrop: []string{"fowner", "mknod"},
		},
		{
			Name:      "restrictive-allowlist-drop-all",
			CapDrop:   []string{"all"},
			Allowlist: "fowner,mknod",
		},
		{
			Name:      "restrictive-allowlist-add-allowed",
			CapAdd:    []string{"fowner", "mknod"},
			CapDrop:   []string{"all"},
			Allowlist: "fowner,mknod",
		},
		{
			Name:       "restrictive-allowlist-add-forbidden",
			CapAdd:     []string{"net_admin", "mknod"},
			CapDrop:    []string{"all"},
			Allowlist:  "fowner,mknod",
			StartError: "net_admin",
		},
		{
			Name:      "permissive-allowlist",
			CapAdd:    []string{"net_admin", "mknod"},
			Allowlist: "all",
		},
		{
			Name:      "permissive-allowlist-add-all",
			CapAdd:    []string{"all"},
			Allowlist: "all",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			client := newTestDockerClient(t)
			task, cfg, ports := dockerTask(t)
			defer freeport.Return(ports)

			if len(tc.CapAdd) > 0 {
				cfg.CapAdd = tc.CapAdd
			}
			if len(tc.CapDrop) > 0 {
				cfg.CapDrop = tc.CapDrop
			}
			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

			d := dockerDriverHarness(t, nil)
			dockerDriver, ok := d.Impl().(*Driver)
			require.True(t, ok)
			if tc.Allowlist != "" {
				dockerDriver.config.AllowCaps = strings.Split(tc.Allowlist, ",")
			}

			cleanup := d.MkAllocDir(task, true)
			defer cleanup()
			copyImage(t, task.TaskDir(), "busybox.tar")

			// Start may legitimately fail for the forbidden-cap cases;
			// DestroyTask is deferred regardless so any started container is
			// reaped.
			_, _, err := d.StartTask(task)
			defer d.DestroyTask(task.ID, true)
			if err == nil && tc.StartError != "" {
				t.Fatalf("Expected error in start: %v", tc.StartError)
			} else if err != nil {
				if tc.StartError == "" {
					// Unexpected failure: surface the error and fail.
					require.NoError(t, err)
				} else {
					require.Contains(t, err.Error(), tc.StartError)
				}
				return
			}

			handle, ok := dockerDriver.tasks.Get(task.ID)
			require.True(t, ok)

			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

			container, err := client.InspectContainer(handle.containerID)
			require.NoError(t, err)

			require.Exactly(t, tc.CapAdd, container.HostConfig.CapAdd)
			require.Exactly(t, tc.CapDrop, container.HostConfig.CapDrop)
		})
	}
}
  1415  
  1416  func TestDockerDriver_DNS(t *testing.T) {
  1417  	if !tu.IsCI() {
  1418  		t.Parallel()
  1419  	}
  1420  	testutil.DockerCompatible(t)
  1421  	testutil.ExecCompatible(t)
  1422  
  1423  	cases := []struct {
  1424  		name string
  1425  		cfg  *drivers.DNSConfig
  1426  	}{
  1427  		{
  1428  			name: "nil DNSConfig",
  1429  		},
  1430  		{
  1431  			name: "basic",
  1432  			cfg: &drivers.DNSConfig{
  1433  				Servers: []string{"1.1.1.1", "1.0.0.1"},
  1434  			},
  1435  		},
  1436  		{
  1437  			name: "full",
  1438  			cfg: &drivers.DNSConfig{
  1439  				Servers:  []string{"1.1.1.1", "1.0.0.1"},
  1440  				Searches: []string{"local.test", "node.consul"},
  1441  				Options:  []string{"ndots:2", "edns0"},
  1442  			},
  1443  		},
  1444  	}
  1445  
  1446  	for _, c := range cases {
  1447  		task, cfg, ports := dockerTask(t)
  1448  		defer freeport.Return(ports)
  1449  		task.DNS = c.cfg
  1450  		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1451  
  1452  		_, d, _, cleanup := dockerSetup(t, task, nil)
  1453  		defer cleanup()
  1454  
  1455  		require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1456  		defer d.DestroyTask(task.ID, true)
  1457  
  1458  		dtestutil.TestTaskDNSConfig(t, d, task.ID, c.cfg)
  1459  	}
  1460  
  1461  }
  1462  
  1463  func TestDockerDriver_CPUSetCPUs(t *testing.T) {
  1464  	if !tu.IsCI() {
  1465  		t.Parallel()
  1466  	}
  1467  	testutil.DockerCompatible(t)
  1468  	if runtime.GOOS == "windows" {
  1469  		t.Skip("Windows does not support CPUSetCPUs.")
  1470  	}
  1471  
  1472  	testCases := []struct {
  1473  		Name       string
  1474  		CPUSetCPUs string
  1475  	}{
  1476  		{
  1477  			Name:       "Single CPU",
  1478  			CPUSetCPUs: "0",
  1479  		},
  1480  		{
  1481  			Name:       "Comma separated list of CPUs",
  1482  			CPUSetCPUs: "0,1",
  1483  		},
  1484  		{
  1485  			Name:       "Range of CPUs",
  1486  			CPUSetCPUs: "0-1",
  1487  		},
  1488  	}
  1489  
  1490  	for _, testCase := range testCases {
  1491  		t.Run(testCase.Name, func(t *testing.T) {
  1492  			task, cfg, ports := dockerTask(t)
  1493  			defer freeport.Return(ports)
  1494  
  1495  			cfg.CPUSetCPUs = testCase.CPUSetCPUs
  1496  			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1497  
  1498  			client, d, handle, cleanup := dockerSetup(t, task, nil)
  1499  			defer cleanup()
  1500  			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1501  
  1502  			container, err := client.InspectContainer(handle.containerID)
  1503  			require.NoError(t, err)
  1504  
  1505  			require.Equal(t, cfg.CPUSetCPUs, container.HostConfig.CPUSetCPUs)
  1506  		})
  1507  	}
  1508  }
  1509  
  1510  func TestDockerDriver_MemoryHardLimit(t *testing.T) {
  1511  	if !tu.IsCI() {
  1512  		t.Parallel()
  1513  	}
  1514  	testutil.DockerCompatible(t)
  1515  	if runtime.GOOS == "windows" {
  1516  		t.Skip("Windows does not support MemoryReservation")
  1517  	}
  1518  
  1519  	task, cfg, ports := dockerTask(t)
  1520  	defer freeport.Return(ports)
  1521  
  1522  	cfg.MemoryHardLimit = 300
  1523  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1524  
  1525  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1526  	defer cleanup()
  1527  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1528  
  1529  	container, err := client.InspectContainer(handle.containerID)
  1530  	require.NoError(t, err)
  1531  
  1532  	require.Equal(t, task.Resources.LinuxResources.MemoryLimitBytes, container.HostConfig.MemoryReservation)
  1533  	require.Equal(t, cfg.MemoryHardLimit*1024*1024, container.HostConfig.Memory)
  1534  }
  1535  
  1536  func TestDockerDriver_MACAddress(t *testing.T) {
  1537  	if !tu.IsCI() {
  1538  		t.Parallel()
  1539  	}
  1540  	testutil.DockerCompatible(t)
  1541  	if runtime.GOOS == "windows" {
  1542  		t.Skip("Windows docker does not support setting MacAddress")
  1543  	}
  1544  
  1545  	task, cfg, ports := dockerTask(t)
  1546  	defer freeport.Return(ports)
  1547  	cfg.MacAddress = "00:16:3e:00:00:00"
  1548  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1549  
  1550  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1551  	defer cleanup()
  1552  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1553  
  1554  	container, err := client.InspectContainer(handle.containerID)
  1555  	require.NoError(t, err)
  1556  
  1557  	require.Equal(t, cfg.MacAddress, container.NetworkSettings.MacAddress)
  1558  }
  1559  
  1560  func TestDockerWorkDir(t *testing.T) {
  1561  	if !tu.IsCI() {
  1562  		t.Parallel()
  1563  	}
  1564  	testutil.DockerCompatible(t)
  1565  
  1566  	task, cfg, ports := dockerTask(t)
  1567  	defer freeport.Return(ports)
  1568  	cfg.WorkDir = "/some/path"
  1569  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1570  
  1571  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1572  	defer cleanup()
  1573  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1574  
  1575  	container, err := client.InspectContainer(handle.containerID)
  1576  	require.NoError(t, err)
  1577  	require.Equal(t, cfg.WorkDir, filepath.ToSlash(container.Config.WorkingDir))
  1578  }
  1579  
  1580  func inSlice(needle string, haystack []string) bool {
  1581  	for _, h := range haystack {
  1582  		if h == needle {
  1583  			return true
  1584  		}
  1585  	}
  1586  	return false
  1587  }
  1588  
  1589  func TestDockerDriver_PortsNoMap(t *testing.T) {
  1590  	if !tu.IsCI() {
  1591  		t.Parallel()
  1592  	}
  1593  	testutil.DockerCompatible(t)
  1594  
  1595  	task, _, ports := dockerTask(t)
  1596  	defer freeport.Return(ports)
  1597  	res := ports[0]
  1598  	dyn := ports[1]
  1599  
  1600  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1601  	defer cleanup()
  1602  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1603  
  1604  	container, err := client.InspectContainer(handle.containerID)
  1605  	require.NoError(t, err)
  1606  
  1607  	// Verify that the correct ports are EXPOSED
  1608  	expectedExposedPorts := map[docker.Port]struct{}{
  1609  		docker.Port(fmt.Sprintf("%d/tcp", res)): {},
  1610  		docker.Port(fmt.Sprintf("%d/udp", res)): {},
  1611  		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {},
  1612  		docker.Port(fmt.Sprintf("%d/udp", dyn)): {},
  1613  	}
  1614  
  1615  	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
  1616  
  1617  	hostIP := "127.0.0.1"
  1618  	if runtime.GOOS == "windows" {
  1619  		hostIP = ""
  1620  	}
  1621  
  1622  	// Verify that the correct ports are FORWARDED
  1623  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1624  		docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1625  		docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1626  		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1627  		docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1628  	}
  1629  
  1630  	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
  1631  }
  1632  
  1633  func TestDockerDriver_PortsMapping(t *testing.T) {
  1634  	if !tu.IsCI() {
  1635  		t.Parallel()
  1636  	}
  1637  	testutil.DockerCompatible(t)
  1638  
  1639  	task, cfg, ports := dockerTask(t)
  1640  	defer freeport.Return(ports)
  1641  	res := ports[0]
  1642  	dyn := ports[1]
  1643  	cfg.PortMap = map[string]int{
  1644  		"main":  8080,
  1645  		"REDIS": 6379,
  1646  	}
  1647  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1648  
  1649  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1650  	defer cleanup()
  1651  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1652  
  1653  	container, err := client.InspectContainer(handle.containerID)
  1654  	require.NoError(t, err)
  1655  
  1656  	// Verify that the port environment variables are set
  1657  	require.Contains(t, container.Config.Env, "NOMAD_PORT_main=8080")
  1658  	require.Contains(t, container.Config.Env, "NOMAD_PORT_REDIS=6379")
  1659  
  1660  	// Verify that the correct ports are EXPOSED
  1661  	expectedExposedPorts := map[docker.Port]struct{}{
  1662  		docker.Port("8080/tcp"): {},
  1663  		docker.Port("8080/udp"): {},
  1664  		docker.Port("6379/tcp"): {},
  1665  		docker.Port("6379/udp"): {},
  1666  	}
  1667  
  1668  	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
  1669  
  1670  	hostIP := "127.0.0.1"
  1671  	if runtime.GOOS == "windows" {
  1672  		hostIP = ""
  1673  	}
  1674  
  1675  	// Verify that the correct ports are FORWARDED
  1676  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1677  		docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1678  		docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1679  		docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1680  		docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1681  	}
  1682  	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
  1683  }
  1684  
  1685  func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) {
  1686  	t.Parallel()
  1687  
  1688  	task, cfg, ports := dockerTask(t)
  1689  	defer freeport.Return(ports)
  1690  	hostIP := "127.0.0.1"
  1691  	if runtime.GOOS == "windows" {
  1692  		hostIP = ""
  1693  	}
  1694  	portmappings := structs.AllocatedPorts(make([]structs.AllocatedPortMapping, len(ports)))
  1695  	portmappings[0] = structs.AllocatedPortMapping{
  1696  		Label:  "main",
  1697  		Value:  ports[0],
  1698  		HostIP: hostIP,
  1699  		To:     8080,
  1700  	}
  1701  	portmappings[1] = structs.AllocatedPortMapping{
  1702  		Label:  "REDIS",
  1703  		Value:  ports[1],
  1704  		HostIP: hostIP,
  1705  		To:     6379,
  1706  	}
  1707  	task.Resources.Ports = &portmappings
  1708  	cfg.Ports = []string{"main", "REDIS"}
  1709  
  1710  	dh := dockerDriverHarness(t, nil)
  1711  	driver := dh.Impl().(*Driver)
  1712  
  1713  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1714  	require.NoError(t, err)
  1715  
  1716  	require.Equal(t, "org/repo:0.1", c.Config.Image)
  1717  
  1718  	// Verify that the correct ports are FORWARDED
  1719  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1720  		docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}},
  1721  		docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}},
  1722  		docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}},
  1723  		docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}},
  1724  	}
  1725  	require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings)
  1726  
  1727  }
  1728  func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) {
  1729  	t.Parallel()
  1730  
  1731  	task, cfg, ports := dockerTask(t)
  1732  	defer freeport.Return(ports)
  1733  	res := ports[0]
  1734  	dyn := ports[1]
  1735  	cfg.PortMap = map[string]int{
  1736  		"main":  8080,
  1737  		"REDIS": 6379,
  1738  	}
  1739  	dh := dockerDriverHarness(t, nil)
  1740  	driver := dh.Impl().(*Driver)
  1741  
  1742  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1743  	require.NoError(t, err)
  1744  
  1745  	require.Equal(t, "org/repo:0.1", c.Config.Image)
  1746  	require.Contains(t, c.Config.Env, "NOMAD_PORT_main=8080")
  1747  	require.Contains(t, c.Config.Env, "NOMAD_PORT_REDIS=6379")
  1748  
  1749  	// Verify that the correct ports are FORWARDED
  1750  	hostIP := "127.0.0.1"
  1751  	if runtime.GOOS == "windows" {
  1752  		hostIP = ""
  1753  	}
  1754  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1755  		docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1756  		docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1757  		docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1758  		docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1759  	}
  1760  	require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings)
  1761  
  1762  }
  1763  
// TestDockerDriver_CleanupContainer runs a short-lived task and verifies
// that after DestroyTask the container is actually removed from the docker
// daemon.
func TestDockerDriver_CleanupContainer(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	// A one-shot command so the task exits on its own.
	cfg.Command = "echo"
	cfg.Args = []string{"hello"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()

	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)

	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}

		err = d.DestroyTask(task.ID, false)
		require.NoError(t, err)

		// Give the daemon a moment to finish removing the container before
		// we inspect. NOTE(review): a fixed sleep is inherently racy; a
		// WaitForResult-style poll would be more robust.
		time.Sleep(3 * time.Second)

		// Ensure that the container isn't present
		_, err := client.InspectContainer(handle.containerID)
		if err == nil {
			t.Fatalf("expected to not get container")
		}

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}
}
  1803  
// TestDockerDriver_EnableImageGC verifies that with image GC enabled the
// task's image is kept while the task exists, survives DestroyTask for the
// configured image_delay, and is eventually removed.
func TestDockerDriver_EnableImageGC(t *testing.T) {
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	// A one-shot command so the task exits on its own.
	cfg.Command = "echo"
	cfg.Args = []string{"hello"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, map[string]interface{}{
		"gc": map[string]interface{}{
			"container":   true,
			"image":       true,
			"image_delay": "2s",
		},
	})
	cleanup := driver.MkAllocDir(task, true)
	defer cleanup()

	// Remove any pre-existing copy of the image so the GC assertions below
	// aren't confused by leftovers from other tests.
	cleanSlate(client, cfg.Image)

	copyImage(t, task.TaskDir(), "busybox.tar")
	_, _, err := driver.StartTask(task)
	require.NoError(t, err)

	dockerDriver, ok := driver.Impl().(*Driver)
	require.True(t, ok)
	_, ok = dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}

	// we haven't called DestroyTask, image should be present
	_, err = client.InspectImage(cfg.Image)
	require.NoError(t, err)

	err = dockerDriver.DestroyTask(task.ID, false)
	require.NoError(t, err)

	// image_delay is 2s, so image should still be around for a bit
	// (comment previously said 3s, which didn't match the config above)
	_, err = client.InspectImage(cfg.Image)
	require.NoError(t, err)

	// Ensure image was removed
	tu.WaitForResult(func() (bool, error) {
		if _, err := client.InspectImage(cfg.Image); err == nil {
			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
		}

		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})
}
  1869  
// TestDockerDriver_DisableImageGC verifies that with image GC disabled the
// image survives DestroyTask and is never registered with the image
// coordinator for delayed deletion.
func TestDockerDriver_DisableImageGC(t *testing.T) {
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Command = "echo"
	cfg.Args = []string{"hello"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, map[string]interface{}{
		"gc": map[string]interface{}{
			"container":   true,
			"image":       false,
			"image_delay": "1s",
		},
	})
	cleanup := driver.MkAllocDir(task, true)
	defer cleanup()

	// remove any lingering container/image from previous runs
	cleanSlate(client, cfg.Image)

	copyImage(t, task.TaskDir(), "busybox.tar")
	_, _, err := driver.StartTask(task)
	require.NoError(t, err)

	dockerDriver, ok := driver.Impl().(*Driver)
	require.True(t, ok)
	handle, ok := dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}

	// we haven't called DestroyTask, image should be present
	_, err = client.InspectImage(handle.containerImage)
	require.NoError(t, err)

	err = dockerDriver.DestroyTask(task.ID, false)
	require.NoError(t, err)

	// image_delay is 1s, wait a little longer
	time.Sleep(3 * time.Second)

	// image should not have been removed or scheduled to be removed
	_, err = client.InspectImage(cfg.Image)
	require.NoError(t, err)
	// peek into the coordinator's pending-delete map under its lock
	dockerDriver.coordinator.imageLock.Lock()
	_, ok = dockerDriver.coordinator.deleteFuture[handle.containerImage]
	require.False(t, ok, "image should not be registered for deletion")
	dockerDriver.coordinator.imageLock.Unlock()
}
  1931  
// TestDockerDriver_MissingContainer_Cleanup asserts that DestroyTask
// tolerates a container that was removed out-of-band: destroy still
// succeeds, the image is garbage collected, and the task handle is dropped.
func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Command = "echo"
	cfg.Args = []string{"hello"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, map[string]interface{}{
		"gc": map[string]interface{}{
			"container":   true,
			"image":       true,
			"image_delay": "0s",
		},
	})
	cleanup := driver.MkAllocDir(task, true)
	defer cleanup()

	cleanSlate(client, cfg.Image)

	copyImage(t, task.TaskDir(), "busybox.tar")
	_, _, err := driver.StartTask(task)
	require.NoError(t, err)

	dockerDriver, ok := driver.Impl().(*Driver)
	require.True(t, ok)
	h, ok := dockerDriver.tasks.Get(task.ID)
	require.True(t, ok)

	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("err: %v", res)
		}

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}

	// remove the container out-of-band
	require.NoError(t, client.RemoveContainer(docker.RemoveContainerOptions{
		ID: h.containerID,
	}))

	// destroy must not fail even though the container is already gone
	require.NoError(t, dockerDriver.DestroyTask(task.ID, false))

	// Ensure image was removed
	tu.WaitForResult(func() (bool, error) {
		if _, err := client.InspectImage(cfg.Image); err == nil {
			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
		}

		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})

	// Ensure that task handle was removed
	_, ok = dockerDriver.tasks.Get(task.ID)
	require.False(t, ok)
}
  1997  
// TestDockerDriver_Stats collects one resource-usage sample from a running
// task, then destroys the task and expects an unsuccessful (killed) exit.
func TestDockerDriver_Stats(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Command = "sleep"
	cfg.Args = []string{"1000"}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	_, d, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))

	go func() {
		// destroy the task once a stats sample has been checked
		defer d.DestroyTask(task.ID, true)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		ch, err := handle.Stats(ctx, 1*time.Second)
		// assert (not require) is used because this runs on a non-test
		// goroutine, where FailNow must not be called.
		assert.NoError(t, err)
		select {
		case ru := <-ch:
			assert.NotNil(t, ru.ResourceUsage)
		case <-time.After(3 * time.Second):
			assert.Fail(t, "stats timeout")
		}
	}()

	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		// the goroutine above kills the task, so a successful exit is wrong
		if res.Successful() {
			t.Fatalf("should err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
		t.Fatalf("timeout")
	}
}
  2039  
// setupDockerVolumes builds a busybox task that touches a randomly named
// file inside a volume mounted from hostpath (container path /mnt/vol, or
// C:\data on Windows). It returns the task, a driver harness built from
// cfg, the decoded driver TaskConfig, the host-side path of the touched
// file, and a cleanup func the caller must invoke.
func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath string) (*drivers.TaskConfig, *dtestutil.DriverHarness, *TaskConfig, string, func()) {
	testutil.DockerCompatible(t)

	// random file name so concurrent/repeated runs don't collide
	randfn := fmt.Sprintf("test-%d", rand.Int())
	hostfile := filepath.Join(hostpath, randfn)
	var containerPath string
	if runtime.GOOS == "windows" {
		containerPath = "C:\\data"
	} else {
		containerPath = "/mnt/vol"
	}
	containerFile := filepath.Join(containerPath, randfn)

	taskCfg := newTaskConfig("", []string{"touch", containerFile})
	taskCfg.Volumes = []string{fmt.Sprintf("%s:%s", hostpath, containerPath)}

	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "ls",
		AllocID:   uuid.Generate(),
		Env:       map[string]string{"VOL_PATH": containerPath},
		Resources: basicResources,
	}
	require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))

	d := dockerDriverHarness(t, cfg)
	cleanup := d.MkAllocDir(task, true)

	copyImage(t, task.TaskDir(), "busybox.tar")

	return task, d, &taskCfg, hostfile, cleanup
}
  2072  
// TestDockerDriver_VolumesDisabled asserts behavior with volumes.enabled set
// to false: absolute host paths are rejected, relative paths (resolved
// inside the alloc dir) still work, and custom volume drivers are rejected.
func TestDockerDriver_VolumesDisabled(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	cfg := map[string]interface{}{
		"volumes": map[string]interface{}{
			"enabled": false,
		},
		"gc": map[string]interface{}{
			"image": false,
		},
	}

	// Absolute host paths should be rejected (error)
	{
		// NOTE(review): this temp dir is never removed; consider deferring
		// an os.RemoveAll — TODO confirm.
		tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesdisabled")
		if err != nil {
			t.Fatalf("error creating temporary dir: %v", err)
		}

		task, driver, _, _, cleanup := setupDockerVolumes(t, cfg, tmpvol)
		defer cleanup()

		_, _, err = driver.StartTask(task)
		defer driver.DestroyTask(task.ID, true)
		if err == nil {
			require.Fail(t, "Started driver successfully when volumes should have been disabled.")
		}
	}

	// Relative paths should still be allowed
	{
		task, driver, _, fn, cleanup := setupDockerVolumes(t, cfg, ".")
		defer cleanup()

		_, _, err := driver.StartTask(task)
		require.NoError(t, err)
		defer driver.DestroyTask(task.ID, true)

		waitCh, err := driver.WaitTask(context.Background(), task.ID)
		require.NoError(t, err)
		select {
		case res := <-waitCh:
			if !res.Successful() {
				t.Fatalf("unexpected err: %v", res)
			}
		case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
			t.Fatalf("timeout")
		}

		// the touched file must appear inside the task dir on the host
		if _, err := ioutil.ReadFile(filepath.Join(task.TaskDir().Dir, fn)); err != nil {
			t.Fatalf("unexpected error reading %s: %v", fn, err)
		}
	}

	// Volume Drivers should be rejected (error)
	{
		task, driver, taskCfg, _, cleanup := setupDockerVolumes(t, cfg, "fake_flocker_vol")
		defer cleanup()

		taskCfg.VolumeDriver = "flocker"
		require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))

		_, _, err := driver.StartTask(task)
		defer driver.DestroyTask(task.ID, true)
		if err == nil {
			require.Fail(t, "Started driver successfully when volume drivers should have been disabled.")
		}
	}
}
  2144  
// TestDockerDriver_VolumesEnabled asserts that with volumes.enabled=true a
// task can mount an absolute host path and write a file through it, visible
// on the host afterwards.
func TestDockerDriver_VolumesEnabled(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	cfg := map[string]interface{}{
		"volumes": map[string]interface{}{
			"enabled": true,
		},
		"gc": map[string]interface{}{
			"image": false,
		},
	}

	// NOTE(review): this temp dir is never removed; consider deferring an
	// os.RemoveAll — TODO confirm.
	tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesenabled")
	require.NoError(t, err)

	// Evaluate symlinks so it works on MacOS
	tmpvol, err = filepath.EvalSymlinks(tmpvol)
	require.NoError(t, err)

	task, driver, _, hostpath, cleanup := setupDockerVolumes(t, cfg, tmpvol)
	defer cleanup()

	_, _, err = driver.StartTask(task)
	require.NoError(t, err)
	defer driver.DestroyTask(task.ID, true)

	waitCh, err := driver.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if !res.Successful() {
			t.Fatalf("unexpected err: %v", res)
		}
	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
		t.Fatalf("timeout")
	}

	// the file touched in the container must exist at the host path
	if _, err := ioutil.ReadFile(hostpath); err != nil {
		t.Fatalf("unexpected error reading %s: %v", hostpath, err)
	}
}
  2189  
// TestDockerDriver_Mounts runs table-driven mount configurations through
// StartTask: a single valid local-driver volume mount succeeds, while
// repeated mounts on the same target fail with "Duplicate mount point".
func TestDockerDriver_Mounts(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	goodMount := DockerMount{
		Target: "/nomad",
		VolumeOptions: DockerVolumeOptions{
			Labels: map[string]string{"foo": "bar"},
			DriverConfig: DockerVolumeDriverConfig{
				Name: "local",
			},
		},
		ReadOnly: true,
		Source:   "test",
	}

	if runtime.GOOS == "windows" {
		goodMount.Target = "C:\\nomad"
	}

	cases := []struct {
		Name   string
		Mounts []DockerMount
		Error  string // substring expected in the StartTask error; "" means success
	}{
		{
			Name:   "good-one",
			Error:  "",
			Mounts: []DockerMount{goodMount},
		},
		{
			Name:   "duplicate",
			Error:  "Duplicate mount point",
			Mounts: []DockerMount{goodMount, goodMount, goodMount},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			d := dockerDriverHarness(t, nil)
			driver := d.Impl().(*Driver)
			driver.config.Volumes.Enabled = true

			// Build the task
			task, cfg, ports := dockerTask(t)
			defer freeport.Return(ports)
			cfg.Command = "sleep"
			cfg.Args = []string{"10000"}
			cfg.Mounts = c.Mounts
			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
			cleanup := d.MkAllocDir(task, true)
			defer cleanup()

			copyImage(t, task.TaskDir(), "busybox.tar")

			_, _, err := d.StartTask(task)
			defer d.DestroyTask(task.ID, true)
			if err == nil && c.Error != "" {
				t.Fatalf("expected error: %v", c.Error)
			} else if err != nil {
				if c.Error == "" {
					t.Fatalf("unexpected error in prestart: %v", err)
				} else if !strings.Contains(err.Error(), c.Error) {
					t.Fatalf("expected error %q; got %v", c.Error, err)
				}
			}
		})
	}
}
  2261  
  2262  func TestDockerDriver_AuthConfiguration(t *testing.T) {
  2263  	if !tu.IsCI() {
  2264  		t.Parallel()
  2265  	}
  2266  	testutil.DockerCompatible(t)
  2267  
  2268  	path := "./test-resources/docker/auth.json"
  2269  	cases := []struct {
  2270  		Repo       string
  2271  		AuthConfig *docker.AuthConfiguration
  2272  	}{
  2273  		{
  2274  			Repo:       "lolwhat.com/what:1337",
  2275  			AuthConfig: nil,
  2276  		},
  2277  		{
  2278  			Repo: "redis:3.2",
  2279  			AuthConfig: &docker.AuthConfiguration{
  2280  				Username:      "test",
  2281  				Password:      "1234",
  2282  				Email:         "",
  2283  				ServerAddress: "https://index.docker.io/v1/",
  2284  			},
  2285  		},
  2286  		{
  2287  			Repo: "quay.io/redis:3.2",
  2288  			AuthConfig: &docker.AuthConfiguration{
  2289  				Username:      "test",
  2290  				Password:      "5678",
  2291  				Email:         "",
  2292  				ServerAddress: "quay.io",
  2293  			},
  2294  		},
  2295  		{
  2296  			Repo: "other.io/redis:3.2",
  2297  			AuthConfig: &docker.AuthConfiguration{
  2298  				Username:      "test",
  2299  				Password:      "abcd",
  2300  				Email:         "",
  2301  				ServerAddress: "https://other.io/v1/",
  2302  			},
  2303  		},
  2304  	}
  2305  
  2306  	for _, c := range cases {
  2307  		act, err := authFromDockerConfig(path)(c.Repo)
  2308  		require.NoError(t, err)
  2309  		require.Exactly(t, c.AuthConfig, act)
  2310  	}
  2311  }
  2312  
  2313  func TestDockerDriver_AuthFromTaskConfig(t *testing.T) {
  2314  	if !tu.IsCI() {
  2315  		t.Parallel()
  2316  	}
  2317  
  2318  	cases := []struct {
  2319  		Auth       DockerAuth
  2320  		AuthConfig *docker.AuthConfiguration
  2321  		Desc       string
  2322  	}{
  2323  		{
  2324  			Auth:       DockerAuth{},
  2325  			AuthConfig: nil,
  2326  			Desc:       "Empty Config",
  2327  		},
  2328  		{
  2329  			Auth: DockerAuth{
  2330  				Username:   "foo",
  2331  				Password:   "bar",
  2332  				Email:      "foo@bar.com",
  2333  				ServerAddr: "www.foobar.com",
  2334  			},
  2335  			AuthConfig: &docker.AuthConfiguration{
  2336  				Username:      "foo",
  2337  				Password:      "bar",
  2338  				Email:         "foo@bar.com",
  2339  				ServerAddress: "www.foobar.com",
  2340  			},
  2341  			Desc: "All fields set",
  2342  		},
  2343  		{
  2344  			Auth: DockerAuth{
  2345  				Username:   "foo",
  2346  				Password:   "bar",
  2347  				ServerAddr: "www.foobar.com",
  2348  			},
  2349  			AuthConfig: &docker.AuthConfiguration{
  2350  				Username:      "foo",
  2351  				Password:      "bar",
  2352  				ServerAddress: "www.foobar.com",
  2353  			},
  2354  			Desc: "Email not set",
  2355  		},
  2356  	}
  2357  
  2358  	for _, c := range cases {
  2359  		t.Run(c.Desc, func(t *testing.T) {
  2360  			act, err := authFromTaskConfig(&TaskConfig{Auth: c.Auth})("test")
  2361  			require.NoError(t, err)
  2362  			require.Exactly(t, c.AuthConfig, act)
  2363  		})
  2364  	}
  2365  }
  2366  
// TestDockerDriver_OOMKilled runs a task that grows memory without bound
// under a 10MB limit and asserts the exit result is flagged OOMKilled.
// Skipped on Windows, which has no OOM killer.
func TestDockerDriver_OOMKilled(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	if runtime.GOOS == "windows" {
		t.Skip("Windows does not support OOM Killer")
	}

	// doubles a shell variable forever to exhaust memory quickly
	taskCfg := newTaskConfig("", []string{"sh", "-c", `sleep 2 && x=a && while true; do x="$x$x"; done`})
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "oom-killed",
		AllocID:   uuid.Generate(),
		Resources: basicResources,
	}
	// tighten both the cgroup limit and the nomad resource accounting
	task.Resources.LinuxResources.MemoryLimitBytes = 10 * 1024 * 1024
	task.Resources.NomadResources.Memory.MemoryMB = 10

	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))

	d := dockerDriverHarness(t, nil)
	cleanup := d.MkAllocDir(task, true)
	defer cleanup()
	copyImage(t, task.TaskDir(), "busybox.tar")

	_, _, err := d.StartTask(task)
	require.NoError(t, err)

	defer d.DestroyTask(task.ID, true)

	waitCh, err := d.WaitTask(context.Background(), task.ID)
	require.NoError(t, err)
	select {
	case res := <-waitCh:
		if res.Successful() {
			t.Fatalf("expected error, but container exited successful")
		}

		if !res.OOMKilled {
			t.Fatalf("not killed by OOM killer: %s", res.Err)
		}

		t.Logf("Successfully killed by OOM killer")

	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
		t.Fatalf("timeout")
	}
}
  2417  
  2418  func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
  2419  	if !tu.IsCI() {
  2420  		t.Parallel()
  2421  	}
  2422  	testutil.DockerCompatible(t)
  2423  
  2424  	brokenConfigs := []DockerDevice{
  2425  		{
  2426  			HostPath: "",
  2427  		},
  2428  		{
  2429  			HostPath:          "/dev/sda1",
  2430  			CgroupPermissions: "rxb",
  2431  		},
  2432  	}
  2433  
  2434  	testCases := []struct {
  2435  		deviceConfig []DockerDevice
  2436  		err          error
  2437  	}{
  2438  		{brokenConfigs[:1], fmt.Errorf("host path must be set in configuration for devices")},
  2439  		{brokenConfigs[1:], fmt.Errorf("invalid cgroup permission string: \"rxb\"")},
  2440  	}
  2441  
  2442  	for _, tc := range testCases {
  2443  		task, cfg, ports := dockerTask(t)
  2444  		cfg.Devices = tc.deviceConfig
  2445  		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2446  		d := dockerDriverHarness(t, nil)
  2447  		cleanup := d.MkAllocDir(task, true)
  2448  		copyImage(t, task.TaskDir(), "busybox.tar")
  2449  		defer cleanup()
  2450  
  2451  		_, _, err := d.StartTask(task)
  2452  		require.Error(t, err)
  2453  		require.Contains(t, err.Error(), tc.err.Error())
  2454  		freeport.Return(ports)
  2455  	}
  2456  }
  2457  
// TestDockerDriver_Device_Success maps a host device (/dev/random) into the
// container and verifies the container's HostConfig carries the device with
// the "rwm" cgroup permissions. Linux-only.
func TestDockerDriver_Device_Success(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	if runtime.GOOS != "linux" {
		t.Skip("test device mounts only on linux")
	}

	hostPath := "/dev/random"
	containerPath := "/dev/myrandom"
	perms := "rwm"

	expectedDevice := docker.Device{
		PathOnHost:        hostPath,
		PathInContainer:   containerPath,
		CgroupPermissions: perms,
	}
	// CgroupPermissions left unset in the task config; the driver fills in
	// the expected "rwm" default.
	config := DockerDevice{
		HostPath:      hostPath,
		ContainerPath: containerPath,
	}

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.Devices = []DockerDevice{config}
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, driver, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.NotEmpty(t, container.HostConfig.Devices, "Expected one device")
	require.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device ")
}
  2497  
  2498  func TestDockerDriver_Entrypoint(t *testing.T) {
  2499  	if !tu.IsCI() {
  2500  		t.Parallel()
  2501  	}
  2502  	testutil.DockerCompatible(t)
  2503  
  2504  	entrypoint := []string{"sh", "-c"}
  2505  	task, cfg, ports := dockerTask(t)
  2506  	defer freeport.Return(ports)
  2507  	cfg.Entrypoint = entrypoint
  2508  	cfg.Command = strings.Join(busyboxLongRunningCmd, " ")
  2509  	cfg.Args = []string{}
  2510  
  2511  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2512  
  2513  	client, driver, handle, cleanup := dockerSetup(t, task, nil)
  2514  	defer cleanup()
  2515  
  2516  	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
  2517  
  2518  	container, err := client.InspectContainer(handle.containerID)
  2519  	require.NoError(t, err)
  2520  
  2521  	require.Len(t, container.Config.Entrypoint, 2, "Expected one entrypoint")
  2522  	require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint ")
  2523  }
  2524  
// TestDockerDriver_ReadonlyRootfs asserts the readonly_rootfs task option is
// propagated to the container's HostConfig. Not supported on Windows.
func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	if runtime.GOOS == "windows" {
		t.Skip("Windows Docker does not support root filesystem in read-only mode")
	}

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	cfg.ReadonlyRootfs = true
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client, driver, handle, cleanup := dockerSetup(t, task, nil)
	defer cleanup()
	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))

	container, err := client.InspectContainer(handle.containerID)
	require.NoError(t, err)

	require.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
}
  2549  
// fakeDockerClient can be used in places that accept an interface for the
// docker client such as createContainer.
type fakeDockerClient struct{}

// CreateContainer always fails with a volume-attachment error so tests can
// exercise the driver's error classification for container creation.
func (fakeDockerClient) CreateContainer(docker.CreateContainerOptions) (*docker.Container, error) {
	return nil, fmt.Errorf("volume is attached on another node")
}

// InspectContainer is not expected to be called in these tests.
func (fakeDockerClient) InspectContainer(id string) (*docker.Container, error) {
	panic("not implemented")
}

// ListContainers is not expected to be called in these tests.
func (fakeDockerClient) ListContainers(docker.ListContainersOptions) ([]docker.APIContainers, error) {
	panic("not implemented")
}

// RemoveContainer is not expected to be called in these tests.
func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) error {
	panic("not implemented")
}
  2566  
// TestDockerDriver_VolumeError asserts volume related errors when creating a
// container are recoverable.
func TestDockerDriver_VolumeError(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}

	// setup
	_, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	driver := dockerDriverHarness(t, nil)

	// assert volume error is recoverable; fakeDockerClient.CreateContainer
	// always returns a "volume is attached on another node" error
	_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg.Image)
	require.True(t, structs.IsRecoverable(err))
}
  2583  
  2584  func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
  2585  	if !tu.IsCI() {
  2586  		t.Parallel()
  2587  	}
  2588  	testutil.DockerCompatible(t)
  2589  
  2590  	expectedPrefix := "2001:db8:1::242:ac11"
  2591  	expectedAdvertise := true
  2592  	task, cfg, ports := dockerTask(t)
  2593  	defer freeport.Return(ports)
  2594  	cfg.AdvertiseIPv6Addr = expectedAdvertise
  2595  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2596  
  2597  	client := newTestDockerClient(t)
  2598  
  2599  	// Make sure IPv6 is enabled
  2600  	net, err := client.NetworkInfo("bridge")
  2601  	if err != nil {
  2602  		t.Skip("error retrieving bridge network information, skipping")
  2603  	}
  2604  	if net == nil || !net.EnableIPv6 {
  2605  		t.Skip("IPv6 not enabled on bridge network, skipping")
  2606  	}
  2607  
  2608  	driver := dockerDriverHarness(t, nil)
  2609  	cleanup := driver.MkAllocDir(task, true)
  2610  	copyImage(t, task.TaskDir(), "busybox.tar")
  2611  	defer cleanup()
  2612  
  2613  	_, network, err := driver.StartTask(task)
  2614  	defer driver.DestroyTask(task.ID, true)
  2615  	require.NoError(t, err)
  2616  
  2617  	require.Equal(t, expectedAdvertise, network.AutoAdvertise, "Wrong autoadvertise. Expect: %s, got: %s", expectedAdvertise, network.AutoAdvertise)
  2618  
  2619  	if !strings.HasPrefix(network.IP, expectedPrefix) {
  2620  		t.Fatalf("Got IP address %q want ip address with prefix %q", network.IP, expectedPrefix)
  2621  	}
  2622  
  2623  	handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
  2624  	require.True(t, ok)
  2625  
  2626  	require.NoError(t, driver.WaitUntilStarted(task.ID, time.Second))
  2627  
  2628  	container, err := client.InspectContainer(handle.containerID)
  2629  	require.NoError(t, err)
  2630  
  2631  	if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) {
  2632  		t.Fatalf("Got GlobalIPv6address %s want GlobalIPv6address with prefix %s", expectedPrefix, container.NetworkSettings.GlobalIPv6Address)
  2633  	}
  2634  }
  2635  
  2636  func TestParseDockerImage(t *testing.T) {
  2637  	tests := []struct {
  2638  		Image string
  2639  		Repo  string
  2640  		Tag   string
  2641  	}{
  2642  		{"library/hello-world:1.0", "library/hello-world", "1.0"},
  2643  		{"library/hello-world", "library/hello-world", "latest"},
  2644  		{"library/hello-world:latest", "library/hello-world", "latest"},
  2645  		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
  2646  	}
  2647  	for _, test := range tests {
  2648  		t.Run(test.Image, func(t *testing.T) {
  2649  			repo, tag := parseDockerImage(test.Image)
  2650  			require.Equal(t, test.Repo, repo)
  2651  			require.Equal(t, test.Tag, tag)
  2652  		})
  2653  	}
  2654  }
  2655  
  2656  func TestDockerImageRef(t *testing.T) {
  2657  	tests := []struct {
  2658  		Image string
  2659  		Repo  string
  2660  		Tag   string
  2661  	}{
  2662  		{"library/hello-world:1.0", "library/hello-world", "1.0"},
  2663  		{"library/hello-world:latest", "library/hello-world", "latest"},
  2664  		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
  2665  	}
  2666  	for _, test := range tests {
  2667  		t.Run(test.Image, func(t *testing.T) {
  2668  			image := dockerImageRef(test.Repo, test.Tag)
  2669  			require.Equal(t, test.Image, image)
  2670  		})
  2671  	}
  2672  }
  2673  
  2674  func waitForExist(t *testing.T, client *docker.Client, containerID string) {
  2675  	tu.WaitForResult(func() (bool, error) {
  2676  		container, err := client.InspectContainer(containerID)
  2677  		if err != nil {
  2678  			if _, ok := err.(*docker.NoSuchContainer); !ok {
  2679  				return false, err
  2680  			}
  2681  		}
  2682  
  2683  		return container != nil, nil
  2684  	}, func(err error) {
  2685  		require.NoError(t, err)
  2686  	})
  2687  }
  2688  
// TestDockerDriver_CreationIdempotent asserts that the createContainer and
// startContainer functions are idempotent, as we have some retry
// logic there without ensuring we delete/destroy containers
func TestDockerDriver_CreationIdempotent(t *testing.T) {
	if !tu.IsCI() {
		t.Parallel()
	}
	testutil.DockerCompatible(t)

	task, cfg, ports := dockerTask(t)
	defer freeport.Return(ports)
	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))

	client := newTestDockerClient(t)
	driver := dockerDriverHarness(t, nil)
	cleanup := driver.MkAllocDir(task, true)
	defer cleanup()

	copyImage(t, task.TaskDir(), "busybox.tar")

	d, ok := driver.Impl().(*Driver)
	require.True(t, ok)

	_, err := d.createImage(task, cfg, client)
	require.NoError(t, err)

	containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image)
	require.NoError(t, err)

	c, err := d.createContainer(client, containerCfg, cfg.Image)
	require.NoError(t, err)
	defer client.RemoveContainer(docker.RemoveContainerOptions{
		ID:    c.ID,
		Force: true,
	})

	// calling createContainer again creates a new one and remove old one
	c2, err := d.createContainer(client, containerCfg, cfg.Image)
	require.NoError(t, err)
	defer client.RemoveContainer(docker.RemoveContainerOptions{
		ID:    c2.ID,
		Force: true,
	})

	require.NotEqual(t, c.ID, c2.ID)
	// old container was destroyed
	{
		_, err := client.InspectContainer(c.ID)
		require.Error(t, err)
		require.Contains(t, err.Error(), NoSuchContainerError)
	}

	// now start container twice
	require.NoError(t, d.startContainer(c2))
	require.NoError(t, d.startContainer(c2))

	// the second start must not have disturbed the running container
	tu.WaitForResult(func() (bool, error) {
		c, err := client.InspectContainer(c2.ID)
		if err != nil {
			return false, fmt.Errorf("failed to get container status: %v", err)
		}

		if !c.State.Running {
			return false, fmt.Errorf("container is not running but %v", c.State)
		}

		return true, nil
	}, func(err error) {
		require.NoError(t, err)
	})
}
  2760  
// TestDockerDriver_CreateContainerConfig_CPUHardLimit asserts that a default
// CPU quota and period are set when cpu_hard_limit = true.
func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) {
	t.Parallel()

	task, _, ports := dockerTask(t)
	defer freeport.Return(ports)

	dh := dockerDriverHarness(t, nil)
	driver := dh.Impl().(*Driver)
	// NOTE(review): the errors/diagnostics from TaskConfigSchema, Convert,
	// and ParseHclInterface are discarded here; consider asserting them so
	// a schema failure doesn't surface as a confusing nil deref — TODO
	// confirm intended.
	schema, _ := driver.TaskConfigSchema()
	spec, _ := hclspecutils.Convert(schema)

	val, _, _ := hclutils.ParseHclInterface(map[string]interface{}{
		"image":          "foo/bar",
		"cpu_hard_limit": true,
	}, spec, nil)

	require.NoError(t, task.EncodeDriverConfig(val))
	cfg := &TaskConfig{}
	require.NoError(t, task.DecodeDriverConfig(cfg))
	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
	require.NoError(t, err)

	// cpu_hard_limit must translate into non-zero cgroup CFS settings
	require.NotZero(t, c.HostConfig.CPUQuota)
	require.NotZero(t, c.HostConfig.CPUPeriod)
}
  2788  
  2789  func TestDockerDriver_memoryLimits(t *testing.T) {
  2790  	t.Parallel()
  2791  
  2792  	t.Run("driver hard limit not set", func(t *testing.T) {
  2793  		memory, memoryReservation := new(Driver).memoryLimits(0, 256*1024*1024)
  2794  		require.Equal(t, int64(256*1024*1024), memory)
  2795  		require.Equal(t, int64(0), memoryReservation)
  2796  	})
  2797  
  2798  	t.Run("driver hard limit is set", func(t *testing.T) {
  2799  		memory, memoryReservation := new(Driver).memoryLimits(512, 256*1024*1024)
  2800  		require.Equal(t, int64(512*1024*1024), memory)
  2801  		require.Equal(t, int64(256*1024*1024), memoryReservation)
  2802  	})
  2803  }
  2804  
  2805  func TestDockerDriver_parseSignal(t *testing.T) {
  2806  	t.Parallel()
  2807  
  2808  	d := new(Driver)
  2809  
  2810  	t.Run("default", func(t *testing.T) {
  2811  		s, err := d.parseSignal(runtime.GOOS, "")
  2812  		require.NoError(t, err)
  2813  		require.Equal(t, syscall.SIGTERM, s)
  2814  	})
  2815  
  2816  	t.Run("set", func(t *testing.T) {
  2817  		s, err := d.parseSignal(runtime.GOOS, "SIGHUP")
  2818  		require.NoError(t, err)
  2819  		require.Equal(t, syscall.SIGHUP, s)
  2820  	})
  2821  
  2822  	t.Run("windows conversion", func(t *testing.T) {
  2823  		s, err := d.parseSignal("windows", "SIGINT")
  2824  		require.NoError(t, err)
  2825  		require.Equal(t, syscall.SIGTERM, s)
  2826  	})
  2827  
  2828  	t.Run("not a signal", func(t *testing.T) {
  2829  		_, err := d.parseSignal(runtime.GOOS, "SIGDOESNOTEXIST")
  2830  		require.Error(t, err)
  2831  	})
  2832  }