github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/drivers/docker/driver_test.go

     1  package docker
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io/ioutil"
     7  	"math/rand"
     8  	"path/filepath"
     9  	"reflect"
    10  	"runtime"
    11  	"runtime/debug"
    12  	"sort"
    13  	"strings"
    14  	"syscall"
    15  	"testing"
    16  	"time"
    17  
    18  	docker "github.com/fsouza/go-dockerclient"
    19  	hclog "github.com/hashicorp/go-hclog"
    20  	"github.com/hashicorp/nomad/ci"
    21  	"github.com/hashicorp/nomad/client/taskenv"
    22  	"github.com/hashicorp/nomad/client/testutil"
    23  	"github.com/hashicorp/nomad/helper/freeport"
    24  	"github.com/hashicorp/nomad/helper/pluginutils/hclspecutils"
    25  	"github.com/hashicorp/nomad/helper/pluginutils/hclutils"
    26  	"github.com/hashicorp/nomad/helper/pluginutils/loader"
    27  	"github.com/hashicorp/nomad/helper/testlog"
    28  	"github.com/hashicorp/nomad/helper/uuid"
    29  	"github.com/hashicorp/nomad/nomad/structs"
    30  	"github.com/hashicorp/nomad/plugins/base"
    31  	"github.com/hashicorp/nomad/plugins/drivers"
    32  	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
    33  	tu "github.com/hashicorp/nomad/testutil"
    34  	"github.com/shoenig/test/must"
    35  	"github.com/stretchr/testify/assert"
    36  	"github.com/stretchr/testify/require"
    37  )
    38  
    39  var (
    40  	basicResources = &drivers.Resources{
    41  		NomadResources: &structs.AllocatedTaskResources{
    42  			Memory: structs.AllocatedMemoryResources{
    43  				MemoryMB: 256,
    44  			},
    45  			Cpu: structs.AllocatedCpuResources{
    46  				CpuShares: 250,
    47  			},
    48  		},
    49  		LinuxResources: &drivers.LinuxResources{
    50  			CPUShares:        512,
    51  			MemoryLimitBytes: 256 * 1024 * 1024,
    52  		},
    53  	}
    54  )
    55  
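// dockerIsRemote reports whether the Docker client configured from the
// environment appears to point at a remote (tcp) endpoint rather than a
// local socket.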
    56  func dockerIsRemote(t *testing.T) bool {
    57  	client, err := docker.NewClientFromEnv()
    58  	if err != nil {
    59  		return false
    60  	}
    61  
    62  	// Technically this could be a local tcp socket but for testing purposes
    63  	// we'll just assume that tcp is only used for remote connections.
     64  	if strings.HasPrefix(client.Endpoint(), "tcp") {
    65  		return true
    66  	}
    67  	return false
    68  }
    69  
    70  var (
    71  	// busyboxLongRunningCmd is a busybox command that runs indefinitely, and
     72  	// ideally responds to SIGINT/SIGTERM. Sadly, busybox:1.29.3 /bin/sleep doesn't.
    73  	busyboxLongRunningCmd = []string{"nc", "-l", "-p", "3000", "127.0.0.1"}
    74  )
    75  
     76  // dockerTask returns a task with a reserved and a dynamic port, in that
     77  // order. The ports should be reclaimed with freeport.Return at the end of a test.
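//
// Typical usage (a sketch mirroring the tests below):
//
//	task, cfg, ports := dockerTask(t)
//	defer freeport.Return(ports)
//	// customize cfg as needed, then re-encode it:
//	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))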
    78  func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) {
    79  	ports := freeport.MustTake(2)
    80  	dockerReserved := ports[0]
    81  	dockerDynamic := ports[1]
    82  
    83  	cfg := newTaskConfig("", busyboxLongRunningCmd)
    84  	task := &drivers.TaskConfig{
    85  		ID:      uuid.Generate(),
    86  		Name:    "redis-demo",
    87  		AllocID: uuid.Generate(),
    88  		Env: map[string]string{
    89  			"test": t.Name(),
    90  		},
    91  		DeviceEnv: make(map[string]string),
    92  		Resources: &drivers.Resources{
    93  			NomadResources: &structs.AllocatedTaskResources{
    94  				Memory: structs.AllocatedMemoryResources{
    95  					MemoryMB: 256,
    96  				},
    97  				Cpu: structs.AllocatedCpuResources{
    98  					CpuShares: 512,
    99  				},
   100  				Networks: []*structs.NetworkResource{
   101  					{
   102  						IP:            "127.0.0.1",
   103  						ReservedPorts: []structs.Port{{Label: "main", Value: dockerReserved}},
   104  						DynamicPorts:  []structs.Port{{Label: "REDIS", Value: dockerDynamic}},
   105  					},
   106  				},
   107  			},
   108  			LinuxResources: &drivers.LinuxResources{
   109  				CPUShares:        512,
   110  				MemoryLimitBytes: 256 * 1024 * 1024,
   111  				PercentTicks:     float64(512) / float64(4096),
   112  			},
   113  		},
   114  	}
   115  
   116  	require.NoError(t, task.EncodeConcreteDriverConfig(&cfg))
   117  
   118  	return task, &cfg, ports
   119  }
   120  
    121  // dockerSetup does all of the basic setup you need to get a docker task
    122  // running for testing. Use like:
    123  //
    124  //	task, cfg, ports := dockerTask(t)
    125  //	// do custom task configuration
    126  //	client, driver, handle, cleanup := dockerSetup(t, task, nil)
    127  //	defer cleanup()
    128  //	// do test stuff
    129  //
    130  // If there is a problem during setup this function will abort or skip the test
    131  // and indicate the reason.
   132  func dockerSetup(t *testing.T, task *drivers.TaskConfig, driverCfg map[string]interface{}) (*docker.Client, *dtestutil.DriverHarness, *taskHandle, func()) {
   133  	client := newTestDockerClient(t)
   134  	driver := dockerDriverHarness(t, driverCfg)
   135  	cleanup := driver.MkAllocDir(task, true)
   136  
   137  	copyImage(t, task.TaskDir(), "busybox.tar")
   138  	_, _, err := driver.StartTask(task)
   139  	require.NoError(t, err)
   140  
   141  	dockerDriver, ok := driver.Impl().(*Driver)
   142  	require.True(t, ok)
   143  	handle, ok := dockerDriver.tasks.Get(task.ID)
   144  	require.True(t, ok)
   145  
   146  	return client, driver, handle, func() {
   147  		driver.DestroyTask(task.ID, true)
   148  		cleanup()
   149  	}
   150  }
   151  
   152  // cleanSlate removes the specified docker image, including potentially stopping/removing any
   153  // containers based on that image. This is used to decouple tests that would be coupled
   154  // by using the same container image.
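//
// Typical usage (sketch): cleanSlate(client, "busybox:1.29.3") before a test
// that must exercise the image pull or load path from scratch.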
   155  func cleanSlate(client *docker.Client, imageID string) {
   156  	if img, _ := client.InspectImage(imageID); img == nil {
   157  		return
   158  	}
   159  	containers, _ := client.ListContainers(docker.ListContainersOptions{
   160  		All: true,
   161  		Filters: map[string][]string{
   162  			"ancestor": {imageID},
   163  		},
   164  	})
   165  	for _, c := range containers {
   166  		client.RemoveContainer(docker.RemoveContainerOptions{
   167  			Force: true,
   168  			ID:    c.ID,
   169  		})
   170  	}
   171  	client.RemoveImageExtended(imageID, docker.RemoveImageOptions{
   172  		Force: true,
   173  	})
   175  }
   176  
    177  // dockerDriverHarness wires up everything needed to launch a task with a docker driver.
    178  // A driver plugin harness is returned; its context is canceled via t.Cleanup.
   179  func dockerDriverHarness(t *testing.T, cfg map[string]interface{}) *dtestutil.DriverHarness {
   180  	logger := testlog.HCLogger(t)
   181  	ctx, cancel := context.WithCancel(context.Background())
   182  	t.Cleanup(func() { cancel() })
   183  	harness := dtestutil.NewDriverHarness(t, NewDockerDriver(ctx, logger))
   184  	if cfg == nil {
   185  		cfg = map[string]interface{}{
   186  			"gc": map[string]interface{}{
   187  				"image":       false,
   188  				"image_delay": "1s",
   189  			},
   190  		}
   191  	}
   192  	plugLoader, err := loader.NewPluginLoader(&loader.PluginLoaderConfig{
   193  		Logger:            logger,
   194  		PluginDir:         "./plugins",
   195  		SupportedVersions: loader.AgentSupportedApiVersions,
   196  		InternalPlugins: map[loader.PluginID]*loader.InternalPluginConfig{
   197  			PluginID: {
   198  				Config: cfg,
   199  				Factory: func(context.Context, hclog.Logger) interface{} {
   200  					return harness
   201  				},
   202  			},
   203  		},
   204  	})
   205  
   206  	require.NoError(t, err)
   207  	instance, err := plugLoader.Dispense(pluginName, base.PluginTypeDriver, nil, logger)
   208  	require.NoError(t, err)
   209  	driver, ok := instance.Plugin().(*dtestutil.DriverHarness)
   210  	if !ok {
    211  		t.Fatal("plugin instance is not a *dtestutil.DriverHarness")
   212  	}
   213  
   214  	return driver
   215  }
   216  
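// newTestDockerClient builds a Docker client from the environment (e.g.
// DOCKER_HOST), first skipping the test if Docker is not available.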
   217  func newTestDockerClient(t *testing.T) *docker.Client {
   218  	t.Helper()
   219  	testutil.DockerCompatible(t)
   220  
   221  	client, err := docker.NewClientFromEnv()
   222  	if err != nil {
   223  		t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack())
   224  	}
   225  	return client
   226  }
   227  
    228  // The following tests have been removed from this file:
    229  // [TestDockerDriver_Fingerprint, TestDockerDriver_Fingerprint_Bridge, TestDockerDriver_Check_DockerHealthStatus]
    230  // To check out or revert those tests, see commit 41715b1860778aa80513391bd64abd721d768ab0.
   231  
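// TestDockerDriver_Start_Wait asserts that a long-running container starts
// and is still running (no exit result) after a short wait.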
   232  func TestDockerDriver_Start_Wait(t *testing.T) {
   233  	ci.Parallel(t)
   234  	testutil.DockerCompatible(t)
   235  
   236  	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
   237  	task := &drivers.TaskConfig{
   238  		ID:        uuid.Generate(),
   239  		Name:      "nc-demo",
   240  		AllocID:   uuid.Generate(),
   241  		Resources: basicResources,
   242  	}
   243  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   244  
   245  	d := dockerDriverHarness(t, nil)
   246  	cleanup := d.MkAllocDir(task, true)
   247  	defer cleanup()
   248  	copyImage(t, task.TaskDir(), "busybox.tar")
   249  
   250  	_, _, err := d.StartTask(task)
   251  	require.NoError(t, err)
   252  
   253  	defer d.DestroyTask(task.ID, true)
   254  
   255  	// Attempt to wait
   256  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   257  	require.NoError(t, err)
   258  
   259  	select {
   260  	case <-waitCh:
   261  		t.Fatalf("wait channel should not have received an exit result")
   262  	case <-time.After(time.Duration(tu.TestMultiplier()*1) * time.Second):
   263  	}
   264  }
   265  
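// TestDockerDriver_Start_WaitFinish asserts that a short-lived container
// runs to completion and reports a successful exit result.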
   266  func TestDockerDriver_Start_WaitFinish(t *testing.T) {
   267  	ci.Parallel(t)
   268  	testutil.DockerCompatible(t)
   269  
   270  	taskCfg := newTaskConfig("", []string{"echo", "hello"})
   271  	task := &drivers.TaskConfig{
   272  		ID:        uuid.Generate(),
   273  		Name:      "nc-demo",
   274  		AllocID:   uuid.Generate(),
   275  		Resources: basicResources,
   276  	}
   277  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   278  
   279  	d := dockerDriverHarness(t, nil)
   280  	cleanup := d.MkAllocDir(task, true)
   281  	defer cleanup()
   282  	copyImage(t, task.TaskDir(), "busybox.tar")
   283  
   284  	_, _, err := d.StartTask(task)
   285  	require.NoError(t, err)
   286  
   287  	defer d.DestroyTask(task.ID, true)
   288  
   289  	// Attempt to wait
   290  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   291  	require.NoError(t, err)
   292  
   293  	select {
   294  	case res := <-waitCh:
   295  		if !res.Successful() {
    296  			require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
   297  		}
   298  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   299  		require.Fail(t, "timeout")
   300  	}
   301  }
   302  
   303  // TestDockerDriver_Start_StoppedContainer asserts that Nomad will detect a
   304  // stopped task container, remove it, and start a new container.
   305  //
   306  // See https://github.com/hashicorp/nomad/issues/3419
   307  func TestDockerDriver_Start_StoppedContainer(t *testing.T) {
   308  	ci.Parallel(t)
   309  	testutil.DockerCompatible(t)
   310  
   311  	taskCfg := newTaskConfig("", []string{"sleep", "9001"})
   312  	task := &drivers.TaskConfig{
   313  		ID:        uuid.Generate(),
   314  		Name:      "nc-demo",
   315  		AllocID:   uuid.Generate(),
   316  		Resources: basicResources,
   317  	}
   318  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   319  
   320  	d := dockerDriverHarness(t, nil)
   321  	cleanup := d.MkAllocDir(task, true)
   322  	defer cleanup()
   323  	copyImage(t, task.TaskDir(), "busybox.tar")
   324  
   325  	client := newTestDockerClient(t)
   326  
   327  	var imageID string
   328  	var err error
   329  
   330  	if runtime.GOOS != "windows" {
   331  		imageID, err = d.Impl().(*Driver).loadImage(task, &taskCfg, client)
   332  	} else {
   333  		image, lErr := client.InspectImage(taskCfg.Image)
   334  		err = lErr
   335  		if image != nil {
   336  			imageID = image.ID
   337  		}
   338  	}
   339  	require.NoError(t, err)
   340  	require.NotEmpty(t, imageID)
   341  
   342  	// Create a container of the same name but don't start it. This mimics
   343  	// the case of dockerd getting restarted and stopping containers while
   344  	// Nomad is watching them.
   345  	opts := docker.CreateContainerOptions{
   346  		Name: strings.Replace(task.ID, "/", "_", -1),
   347  		Config: &docker.Config{
   348  			Image: taskCfg.Image,
   349  			Cmd:   []string{"sleep", "9000"},
   350  			Env:   []string{fmt.Sprintf("test=%s", t.Name())},
   351  		},
   352  	}
   353  
   354  	if _, err := client.CreateContainer(opts); err != nil {
   355  		t.Fatalf("error creating initial container: %v", err)
   356  	}
   357  
   358  	_, _, err = d.StartTask(task)
   359  	defer d.DestroyTask(task.ID, true)
   360  	require.NoError(t, err)
   361  
   362  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   363  	require.NoError(t, d.DestroyTask(task.ID, true))
   364  }
   365  
   366  func TestDockerDriver_Start_LoadImage(t *testing.T) {
   367  	ci.Parallel(t)
   368  	testutil.DockerCompatible(t)
   369  
   370  	taskCfg := newTaskConfig("", []string{"sh", "-c", "echo hello > $NOMAD_TASK_DIR/output"})
   371  	task := &drivers.TaskConfig{
   372  		ID:        uuid.Generate(),
   373  		Name:      "busybox-demo",
   374  		AllocID:   uuid.Generate(),
   375  		Resources: basicResources,
   376  	}
   377  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   378  
   379  	d := dockerDriverHarness(t, nil)
   380  	cleanup := d.MkAllocDir(task, true)
   381  	defer cleanup()
   382  	copyImage(t, task.TaskDir(), "busybox.tar")
   383  
   384  	_, _, err := d.StartTask(task)
   385  	require.NoError(t, err)
   386  
   387  	defer d.DestroyTask(task.ID, true)
   388  
   389  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   390  	require.NoError(t, err)
   391  	select {
   392  	case res := <-waitCh:
   393  		if !res.Successful() {
    394  			require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
   395  		}
   396  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   397  		require.Fail(t, "timeout")
   398  	}
   399  
   400  	// Check that data was written to the shared alloc directory.
   401  	outputFile := filepath.Join(task.TaskDir().LocalDir, "output")
   402  	act, err := ioutil.ReadFile(outputFile)
   403  	if err != nil {
   404  		t.Fatalf("Couldn't read expected output: %v", err)
   405  	}
   406  
   407  	exp := "hello"
   408  	if strings.TrimSpace(string(act)) != exp {
    409  		t.Fatalf("command output %q; want %q", act, exp)
   410  	}
   411  
   412  }
   413  
   414  // Tests that starting a task without an image fails
   415  func TestDockerDriver_Start_NoImage(t *testing.T) {
   416  	ci.Parallel(t)
   417  	testutil.DockerCompatible(t)
   418  
   419  	taskCfg := TaskConfig{
   420  		Command: "echo",
   421  		Args:    []string{"foo"},
   422  	}
   423  	task := &drivers.TaskConfig{
   424  		ID:        uuid.Generate(),
   425  		Name:      "echo",
   426  		AllocID:   uuid.Generate(),
   427  		Resources: basicResources,
   428  	}
   429  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   430  
   431  	d := dockerDriverHarness(t, nil)
   432  	cleanup := d.MkAllocDir(task, false)
   433  	defer cleanup()
   434  
   435  	_, _, err := d.StartTask(task)
   436  	require.Error(t, err)
   437  	require.Contains(t, err.Error(), "image name required")
   438  
   439  	d.DestroyTask(task.ID, true)
   440  }
   441  
   442  func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) {
   443  	ci.Parallel(t)
   444  	testutil.DockerCompatible(t)
   445  
   446  	taskCfg := TaskConfig{
   447  		Image:            "127.0.0.1:32121/foo", // bad path
   448  		ImagePullTimeout: "5m",
   449  		Command:          "echo",
   450  		Args: []string{
   451  			"hello",
   452  		},
   453  	}
   454  	task := &drivers.TaskConfig{
   455  		ID:        uuid.Generate(),
   456  		Name:      "busybox-demo",
   457  		AllocID:   uuid.Generate(),
   458  		Resources: basicResources,
   459  	}
   460  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   461  
   462  	d := dockerDriverHarness(t, nil)
   463  	cleanup := d.MkAllocDir(task, true)
   464  	defer cleanup()
   465  
   466  	_, _, err := d.StartTask(task)
   467  	require.Error(t, err)
   468  
   469  	defer d.DestroyTask(task.ID, true)
   470  
   471  	if rerr, ok := err.(*structs.RecoverableError); !ok {
   472  		t.Fatalf("want recoverable error: %+v", err)
   473  	} else if !rerr.IsRecoverable() {
   474  		t.Fatalf("error not recoverable: %+v", err)
   475  	}
   476  }
   477  
   478  func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) {
   479  	ci.Parallel(t)
   480  	// This test requires that the alloc dir be mounted into docker as a volume.
   481  	// Because this cannot happen when docker is run remotely, e.g. when running
   482  	// docker in a VM, we skip this when we detect Docker is being run remotely.
   483  	if !testutil.DockerIsConnected(t) || dockerIsRemote(t) {
    484  		t.Skip("Docker not connected or Docker is remote")
   485  	}
   486  
   487  	exp := []byte{'w', 'i', 'n'}
   488  	file := "output.txt"
   489  
   490  	taskCfg := newTaskConfig("", []string{
   491  		"sh",
   492  		"-c",
   493  		fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`,
   494  			string(exp), taskenv.AllocDir, file),
   495  	})
   496  	task := &drivers.TaskConfig{
   497  		ID:        uuid.Generate(),
   498  		Name:      "busybox-demo",
   499  		AllocID:   uuid.Generate(),
   500  		Resources: basicResources,
   501  	}
   502  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   503  
   504  	d := dockerDriverHarness(t, nil)
   505  	cleanup := d.MkAllocDir(task, true)
   506  	defer cleanup()
   507  	copyImage(t, task.TaskDir(), "busybox.tar")
   508  
   509  	_, _, err := d.StartTask(task)
   510  	require.NoError(t, err)
   511  
   512  	defer d.DestroyTask(task.ID, true)
   513  
   514  	// Attempt to wait
   515  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   516  	require.NoError(t, err)
   517  
   518  	select {
   519  	case res := <-waitCh:
   520  		if !res.Successful() {
   521  			require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res))
   522  		}
   523  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   524  		require.Fail(t, "timeout")
   525  	}
   526  
   527  	// Check that data was written to the shared alloc directory.
   528  	outputFile := filepath.Join(task.TaskDir().SharedAllocDir, file)
   529  	act, err := ioutil.ReadFile(outputFile)
   530  	if err != nil {
   531  		t.Fatalf("Couldn't read expected output: %v", err)
   532  	}
   533  
   534  	if !reflect.DeepEqual(act, exp) {
    535  		t.Fatalf("command output %q; want %q", act, exp)
   536  	}
   537  }
   538  
   539  func TestDockerDriver_Start_Kill_Wait(t *testing.T) {
   540  	ci.Parallel(t)
   541  	testutil.DockerCompatible(t)
   542  
   543  	taskCfg := newTaskConfig("", busyboxLongRunningCmd)
   544  	task := &drivers.TaskConfig{
   545  		ID:        uuid.Generate(),
   546  		Name:      "busybox-demo",
   547  		AllocID:   uuid.Generate(),
   548  		Resources: basicResources,
   549  	}
   550  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   551  
   552  	d := dockerDriverHarness(t, nil)
   553  	cleanup := d.MkAllocDir(task, true)
   554  	defer cleanup()
   555  	copyImage(t, task.TaskDir(), "busybox.tar")
   556  
   557  	_, _, err := d.StartTask(task)
   558  	require.NoError(t, err)
   559  
   560  	defer d.DestroyTask(task.ID, true)
   561  
   562  	go func(t *testing.T) {
   563  		time.Sleep(100 * time.Millisecond)
   564  		signal := "SIGINT"
   565  		if runtime.GOOS == "windows" {
   566  			signal = "SIGKILL"
   567  		}
   568  		require.NoError(t, d.StopTask(task.ID, time.Second, signal))
   569  	}(t)
   570  
   571  	// Attempt to wait
   572  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   573  	require.NoError(t, err)
   574  
   575  	select {
   576  	case res := <-waitCh:
   577  		if res.Successful() {
    578  			require.Fail(t, fmt.Sprintf("ExitResult should err: %v", res))
   579  		}
   580  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   581  		require.Fail(t, "timeout")
   582  	}
   583  }
   584  
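// TestDockerDriver_Start_KillTimeout asserts that the stop timeout is
// honored. busybox sleep runs as PID 1 in the container with no SIGUSR1
// handler installed, so the signal is dropped and the task only exits once
// the driver escalates to a kill after the timeout elapses.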
   585  func TestDockerDriver_Start_KillTimeout(t *testing.T) {
   586  	ci.Parallel(t)
   587  	testutil.DockerCompatible(t)
   588  
   589  	if runtime.GOOS == "windows" {
   590  		t.Skip("Windows Docker does not support SIGUSR1")
   591  	}
   592  
   593  	timeout := 2 * time.Second
   594  	taskCfg := newTaskConfig("", []string{"sleep", "10"})
   595  	task := &drivers.TaskConfig{
   596  		ID:        uuid.Generate(),
   597  		Name:      "busybox-demo",
   598  		AllocID:   uuid.Generate(),
   599  		Resources: basicResources,
   600  	}
   601  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
   602  
   603  	d := dockerDriverHarness(t, nil)
   604  	cleanup := d.MkAllocDir(task, true)
   605  	defer cleanup()
   606  	copyImage(t, task.TaskDir(), "busybox.tar")
   607  
   608  	_, _, err := d.StartTask(task)
   609  	require.NoError(t, err)
   610  
   611  	defer d.DestroyTask(task.ID, true)
   612  
   613  	var killSent time.Time
   614  	go func() {
   615  		time.Sleep(100 * time.Millisecond)
   616  		killSent = time.Now()
   617  		require.NoError(t, d.StopTask(task.ID, timeout, "SIGUSR1"))
   618  	}()
   619  
   620  	// Attempt to wait
   621  	waitCh, err := d.WaitTask(context.Background(), task.ID)
   622  	require.NoError(t, err)
   623  
   624  	var killed time.Time
   625  	select {
   626  	case <-waitCh:
   627  		killed = time.Now()
   628  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   629  		require.Fail(t, "timeout")
   630  	}
   631  
   632  	require.True(t, killed.Sub(killSent) > timeout)
   633  }
   634  
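// TestDockerDriver_StartN starts three containers concurrently from the same
// busybox image, then stops each with SIGINT and waits for them to exit.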
   635  func TestDockerDriver_StartN(t *testing.T) {
   636  	ci.Parallel(t)
   637  	if runtime.GOOS == "windows" {
   638  		t.Skip("Windows Docker does not support SIGINT")
   639  	}
   640  	testutil.DockerCompatible(t)
   641  	require := require.New(t)
   642  
   643  	task1, _, ports1 := dockerTask(t)
   644  	defer freeport.Return(ports1)
   645  
   646  	task2, _, ports2 := dockerTask(t)
   647  	defer freeport.Return(ports2)
   648  
   649  	task3, _, ports3 := dockerTask(t)
   650  	defer freeport.Return(ports3)
   651  
   652  	taskList := []*drivers.TaskConfig{task1, task2, task3}
   653  
   654  	t.Logf("Starting %d tasks", len(taskList))
   655  
   656  	d := dockerDriverHarness(t, nil)
   657  	// Let's spin up a bunch of things
   658  	for _, task := range taskList {
   659  		cleanup := d.MkAllocDir(task, true)
   660  		defer cleanup()
   661  		copyImage(t, task.TaskDir(), "busybox.tar")
   662  		_, _, err := d.StartTask(task)
   663  		require.NoError(err)
   664  
   665  	}
   666  
   667  	defer d.DestroyTask(task3.ID, true)
   668  	defer d.DestroyTask(task2.ID, true)
   669  	defer d.DestroyTask(task1.ID, true)
   670  
   671  	t.Log("All tasks are started. Terminating...")
   672  	for _, task := range taskList {
   673  		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))
   674  
   675  		// Attempt to wait
   676  		waitCh, err := d.WaitTask(context.Background(), task.ID)
   677  		require.NoError(err)
   678  
   679  		select {
   680  		case <-waitCh:
   681  		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   682  			require.Fail("timeout waiting on task")
   683  		}
   684  	}
   685  
   686  	t.Log("Test complete!")
   687  }
   688  
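// TestDockerDriver_StartNVersions is like TestDockerDriver_StartN, but each
// task uses a different busybox image variant (default, musl, glibc) loaded
// from local tarballs.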
   689  func TestDockerDriver_StartNVersions(t *testing.T) {
   690  	ci.Parallel(t)
   691  	if runtime.GOOS == "windows" {
   692  		t.Skip("Skipped on windows, we don't have image variants available")
   693  	}
   694  	testutil.DockerCompatible(t)
   695  	require := require.New(t)
   696  
   697  	task1, cfg1, ports1 := dockerTask(t)
   698  	defer freeport.Return(ports1)
   699  	tcfg1 := newTaskConfig("", []string{"echo", "hello"})
   700  	cfg1.Image = tcfg1.Image
   701  	cfg1.LoadImage = tcfg1.LoadImage
   702  	require.NoError(task1.EncodeConcreteDriverConfig(cfg1))
   703  
   704  	task2, cfg2, ports2 := dockerTask(t)
   705  	defer freeport.Return(ports2)
   706  	tcfg2 := newTaskConfig("musl", []string{"echo", "hello"})
   707  	cfg2.Image = tcfg2.Image
   708  	cfg2.LoadImage = tcfg2.LoadImage
   709  	require.NoError(task2.EncodeConcreteDriverConfig(cfg2))
   710  
   711  	task3, cfg3, ports3 := dockerTask(t)
   712  	defer freeport.Return(ports3)
   713  	tcfg3 := newTaskConfig("glibc", []string{"echo", "hello"})
   714  	cfg3.Image = tcfg3.Image
   715  	cfg3.LoadImage = tcfg3.LoadImage
   716  	require.NoError(task3.EncodeConcreteDriverConfig(cfg3))
   717  
   718  	taskList := []*drivers.TaskConfig{task1, task2, task3}
   719  
   720  	t.Logf("Starting %d tasks", len(taskList))
   721  	d := dockerDriverHarness(t, nil)
   722  
   723  	// Let's spin up a bunch of things
   724  	for _, task := range taskList {
   725  		cleanup := d.MkAllocDir(task, true)
   726  		defer cleanup()
   727  		copyImage(t, task.TaskDir(), "busybox.tar")
   728  		copyImage(t, task.TaskDir(), "busybox_musl.tar")
   729  		copyImage(t, task.TaskDir(), "busybox_glibc.tar")
   730  		_, _, err := d.StartTask(task)
   731  		require.NoError(err)
   732  
   733  		require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second))
   734  	}
   735  
   736  	defer d.DestroyTask(task3.ID, true)
   737  	defer d.DestroyTask(task2.ID, true)
   738  	defer d.DestroyTask(task1.ID, true)
   739  
   740  	t.Log("All tasks are started. Terminating...")
   741  	for _, task := range taskList {
   742  		require.NoError(d.StopTask(task.ID, time.Second, "SIGINT"))
   743  
   744  		// Attempt to wait
   745  		waitCh, err := d.WaitTask(context.Background(), task.ID)
   746  		require.NoError(err)
   747  
   748  		select {
   749  		case <-waitCh:
   750  		case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
   751  			require.Fail("timeout waiting on task")
   752  		}
   753  	}
   754  
   755  	t.Log("Test complete!")
   756  }
   757  
   758  func TestDockerDriver_Labels(t *testing.T) {
   759  	ci.Parallel(t)
   760  	testutil.DockerCompatible(t)
   761  
   762  	task, cfg, ports := dockerTask(t)
   763  	defer freeport.Return(ports)
   764  
   765  	cfg.Labels = map[string]string{
   766  		"label1": "value1",
   767  		"label2": "value2",
   768  	}
   769  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   770  
   771  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   772  	defer cleanup()
   773  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   774  
   775  	container, err := client.InspectContainer(handle.containerID)
   776  	if err != nil {
   777  		t.Fatalf("err: %v", err)
   778  	}
   779  
    780  	// expect to see 1 additional standard label (allocID)
   781  	require.Equal(t, len(cfg.Labels)+1, len(container.Config.Labels))
   782  	for k, v := range cfg.Labels {
   783  		require.Equal(t, v, container.Config.Labels[k])
   784  	}
   785  }
   786  
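// TestDockerDriver_ExtraLabels verifies the extra_labels client option: glob
// patterns such as "task*" expand to the task_name and task_group_name
// labels on the container, in addition to the default alloc_id label.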
   787  func TestDockerDriver_ExtraLabels(t *testing.T) {
   788  	ci.Parallel(t)
   789  	testutil.DockerCompatible(t)
   790  
   791  	task, cfg, ports := dockerTask(t)
   792  	defer freeport.Return(ports)
   793  
   794  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   795  
   796  	dockerClientConfig := make(map[string]interface{})
   797  
   798  	dockerClientConfig["extra_labels"] = []string{"task*", "job_name"}
   799  	client, d, handle, cleanup := dockerSetup(t, task, dockerClientConfig)
   800  	defer cleanup()
   801  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   802  
   803  	container, err := client.InspectContainer(handle.containerID)
   804  	if err != nil {
   805  		t.Fatalf("err: %v", err)
   806  	}
   807  
   808  	expectedLabels := map[string]string{
   809  		"com.hashicorp.nomad.alloc_id":        task.AllocID,
   810  		"com.hashicorp.nomad.task_name":       task.Name,
   811  		"com.hashicorp.nomad.task_group_name": task.TaskGroupName,
   812  		"com.hashicorp.nomad.job_name":        task.JobName,
   813  	}
   814  
   815  	// expect to see 4 labels (allocID by default, task_name and task_group_name due to task*, and job_name)
   816  	require.Equal(t, 4, len(container.Config.Labels))
   817  	for k, v := range expectedLabels {
   818  		require.Equal(t, v, container.Config.Labels[k])
   819  	}
   820  }
   821  
   822  func TestDockerDriver_LoggingConfiguration(t *testing.T) {
   823  	ci.Parallel(t)
   824  	testutil.DockerCompatible(t)
   825  
   826  	task, cfg, ports := dockerTask(t)
   827  	defer freeport.Return(ports)
   828  
   829  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   830  
   831  	dockerClientConfig := make(map[string]interface{})
   832  	loggerConfig := map[string]string{"gelf-address": "udp://1.2.3.4:12201", "tag": "gelf"}
   833  
   834  	dockerClientConfig["logging"] = LoggingConfig{
   835  		Type:   "gelf",
   836  		Config: loggerConfig,
   837  	}
   838  	client, d, handle, cleanup := dockerSetup(t, task, dockerClientConfig)
   839  	defer cleanup()
   840  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   841  
   842  	container, err := client.InspectContainer(handle.containerID)
   843  	require.NoError(t, err)
   844  
   845  	require.Equal(t, "gelf", container.HostConfig.LogConfig.Type)
   846  	require.Equal(t, loggerConfig, container.HostConfig.LogConfig.Config)
   847  }
   848  
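// TestDockerDriver_HealthchecksDisable asserts that healthchecks.disable
// sets the container healthcheck test to "NONE", disabling any
// image-defined healthcheck.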
   849  func TestDockerDriver_HealthchecksDisable(t *testing.T) {
   850  	ci.Parallel(t)
   851  	testutil.DockerCompatible(t)
   852  
   853  	task, cfg, ports := dockerTask(t)
   854  	cfg.Healthchecks.Disable = true
   855  	defer freeport.Return(ports)
   856  	must.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   857  
   858  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   859  	defer cleanup()
   860  	must.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   861  
   862  	container, err := client.InspectContainer(handle.containerID)
   863  	must.NoError(t, err)
   864  
   865  	must.NotNil(t, container.Config.Healthcheck)
   866  	must.Eq(t, []string{"NONE"}, container.Config.Healthcheck.Test)
   867  }
   868  
   869  func TestDockerDriver_ForcePull(t *testing.T) {
   870  	ci.Parallel(t)
   871  	testutil.DockerCompatible(t)
   872  
   873  	task, cfg, ports := dockerTask(t)
   874  	defer freeport.Return(ports)
   875  
   876  	cfg.ForcePull = true
   877  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   878  
   879  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   880  	defer cleanup()
   881  
   882  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   883  
   884  	_, err := client.InspectContainer(handle.containerID)
   885  	if err != nil {
   886  		t.Fatalf("err: %v", err)
   887  	}
   888  }
   889  
   890  func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) {
   891  	ci.Parallel(t)
   892  	if runtime.GOOS == "windows" {
   893  		t.Skip("TODO: Skipped digest test on Windows")
   894  	}
   895  	testutil.DockerCompatible(t)
   896  
   897  	task, cfg, ports := dockerTask(t)
   898  	defer freeport.Return(ports)
   899  	cfg.LoadImage = ""
   900  	cfg.Image = "library/busybox@sha256:58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64"
   901  	localDigest := "sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7"
   902  	cfg.ForcePull = true
   903  	cfg.Command = busyboxLongRunningCmd[0]
   904  	cfg.Args = busyboxLongRunningCmd[1:]
   905  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   906  
   907  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   908  	defer cleanup()
   909  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   910  
   911  	container, err := client.InspectContainer(handle.containerID)
   912  	require.NoError(t, err)
   913  	require.Equal(t, localDigest, container.Image)
   914  }
   915  
   916  func TestDockerDriver_SecurityOptUnconfined(t *testing.T) {
   917  	ci.Parallel(t)
   918  	if runtime.GOOS == "windows" {
   919  		t.Skip("Windows does not support seccomp")
   920  	}
   921  	testutil.DockerCompatible(t)
   922  
   923  	task, cfg, ports := dockerTask(t)
   924  	defer freeport.Return(ports)
   925  	cfg.SecurityOpt = []string{"seccomp=unconfined"}
   926  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   927  
   928  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   929  	defer cleanup()
   930  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   931  
   932  	container, err := client.InspectContainer(handle.containerID)
   933  	if err != nil {
   934  		t.Fatalf("err: %v", err)
   935  	}
   936  
   937  	require.Exactly(t, cfg.SecurityOpt, container.HostConfig.SecurityOpt)
   938  }
   939  
   940  func TestDockerDriver_SecurityOptFromFile(t *testing.T) {
   941  	ci.Parallel(t)
   942  	if runtime.GOOS == "windows" {
   943  		t.Skip("Windows does not support seccomp")
   944  	}
   945  	testutil.DockerCompatible(t)
   946  
   947  	task, cfg, ports := dockerTask(t)
   948  	defer freeport.Return(ports)
   949  	cfg.SecurityOpt = []string{"seccomp=./test-resources/docker/seccomp.json"}
   950  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   951  
   952  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   953  	defer cleanup()
   954  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   955  
   956  	container, err := client.InspectContainer(handle.containerID)
   957  	require.NoError(t, err)
   958  
   959  	require.Contains(t, container.HostConfig.SecurityOpt[0], "reboot")
   960  }
   961  
   962  func TestDockerDriver_Runtime(t *testing.T) {
   963  	ci.Parallel(t)
   964  	testutil.DockerCompatible(t)
   965  
   966  	task, cfg, ports := dockerTask(t)
   967  	defer freeport.Return(ports)
   968  	cfg.Runtime = "runc"
   969  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   970  
   971  	client, d, handle, cleanup := dockerSetup(t, task, nil)
   972  	defer cleanup()
   973  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
   974  
   975  	container, err := client.InspectContainer(handle.containerID)
   976  	if err != nil {
   977  		t.Fatalf("err: %v", err)
   978  	}
   979  
   980  	require.Exactly(t, cfg.Runtime, container.HostConfig.Runtime)
   981  }
   982  
   983  func TestDockerDriver_CreateContainerConfig(t *testing.T) {
   984  	ci.Parallel(t)
   985  
   986  	task, cfg, ports := dockerTask(t)
   987  	defer freeport.Return(ports)
   988  	opt := map[string]string{"size": "120G"}
   989  
   990  	cfg.StorageOpt = opt
   991  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
   992  
   993  	dh := dockerDriverHarness(t, nil)
   994  	driver := dh.Impl().(*Driver)
   995  
   996  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
   997  	require.NoError(t, err)
   998  
   999  	require.Equal(t, "org/repo:0.1", c.Config.Image)
  1000  	require.EqualValues(t, opt, c.HostConfig.StorageOpt)
  1001  
  1002  	// Container name should be /<task_name>-<alloc_id> for backward compat
  1003  	containerName := fmt.Sprintf("%s-%s", strings.Replace(task.Name, "/", "_", -1), task.AllocID)
  1004  	require.Equal(t, containerName, c.Name)
  1005  }
  1006  
  1007  func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) {
  1008  	ci.Parallel(t)
  1009  
  1010  	task, cfg, ports := dockerTask(t)
  1011  	defer freeport.Return(ports)
  1012  	task.DeviceEnv["NVIDIA_VISIBLE_DEVICES"] = "GPU_UUID_1"
  1013  
  1014  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1015  
  1016  	dh := dockerDriverHarness(t, nil)
  1017  	driver := dh.Impl().(*Driver)
  1018  	driver.gpuRuntime = true
  1019  
   1020  	// A matching explicit runtime is fine; a conflicting one should error.
  1021  	cfg.Runtime = "nvidia"
  1022  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1023  	require.NoError(t, err)
  1024  	require.Equal(t, "nvidia", c.HostConfig.Runtime)
  1025  
  1026  	cfg.Runtime = "custom"
  1027  	_, err = driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1028  	require.Error(t, err)
  1029  	require.Contains(t, err.Error(), "conflicting runtime requests")
  1030  }
  1031  
  1032  func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) {
  1033  	ci.Parallel(t)
  1034  
  1035  	dh := dockerDriverHarness(t, nil)
  1036  	driver := dh.Impl().(*Driver)
  1037  	driver.gpuRuntime = true
  1038  	driver.config.allowRuntimes = map[string]struct{}{
  1039  		"runc":   {},
  1040  		"custom": {},
  1041  	}
  1042  
  1043  	allowRuntime := []string{
  1044  		"", // default always works
  1045  		"runc",
  1046  		"custom",
  1047  	}
  1048  
  1049  	task, cfg, ports := dockerTask(t)
  1050  	defer freeport.Return(ports)
  1051  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1052  
  1053  	for _, runtime := range allowRuntime {
  1054  		t.Run(runtime, func(t *testing.T) {
  1055  			cfg.Runtime = runtime
  1056  			c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1057  			require.NoError(t, err)
  1058  			require.Equal(t, runtime, c.HostConfig.Runtime)
  1059  		})
  1060  	}
  1061  
  1062  	t.Run("not allowed: denied", func(t *testing.T) {
  1063  		cfg.Runtime = "denied"
  1064  		_, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1065  		require.Error(t, err)
  1066  		require.Contains(t, err.Error(), `runtime "denied" is not allowed`)
  1067  	})
  1068  
  1069  }
  1070  
  1071  func TestDockerDriver_CreateContainerConfig_User(t *testing.T) {
  1072  	ci.Parallel(t)
  1073  
  1074  	task, cfg, ports := dockerTask(t)
  1075  	defer freeport.Return(ports)
  1076  	task.User = "random-user-1"
  1077  
  1078  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1079  
  1080  	dh := dockerDriverHarness(t, nil)
  1081  	driver := dh.Impl().(*Driver)
  1082  
  1083  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1084  	require.NoError(t, err)
  1085  
  1086  	require.Equal(t, task.User, c.Config.User)
  1087  }
  1088  
  1089  func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) {
  1090  	ci.Parallel(t)
  1091  
  1092  	task, cfg, ports := dockerTask(t)
  1093  	defer freeport.Return(ports)
  1094  	task.AllocID = uuid.Generate()
  1095  	task.JobName = "redis-demo-job"
  1096  
  1097  	cfg.Labels = map[string]string{
  1098  		"user_label": "user_value",
  1099  
  1100  		// com.hashicorp.nomad. labels are reserved and
  1101  		// cannot be overridden
  1102  		"com.hashicorp.nomad.alloc_id": "bad_value",
  1103  	}
  1104  
  1105  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1106  
  1107  	dh := dockerDriverHarness(t, nil)
  1108  	driver := dh.Impl().(*Driver)
  1109  
  1110  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1111  	require.NoError(t, err)
  1112  
  1113  	expectedLabels := map[string]string{
  1114  		// user provided labels
  1115  		"user_label": "user_value",
  1116  		// default label
  1117  		"com.hashicorp.nomad.alloc_id": task.AllocID,
  1118  	}
  1119  
  1120  	require.Equal(t, expectedLabels, c.Config.Labels)
  1121  }
  1122  
  1123  func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) {
  1124  	ci.Parallel(t)
  1125  
  1126  	cases := []struct {
  1127  		name           string
  1128  		loggingConfig  DockerLogging
  1129  		expectedConfig DockerLogging
  1130  	}{
  1131  		{
  1132  			"simple type",
  1133  			DockerLogging{Type: "fluentd"},
  1134  			DockerLogging{
  1135  				Type:   "fluentd",
  1136  				Config: map[string]string{},
  1137  			},
  1138  		},
  1139  		{
  1140  			"simple driver",
  1141  			DockerLogging{Driver: "fluentd"},
  1142  			DockerLogging{
  1143  				Type:   "fluentd",
  1144  				Config: map[string]string{},
  1145  			},
  1146  		},
  1147  		{
  1148  			"type takes precedence",
  1149  			DockerLogging{
  1150  				Type:   "json-file",
  1151  				Driver: "fluentd",
  1152  			},
  1153  			DockerLogging{
  1154  				Type:   "json-file",
  1155  				Config: map[string]string{},
  1156  			},
  1157  		},
  1158  		{
  1159  			"user config takes precedence, even if no type provided",
  1160  			DockerLogging{
  1161  				Type:   "",
  1162  				Config: map[string]string{"max-file": "3", "max-size": "10m"},
  1163  			},
  1164  			DockerLogging{
  1165  				Type:   "",
  1166  				Config: map[string]string{"max-file": "3", "max-size": "10m"},
  1167  			},
  1168  		},
  1169  		{
  1170  			"defaults to json-file w/ log rotation",
  1171  			DockerLogging{
  1172  				Type: "",
  1173  			},
  1174  			DockerLogging{
  1175  				Type:   "json-file",
  1176  				Config: map[string]string{"max-file": "2", "max-size": "2m"},
  1177  			},
  1178  		},
  1179  	}
  1180  
  1181  	for _, c := range cases {
  1182  		t.Run(c.name, func(t *testing.T) {
  1183  			task, cfg, ports := dockerTask(t)
  1184  			defer freeport.Return(ports)
  1185  
  1186  			cfg.Logging = c.loggingConfig
  1187  			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1188  
  1189  			dh := dockerDriverHarness(t, nil)
  1190  			driver := dh.Impl().(*Driver)
  1191  
  1192  			cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1193  			require.NoError(t, err)
  1194  
  1195  			require.Equal(t, c.expectedConfig.Type, cc.HostConfig.LogConfig.Type)
  1196  			require.Equal(t, c.expectedConfig.Config["max-file"], cc.HostConfig.LogConfig.Config["max-file"])
  1197  			require.Equal(t, c.expectedConfig.Config["max-size"], cc.HostConfig.LogConfig.Config["max-size"])
  1198  		})
  1199  	}
  1200  }
  1201  
  1202  func TestDockerDriver_CreateContainerConfig_Mounts(t *testing.T) {
  1203  	ci.Parallel(t)
  1204  
  1205  	task, cfg, ports := dockerTask(t)
  1206  	defer freeport.Return(ports)
  1207  
  1208  	cfg.Mounts = []DockerMount{
  1209  		{
  1210  			Type:   "bind",
  1211  			Target: "/map-bind-target",
  1212  			Source: "/map-source",
  1213  		},
  1214  		{
  1215  			Type:   "tmpfs",
  1216  			Target: "/map-tmpfs-target",
  1217  		},
  1218  	}
  1219  	cfg.MountsList = []DockerMount{
  1220  		{
  1221  			Type:   "bind",
  1222  			Target: "/list-bind-target",
  1223  			Source: "/list-source",
  1224  		},
  1225  		{
  1226  			Type:   "tmpfs",
  1227  			Target: "/list-tmpfs-target",
  1228  		},
  1229  	}
  1230  
  1231  	expectedSrcPrefix := "/"
  1232  	if runtime.GOOS == "windows" {
  1233  		expectedSrcPrefix = "redis-demo\\"
  1234  	}
  1235  	expected := []docker.HostMount{
  1236  		// from mount map
  1237  		{
  1238  			Type:        "bind",
  1239  			Target:      "/map-bind-target",
  1240  			Source:      expectedSrcPrefix + "map-source",
  1241  			BindOptions: &docker.BindOptions{},
  1242  		},
  1243  		{
  1244  			Type:          "tmpfs",
  1245  			Target:        "/map-tmpfs-target",
  1246  			TempfsOptions: &docker.TempfsOptions{},
  1247  		},
  1248  		// from mount list
  1249  		{
  1250  			Type:        "bind",
  1251  			Target:      "/list-bind-target",
  1252  			Source:      expectedSrcPrefix + "list-source",
  1253  			BindOptions: &docker.BindOptions{},
  1254  		},
  1255  		{
  1256  			Type:          "tmpfs",
  1257  			Target:        "/list-tmpfs-target",
  1258  			TempfsOptions: &docker.TempfsOptions{},
  1259  		},
  1260  	}
  1261  
  1262  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1263  
  1264  	dh := dockerDriverHarness(t, nil)
  1265  	driver := dh.Impl().(*Driver)
  1266  	driver.config.Volumes.Enabled = true
  1267  
  1268  	cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1269  	require.NoError(t, err)
  1270  
  1271  	found := cc.HostConfig.Mounts
  1272  	sort.Slice(found, func(i, j int) bool { return strings.Compare(found[i].Target, found[j].Target) < 0 })
  1273  	sort.Slice(expected, func(i, j int) bool {
  1274  		return strings.Compare(expected[i].Target, expected[j].Target) < 0
  1275  	})
  1276  
  1277  	require.Equal(t, expected, found)
  1278  }
  1279  
  1280  func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) {
  1281  	ci.Parallel(t)
  1282  	testCases := []struct {
  1283  		description           string
  1284  		gpuRuntimeSet         bool
  1285  		expectToReturnError   bool
  1286  		expectedRuntime       string
  1287  		nvidiaDevicesProvided bool
  1288  	}{
  1289  		{
  1290  			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 1",
  1291  			gpuRuntimeSet:         true,
  1292  			expectToReturnError:   false,
  1293  			expectedRuntime:       "nvidia",
  1294  			nvidiaDevicesProvided: true,
  1295  		},
  1296  		{
  1297  			description:           "gpu devices are provided, docker driver was able to detect nvidia-runtime 2",
  1298  			gpuRuntimeSet:         true,
  1299  			expectToReturnError:   false,
  1300  			expectedRuntime:       "nvidia-runtime-modified-name",
  1301  			nvidiaDevicesProvided: true,
  1302  		},
  1303  		{
  1304  			description:           "no gpu devices provided - no runtime should be set",
  1305  			gpuRuntimeSet:         true,
  1306  			expectToReturnError:   false,
  1307  			expectedRuntime:       "nvidia",
  1308  			nvidiaDevicesProvided: false,
  1309  		},
  1310  		{
  1311  			description:           "no gpuRuntime supported by docker driver",
  1312  			gpuRuntimeSet:         false,
  1313  			expectToReturnError:   true,
  1314  			expectedRuntime:       "nvidia",
  1315  			nvidiaDevicesProvided: true,
  1316  		},
  1317  	}
  1318  	for _, testCase := range testCases {
  1319  		t.Run(testCase.description, func(t *testing.T) {
  1320  			task, cfg, ports := dockerTask(t)
  1321  			defer freeport.Return(ports)
  1322  
  1323  			dh := dockerDriverHarness(t, map[string]interface{}{
  1324  				"allow_runtimes": []string{"runc", "nvidia", "nvidia-runtime-modified-name"},
  1325  			})
  1326  			driver := dh.Impl().(*Driver)
  1327  
  1328  			driver.gpuRuntime = testCase.gpuRuntimeSet
  1329  			driver.config.GPURuntimeName = testCase.expectedRuntime
  1330  			if testCase.nvidiaDevicesProvided {
  1331  				task.DeviceEnv["NVIDIA_VISIBLE_DEVICES"] = "GPU_UUID_1"
  1332  			}
  1333  
  1334  			c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1335  			if testCase.expectToReturnError {
  1336  				require.NotNil(t, err)
  1337  			} else {
  1338  				require.NoError(t, err)
  1339  				if testCase.nvidiaDevicesProvided {
  1340  					require.Equal(t, testCase.expectedRuntime, c.HostConfig.Runtime)
  1341  				} else {
   1342  					// no nvidia devices provided -> no reason to use the nvidia runtime
  1343  					require.Equal(t, "", c.HostConfig.Runtime)
  1344  				}
  1345  			}
  1346  		})
  1347  	}
  1348  }
  1349  
  1350  func TestDockerDriver_Capabilities(t *testing.T) {
  1351  	ci.Parallel(t)
  1352  	testutil.DockerCompatible(t)
  1353  	if runtime.GOOS == "windows" {
  1354  		t.Skip("Capabilities not supported on windows")
  1355  	}
  1356  
  1357  	testCases := []struct {
  1358  		Name       string
  1359  		CapAdd     []string
  1360  		CapDrop    []string
  1361  		Allowlist  string
  1362  		StartError string
  1363  	}{
  1364  		{
  1365  			Name:    "default-allowlist-add-allowed",
  1366  			CapAdd:  []string{"fowner", "mknod"},
  1367  			CapDrop: []string{"all"},
  1368  		},
  1369  		{
  1370  			Name:       "default-allowlist-add-forbidden",
  1371  			CapAdd:     []string{"net_admin"},
  1372  			StartError: "net_admin",
  1373  		},
  1374  		{
  1375  			Name:    "default-allowlist-drop-existing",
  1376  			CapDrop: []string{"fowner", "mknod", "net_raw"},
  1377  		},
  1378  		{
  1379  			Name:      "restrictive-allowlist-drop-all",
  1380  			CapDrop:   []string{"all"},
  1381  			Allowlist: "fowner,mknod",
  1382  		},
  1383  		{
  1384  			Name:      "restrictive-allowlist-add-allowed",
  1385  			CapAdd:    []string{"fowner", "mknod"},
  1386  			CapDrop:   []string{"all"},
  1387  			Allowlist: "mknod,fowner",
  1388  		},
  1389  		{
  1390  			Name:       "restrictive-allowlist-add-forbidden",
  1391  			CapAdd:     []string{"net_admin", "mknod"},
  1392  			CapDrop:    []string{"all"},
  1393  			Allowlist:  "fowner,mknod",
  1394  			StartError: "net_admin",
  1395  		},
  1396  		{
  1397  			Name:      "permissive-allowlist",
  1398  			CapAdd:    []string{"mknod", "net_admin"},
  1399  			Allowlist: "all",
  1400  		},
  1401  		{
  1402  			Name:      "permissive-allowlist-add-all",
  1403  			CapAdd:    []string{"all"},
  1404  			Allowlist: "all",
  1405  		},
  1406  	}
  1407  
  1408  	for _, tc := range testCases {
  1409  		t.Run(tc.Name, func(t *testing.T) {
  1410  			client := newTestDockerClient(t)
  1411  			task, cfg, ports := dockerTask(t)
  1412  			defer freeport.Return(ports)
  1413  
  1414  			if len(tc.CapAdd) > 0 {
  1415  				cfg.CapAdd = tc.CapAdd
  1416  			}
  1417  			if len(tc.CapDrop) > 0 {
  1418  				cfg.CapDrop = tc.CapDrop
  1419  			}
  1420  			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1421  
  1422  			d := dockerDriverHarness(t, nil)
  1423  			dockerDriver, ok := d.Impl().(*Driver)
  1424  			require.True(t, ok)
  1425  			if tc.Allowlist != "" {
  1426  				dockerDriver.config.AllowCaps = strings.Split(tc.Allowlist, ",")
  1427  			}
  1428  
  1429  			cleanup := d.MkAllocDir(task, true)
  1430  			defer cleanup()
  1431  			copyImage(t, task.TaskDir(), "busybox.tar")
  1432  
  1433  			_, _, err := d.StartTask(task)
  1434  			defer d.DestroyTask(task.ID, true)
  1435  			if err == nil && tc.StartError != "" {
  1436  				t.Fatalf("Expected error in start: %v", tc.StartError)
  1437  			} else if err != nil {
  1438  				if tc.StartError == "" {
  1439  					require.NoError(t, err)
  1440  				} else {
  1441  					require.Contains(t, err.Error(), tc.StartError)
  1442  				}
  1443  				return
  1444  			}
  1445  
  1446  			handle, ok := dockerDriver.tasks.Get(task.ID)
  1447  			require.True(t, ok)
  1448  
  1449  			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1450  
  1451  			container, err := client.InspectContainer(handle.containerID)
  1452  			require.NoError(t, err)
  1453  
  1454  			require.Exactly(t, tc.CapAdd, container.HostConfig.CapAdd)
  1455  			require.Exactly(t, tc.CapDrop, container.HostConfig.CapDrop)
  1456  		})
  1457  	}
  1458  }
  1459  
  1460  func TestDockerDriver_DNS(t *testing.T) {
  1461  	ci.Parallel(t)
  1462  	testutil.DockerCompatible(t)
  1463  	testutil.ExecCompatible(t)
  1464  
  1465  	cases := []struct {
  1466  		name string
  1467  		cfg  *drivers.DNSConfig
  1468  	}{
  1469  		{
  1470  			name: "nil DNSConfig",
  1471  		},
  1472  		{
  1473  			name: "basic",
  1474  			cfg: &drivers.DNSConfig{
  1475  				Servers: []string{"1.1.1.1", "1.0.0.1"},
  1476  			},
  1477  		},
  1478  		{
  1479  			name: "full",
  1480  			cfg: &drivers.DNSConfig{
  1481  				Servers:  []string{"1.1.1.1", "1.0.0.1"},
  1482  				Searches: []string{"local.test", "node.consul"},
  1483  				Options:  []string{"ndots:2", "edns0"},
  1484  			},
  1485  		},
  1486  	}
  1487  
  1488  	for _, c := range cases {
  1489  		task, cfg, ports := dockerTask(t)
  1490  		defer freeport.Return(ports)
  1491  		task.DNS = c.cfg
  1492  		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1493  
  1494  		_, d, _, cleanup := dockerSetup(t, task, nil)
  1495  		defer cleanup()
  1496  
  1497  		require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1498  		defer d.DestroyTask(task.ID, true)
  1499  
  1500  		dtestutil.TestTaskDNSConfig(t, d, task.ID, c.cfg)
  1501  	}
  1502  
  1503  }
  1504  
  1505  func TestDockerDriver_Init(t *testing.T) {
  1506  	ci.Parallel(t)
  1507  	testutil.DockerCompatible(t)
  1508  	if runtime.GOOS == "windows" {
  1509  		t.Skip("Windows does not support init.")
  1510  	}
  1511  
  1512  	task, cfg, ports := dockerTask(t)
  1513  	defer freeport.Return(ports)
  1514  
  1515  	cfg.Init = true
  1516  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1517  
  1518  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1519  	defer cleanup()
  1520  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1521  
  1522  	container, err := client.InspectContainer(handle.containerID)
  1523  	require.NoError(t, err)
  1524  
  1525  	require.Equal(t, cfg.Init, container.HostConfig.Init)
  1526  }
  1527  
  1528  func TestDockerDriver_CPUSetCPUs(t *testing.T) {
  1529  	ci.Parallel(t)
  1530  	testutil.DockerCompatible(t)
  1531  	testutil.CgroupsCompatible(t)
  1532  
  1533  	testCases := []struct {
  1534  		Name       string
  1535  		CPUSetCPUs string
  1536  	}{
  1537  		{
  1538  			Name:       "Single CPU",
  1539  			CPUSetCPUs: "0",
  1540  		},
  1541  		{
  1542  			Name:       "Comma separated list of CPUs",
  1543  			CPUSetCPUs: "0,1",
  1544  		},
  1545  		{
  1546  			Name:       "Range of CPUs",
  1547  			CPUSetCPUs: "0-1",
  1548  		},
  1549  	}
  1550  
  1551  	for _, testCase := range testCases {
  1552  		t.Run(testCase.Name, func(t *testing.T) {
  1553  			task, cfg, ports := dockerTask(t)
  1554  			defer freeport.Return(ports)
  1555  
  1556  			cfg.CPUSetCPUs = testCase.CPUSetCPUs
  1557  			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1558  
  1559  			client, d, handle, cleanup := dockerSetup(t, task, nil)
  1560  			defer cleanup()
  1561  			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1562  
  1563  			container, err := client.InspectContainer(handle.containerID)
  1564  			require.NoError(t, err)
  1565  
  1566  			require.Equal(t, cfg.CPUSetCPUs, container.HostConfig.CPUSetCPUs)
  1567  		})
  1568  	}
  1569  }
  1570  
  1571  func TestDockerDriver_MemoryHardLimit(t *testing.T) {
  1572  	ci.Parallel(t)
  1573  	testutil.DockerCompatible(t)
  1574  	if runtime.GOOS == "windows" {
  1575  		t.Skip("Windows does not support MemoryReservation")
  1576  	}
  1577  
  1578  	task, cfg, ports := dockerTask(t)
  1579  	defer freeport.Return(ports)
  1580  
  1581  	cfg.MemoryHardLimit = 300
  1582  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1583  
  1584  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1585  	defer cleanup()
  1586  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1587  
  1588  	container, err := client.InspectContainer(handle.containerID)
  1589  	require.NoError(t, err)
  1590  
  1591  	require.Equal(t, task.Resources.LinuxResources.MemoryLimitBytes, container.HostConfig.MemoryReservation)
  1592  	require.Equal(t, cfg.MemoryHardLimit*1024*1024, container.HostConfig.Memory)
  1593  }
  1594  
  1595  func TestDockerDriver_MACAddress(t *testing.T) {
  1596  	ci.Parallel(t)
  1597  	testutil.DockerCompatible(t)
  1598  	if runtime.GOOS == "windows" {
  1599  		t.Skip("Windows docker does not support setting MacAddress")
  1600  	}
  1601  
  1602  	task, cfg, ports := dockerTask(t)
  1603  	defer freeport.Return(ports)
  1604  	cfg.MacAddress = "00:16:3e:00:00:00"
  1605  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1606  
  1607  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1608  	defer cleanup()
  1609  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1610  
  1611  	container, err := client.InspectContainer(handle.containerID)
  1612  	require.NoError(t, err)
  1613  
  1614  	require.Equal(t, cfg.MacAddress, container.NetworkSettings.MacAddress)
  1615  }
  1616  
  1617  func TestDockerWorkDir(t *testing.T) {
  1618  	ci.Parallel(t)
  1619  	testutil.DockerCompatible(t)
  1620  
  1621  	task, cfg, ports := dockerTask(t)
  1622  	defer freeport.Return(ports)
  1623  	cfg.WorkDir = "/some/path"
  1624  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1625  
  1626  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1627  	defer cleanup()
  1628  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1629  
  1630  	container, err := client.InspectContainer(handle.containerID)
  1631  	require.NoError(t, err)
  1632  	require.Equal(t, cfg.WorkDir, filepath.ToSlash(container.Config.WorkingDir))
  1633  }
  1634  
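// inSlice reports whether needle is present in haystack.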
  1635  func inSlice(needle string, haystack []string) bool {
  1636  	for _, h := range haystack {
  1637  		if h == needle {
  1638  			return true
  1639  		}
  1640  	}
  1641  	return false
  1642  }
  1643  
  1644  func TestDockerDriver_PortsNoMap(t *testing.T) {
  1645  	ci.Parallel(t)
  1646  	testutil.DockerCompatible(t)
  1647  
  1648  	task, _, ports := dockerTask(t)
  1649  	defer freeport.Return(ports)
  1650  	res := ports[0]
  1651  	dyn := ports[1]
  1652  
  1653  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1654  	defer cleanup()
  1655  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1656  
  1657  	container, err := client.InspectContainer(handle.containerID)
  1658  	require.NoError(t, err)
  1659  
  1660  	// Verify that the correct ports are EXPOSED
  1661  	expectedExposedPorts := map[docker.Port]struct{}{
  1662  		docker.Port(fmt.Sprintf("%d/tcp", res)): {},
  1663  		docker.Port(fmt.Sprintf("%d/udp", res)): {},
  1664  		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {},
  1665  		docker.Port(fmt.Sprintf("%d/udp", dyn)): {},
  1666  	}
  1667  
  1668  	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
  1669  
  1670  	hostIP := "127.0.0.1"
  1671  	if runtime.GOOS == "windows" {
  1672  		hostIP = ""
  1673  	}
  1674  
  1675  	// Verify that the correct ports are FORWARDED
  1676  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1677  		docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1678  		docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1679  		docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1680  		docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1681  	}
  1682  
  1683  	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
  1684  }
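
// exposedPortsForTest is an illustrative sketch of the expectation
// pattern used by the port tests above and below: every mapped port is
// exposed on both tcp and udp. The driver builds these maps internally;
// this helper only mirrors the shape the assertions check.
func exposedPortsForTest(ports ...int) map[docker.Port]struct{} {
	exposed := make(map[docker.Port]struct{}, 2*len(ports))
	for _, p := range ports {
		exposed[docker.Port(fmt.Sprintf("%d/tcp", p))] = struct{}{}
		exposed[docker.Port(fmt.Sprintf("%d/udp", p))] = struct{}{}
	}
	return exposed
}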
  1685  
  1686  func TestDockerDriver_PortsMapping(t *testing.T) {
  1687  	ci.Parallel(t)
  1688  	testutil.DockerCompatible(t)
  1689  
  1690  	task, cfg, ports := dockerTask(t)
  1691  	defer freeport.Return(ports)
  1692  	res := ports[0]
  1693  	dyn := ports[1]
  1694  	cfg.PortMap = map[string]int{
  1695  		"main":  8080,
  1696  		"REDIS": 6379,
  1697  	}
  1698  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1699  
  1700  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1701  	defer cleanup()
  1702  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  1703  
  1704  	container, err := client.InspectContainer(handle.containerID)
  1705  	require.NoError(t, err)
  1706  
  1707  	// Verify that the port environment variables are set
  1708  	require.Contains(t, container.Config.Env, "NOMAD_PORT_main=8080")
  1709  	require.Contains(t, container.Config.Env, "NOMAD_PORT_REDIS=6379")
  1710  
  1711  	// Verify that the correct ports are EXPOSED
  1712  	expectedExposedPorts := map[docker.Port]struct{}{
  1713  		docker.Port("8080/tcp"): {},
  1714  		docker.Port("8080/udp"): {},
  1715  		docker.Port("6379/tcp"): {},
  1716  		docker.Port("6379/udp"): {},
  1717  	}
  1718  
  1719  	require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts)
  1720  
  1721  	hostIP := "127.0.0.1"
  1722  	if runtime.GOOS == "windows" {
  1723  		hostIP = ""
  1724  	}
  1725  
  1726  	// Verify that the correct ports are FORWARDED
  1727  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1728  		docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1729  		docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1730  		docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1731  		docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1732  	}
  1733  	require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings)
  1734  }
  1735  
  1736  func TestDockerDriver_CreateContainerConfig_Ports(t *testing.T) {
  1737  	ci.Parallel(t)
  1738  
  1739  	task, cfg, ports := dockerTask(t)
  1740  	defer freeport.Return(ports)
  1741  	hostIP := "127.0.0.1"
  1742  	if runtime.GOOS == "windows" {
  1743  		hostIP = ""
  1744  	}
  1745  	portmappings := structs.AllocatedPorts(make([]structs.AllocatedPortMapping, len(ports)))
  1746  	portmappings[0] = structs.AllocatedPortMapping{
  1747  		Label:  "main",
  1748  		Value:  ports[0],
  1749  		HostIP: hostIP,
  1750  		To:     8080,
  1751  	}
  1752  	portmappings[1] = structs.AllocatedPortMapping{
  1753  		Label:  "REDIS",
  1754  		Value:  ports[1],
  1755  		HostIP: hostIP,
  1756  		To:     6379,
  1757  	}
  1758  	task.Resources.Ports = &portmappings
  1759  	cfg.Ports = []string{"main", "REDIS"}
  1760  
  1761  	dh := dockerDriverHarness(t, nil)
  1762  	driver := dh.Impl().(*Driver)
  1763  
  1764  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1765  	require.NoError(t, err)
  1766  
  1767  	require.Equal(t, "org/repo:0.1", c.Config.Image)
  1768  
  1769  	// Verify that the correct ports are FORWARDED
  1770  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1771  		docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}},
  1772  		docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[0])}},
  1773  		docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}},
  1774  		docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", ports[1])}},
  1775  	}
  1776  	require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings)
  1777  }
  1778  
  1779  func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) {
  1780  	ci.Parallel(t)
  1781  
  1782  	task, cfg, ports := dockerTask(t)
  1783  	defer freeport.Return(ports)
  1784  	res := ports[0]
  1785  	dyn := ports[1]
  1786  	cfg.PortMap = map[string]int{
  1787  		"main":  8080,
  1788  		"REDIS": 6379,
  1789  	}
  1790  	dh := dockerDriverHarness(t, nil)
  1791  	driver := dh.Impl().(*Driver)
  1792  
  1793  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  1794  	require.NoError(t, err)
  1795  
  1796  	require.Equal(t, "org/repo:0.1", c.Config.Image)
  1797  	require.Contains(t, c.Config.Env, "NOMAD_PORT_main=8080")
  1798  	require.Contains(t, c.Config.Env, "NOMAD_PORT_REDIS=6379")
  1799  
  1800  	// Verify that the correct ports are FORWARDED
  1801  	hostIP := "127.0.0.1"
  1802  	if runtime.GOOS == "windows" {
  1803  		hostIP = ""
  1804  	}
  1805  	expectedPortBindings := map[docker.Port][]docker.PortBinding{
  1806  		docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1807  		docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}},
  1808  		docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1809  		docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}},
  1810  	}
  1811  	require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings)
  1812  }
  1814  
  1815  func TestDockerDriver_CleanupContainer(t *testing.T) {
  1816  	ci.Parallel(t)
  1817  	testutil.DockerCompatible(t)
  1818  
  1819  	task, cfg, ports := dockerTask(t)
  1820  	defer freeport.Return(ports)
  1821  	cfg.Command = "echo"
  1822  	cfg.Args = []string{"hello"}
  1823  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1824  
  1825  	client, d, handle, cleanup := dockerSetup(t, task, nil)
  1826  	defer cleanup()
  1827  
  1828  	waitCh, err := d.WaitTask(context.Background(), task.ID)
  1829  	require.NoError(t, err)
  1830  
  1831  	select {
  1832  	case res := <-waitCh:
  1833  		if !res.Successful() {
  1834  			t.Fatalf("err: %v", res)
  1835  		}
  1836  
  1837  		err = d.DestroyTask(task.ID, false)
  1838  		require.NoError(t, err)
  1839  
  1840  		time.Sleep(3 * time.Second)
  1841  
  1842  		// Ensure that the container isn't present
  1843  		_, err := client.InspectContainer(handle.containerID)
  1844  		if err == nil {
  1845  			t.Fatalf("expected to not get container")
  1846  		}
  1847  
  1848  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  1849  		t.Fatalf("timeout")
  1850  	}
  1851  }
  1852  
  1853  func TestDockerDriver_EnableImageGC(t *testing.T) {
  1854  	ci.Parallel(t)
  1855  	testutil.DockerCompatible(t)
  1856  
  1857  	task, cfg, ports := dockerTask(t)
  1858  	defer freeport.Return(ports)
  1859  	cfg.Command = "echo"
  1860  	cfg.Args = []string{"hello"}
  1861  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1862  
  1863  	client := newTestDockerClient(t)
  1864  	driver := dockerDriverHarness(t, map[string]interface{}{
  1865  		"gc": map[string]interface{}{
  1866  			"container":   true,
  1867  			"image":       true,
  1868  			"image_delay": "2s",
  1869  		},
  1870  	})
  1871  	cleanup := driver.MkAllocDir(task, true)
  1872  	defer cleanup()
  1873  
  1874  	cleanSlate(client, cfg.Image)
  1875  
  1876  	copyImage(t, task.TaskDir(), "busybox.tar")
  1877  	_, _, err := driver.StartTask(task)
  1878  	require.NoError(t, err)
  1879  
  1880  	dockerDriver, ok := driver.Impl().(*Driver)
  1881  	require.True(t, ok)
  1882  	_, ok = dockerDriver.tasks.Get(task.ID)
  1883  	require.True(t, ok)
  1884  
  1885  	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
  1886  	require.NoError(t, err)
  1887  	select {
  1888  	case res := <-waitCh:
  1889  		if !res.Successful() {
  1890  			t.Fatalf("err: %v", res)
  1891  		}
  1892  
  1893  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  1894  		t.Fatalf("timeout")
  1895  	}
  1896  
  1897  	// we haven't called DestroyTask, image should be present
  1898  	_, err = client.InspectImage(cfg.Image)
  1899  	require.NoError(t, err)
  1900  
  1901  	err = dockerDriver.DestroyTask(task.ID, false)
  1902  	require.NoError(t, err)
  1903  
  1904  	// image_delay is 2s, so image should still be around for a bit
  1905  	_, err = client.InspectImage(cfg.Image)
  1906  	require.NoError(t, err)
  1907  
  1908  	// Ensure image was removed
  1909  	tu.WaitForResult(func() (bool, error) {
  1910  		if _, err := client.InspectImage(cfg.Image); err == nil {
  1911  			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
  1912  		}
  1913  
  1914  		return true, nil
  1915  	}, func(err error) {
  1916  		require.NoError(t, err)
  1917  	})
  1918  }
  1919  
  1920  func TestDockerDriver_DisableImageGC(t *testing.T) {
  1921  	ci.Parallel(t)
  1922  	testutil.DockerCompatible(t)
  1923  
  1924  	task, cfg, ports := dockerTask(t)
  1925  	defer freeport.Return(ports)
  1926  	cfg.Command = "echo"
  1927  	cfg.Args = []string{"hello"}
  1928  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1929  
  1930  	client := newTestDockerClient(t)
  1931  	driver := dockerDriverHarness(t, map[string]interface{}{
  1932  		"gc": map[string]interface{}{
  1933  			"container":   true,
  1934  			"image":       false,
  1935  			"image_delay": "1s",
  1936  		},
  1937  	})
  1938  	cleanup := driver.MkAllocDir(task, true)
  1939  	defer cleanup()
  1940  
  1941  	cleanSlate(client, cfg.Image)
  1942  
  1943  	copyImage(t, task.TaskDir(), "busybox.tar")
  1944  	_, _, err := driver.StartTask(task)
  1945  	require.NoError(t, err)
  1946  
  1947  	dockerDriver, ok := driver.Impl().(*Driver)
  1948  	require.True(t, ok)
  1949  	handle, ok := dockerDriver.tasks.Get(task.ID)
  1950  	require.True(t, ok)
  1951  
  1952  	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
  1953  	require.NoError(t, err)
  1954  	select {
  1955  	case res := <-waitCh:
  1956  		if !res.Successful() {
  1957  			t.Fatalf("err: %v", res)
  1958  		}
  1959  
  1960  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  1961  		t.Fatalf("timeout")
  1962  	}
  1963  
  1964  	// we haven't called DestroyTask, image should be present
  1965  	_, err = client.InspectImage(handle.containerImage)
  1966  	require.NoError(t, err)
  1967  
  1968  	err = dockerDriver.DestroyTask(task.ID, false)
  1969  	require.NoError(t, err)
  1970  
  1971  	// image_delay is 1s, wait a little longer
  1972  	time.Sleep(3 * time.Second)
  1973  
  1974  	// image should not have been removed or scheduled to be removed
  1975  	_, err = client.InspectImage(cfg.Image)
  1976  	require.NoError(t, err)
  1977  	dockerDriver.coordinator.imageLock.Lock()
  1978  	_, ok = dockerDriver.coordinator.deleteFuture[handle.containerImage]
  1979  	require.False(t, ok, "image should not be registered for deletion")
  1980  	dockerDriver.coordinator.imageLock.Unlock()
  1981  }
  1982  
  1983  func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) {
  1984  	ci.Parallel(t)
  1985  	testutil.DockerCompatible(t)
  1986  
  1987  	task, cfg, ports := dockerTask(t)
  1988  	defer freeport.Return(ports)
  1989  	cfg.Command = "echo"
  1990  	cfg.Args = []string{"hello"}
  1991  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  1992  
  1993  	client := newTestDockerClient(t)
  1994  	driver := dockerDriverHarness(t, map[string]interface{}{
  1995  		"gc": map[string]interface{}{
  1996  			"container":   true,
  1997  			"image":       true,
  1998  			"image_delay": "0s",
  1999  		},
  2000  	})
  2001  	cleanup := driver.MkAllocDir(task, true)
  2002  	defer cleanup()
  2003  
  2004  	cleanSlate(client, cfg.Image)
  2005  
  2006  	copyImage(t, task.TaskDir(), "busybox.tar")
  2007  	_, _, err := driver.StartTask(task)
  2008  	require.NoError(t, err)
  2009  
  2010  	dockerDriver, ok := driver.Impl().(*Driver)
  2011  	require.True(t, ok)
  2012  	h, ok := dockerDriver.tasks.Get(task.ID)
  2013  	require.True(t, ok)
  2014  
  2015  	waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID)
  2016  	require.NoError(t, err)
  2017  	select {
  2018  	case res := <-waitCh:
  2019  		if !res.Successful() {
  2020  			t.Fatalf("err: %v", res)
  2021  		}
  2022  
  2023  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  2024  		t.Fatalf("timeout")
  2025  	}
  2026  
  2027  	// remove the container out-of-band
  2028  	require.NoError(t, client.RemoveContainer(docker.RemoveContainerOptions{
  2029  		ID: h.containerID,
  2030  	}))
  2031  
  2032  	require.NoError(t, dockerDriver.DestroyTask(task.ID, false))
  2033  
  2034  	// Ensure image was removed
  2035  	tu.WaitForResult(func() (bool, error) {
  2036  		if _, err := client.InspectImage(cfg.Image); err == nil {
  2037  			return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image)
  2038  		}
  2039  
  2040  		return true, nil
  2041  	}, func(err error) {
  2042  		require.NoError(t, err)
  2043  	})
  2044  
  2045  	// Ensure that task handle was removed
  2046  	_, ok = dockerDriver.tasks.Get(task.ID)
  2047  	require.False(t, ok)
  2048  }
  2049  
  2050  func TestDockerDriver_Stats(t *testing.T) {
  2051  	ci.Parallel(t)
  2052  	testutil.DockerCompatible(t)
  2053  
  2054  	task, cfg, ports := dockerTask(t)
  2055  	defer freeport.Return(ports)
  2056  	cfg.Command = "sleep"
  2057  	cfg.Args = []string{"1000"}
  2058  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2059  
  2060  	_, d, handle, cleanup := dockerSetup(t, task, nil)
  2061  	defer cleanup()
  2062  	require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  2063  
  2064  	go func() {
  2065  		defer d.DestroyTask(task.ID, true)
  2066  		ctx, cancel := context.WithCancel(context.Background())
  2067  		defer cancel()
  2068  		ch, err := handle.Stats(ctx, 1*time.Second)
  2069  		assert.NoError(t, err)
  2070  		select {
  2071  		case ru := <-ch:
  2072  			assert.NotNil(t, ru.ResourceUsage)
  2073  		case <-time.After(3 * time.Second):
  2074  			assert.Fail(t, "stats timeout")
  2075  		}
  2076  	}()
  2077  
  2078  	waitCh, err := d.WaitTask(context.Background(), task.ID)
  2079  	require.NoError(t, err)
  2080  	select {
  2081  	case res := <-waitCh:
  2082  		if res.Successful() {
  2083  			t.Fatalf("should err: %v", res)
  2084  		}
  2085  	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
  2086  		t.Fatalf("timeout")
  2087  	}
  2088  }
  2089  
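// setupDockerVolumes builds a busybox task that touches a file inside a
// volume mounted from hostpath. It returns the task, a driver harness,
// the concrete task config, the host-side path of the file the container
// creates, and a cleanup function.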
  2090  func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath string) (*drivers.TaskConfig, *dtestutil.DriverHarness, *TaskConfig, string, func()) {
  2091  	testutil.DockerCompatible(t)
  2092  
  2093  	randfn := fmt.Sprintf("test-%d", rand.Int())
  2094  	hostfile := filepath.Join(hostpath, randfn)
  2095  	var containerPath string
  2096  	if runtime.GOOS == "windows" {
  2097  		containerPath = "C:\\data"
  2098  	} else {
  2099  		containerPath = "/mnt/vol"
  2100  	}
  2101  	containerFile := filepath.Join(containerPath, randfn)
  2102  
  2103  	taskCfg := newTaskConfig("", []string{"touch", containerFile})
  2104  	taskCfg.Volumes = []string{fmt.Sprintf("%s:%s", hostpath, containerPath)}
  2105  
  2106  	task := &drivers.TaskConfig{
  2107  		ID:        uuid.Generate(),
  2108  		Name:      "ls",
  2109  		AllocID:   uuid.Generate(),
  2110  		Env:       map[string]string{"VOL_PATH": containerPath},
  2111  		Resources: basicResources,
  2112  	}
  2113  	require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
  2114  
  2115  	d := dockerDriverHarness(t, cfg)
  2116  	cleanup := d.MkAllocDir(task, true)
  2117  
  2118  	copyImage(t, task.TaskDir(), "busybox.tar")
  2119  
  2120  	return task, d, &taskCfg, hostfile, cleanup
  2121  }
  2122  
  2123  func TestDockerDriver_VolumesDisabled(t *testing.T) {
  2124  	ci.Parallel(t)
  2125  	testutil.DockerCompatible(t)
  2126  
  2127  	cfg := map[string]interface{}{
  2128  		"volumes": map[string]interface{}{
  2129  			"enabled": false,
  2130  		},
  2131  		"gc": map[string]interface{}{
  2132  			"image": false,
  2133  		},
  2134  	}
  2135  
  2136  	{
  2137  		tmpvol := t.TempDir()
  2138  
  2139  		task, driver, _, _, cleanup := setupDockerVolumes(t, cfg, tmpvol)
  2140  		defer cleanup()
  2141  
  2142  		_, _, err := driver.StartTask(task)
  2143  		defer driver.DestroyTask(task.ID, true)
  2144  		if err == nil {
  2145  			require.Fail(t, "Started task successfully when volumes should have been disabled.")
  2146  		}
  2147  	}
  2148  
  2149  	// Relative paths should still be allowed
  2150  	{
  2151  		task, driver, _, fn, cleanup := setupDockerVolumes(t, cfg, ".")
  2152  		defer cleanup()
  2153  
  2154  		_, _, err := driver.StartTask(task)
  2155  		require.NoError(t, err)
  2156  		defer driver.DestroyTask(task.ID, true)
  2157  
  2158  		waitCh, err := driver.WaitTask(context.Background(), task.ID)
  2159  		require.NoError(t, err)
  2160  		select {
  2161  		case res := <-waitCh:
  2162  			if !res.Successful() {
  2163  				t.Fatalf("unexpected err: %v", res)
  2164  			}
  2165  		case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
  2166  			t.Fatalf("timeout")
  2167  		}
  2168  
  2169  		if _, err := ioutil.ReadFile(filepath.Join(task.TaskDir().Dir, fn)); err != nil {
  2170  			t.Fatalf("unexpected error reading %s: %v", fn, err)
  2171  		}
  2172  	}
  2173  
  2174  	// Volume Drivers should be rejected (error)
  2175  	{
  2176  		task, driver, taskCfg, _, cleanup := setupDockerVolumes(t, cfg, "fake_flocker_vol")
  2177  		defer cleanup()
  2178  
  2179  		taskCfg.VolumeDriver = "flocker"
  2180  		require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg))
  2181  
  2182  		_, _, err := driver.StartTask(task)
  2183  		defer driver.DestroyTask(task.ID, true)
  2184  		if err == nil {
  2185  			require.Fail(t, "Started task successfully when volume drivers should have been disabled.")
  2186  		}
  2187  	}
  2188  }
  2189  
  2190  func TestDockerDriver_VolumesEnabled(t *testing.T) {
  2191  	ci.Parallel(t)
  2192  	testutil.DockerCompatible(t)
  2193  
  2194  	cfg := map[string]interface{}{
  2195  		"volumes": map[string]interface{}{
  2196  			"enabled": true,
  2197  		},
  2198  		"gc": map[string]interface{}{
  2199  			"image": false,
  2200  		},
  2201  	}
  2202  
  2203  	tmpvol := t.TempDir()
  2204  
  2205  	// Evaluate symlinks so it works on macOS
  2206  	tmpvol, err := filepath.EvalSymlinks(tmpvol)
  2207  	require.NoError(t, err)
  2208  
  2209  	task, driver, _, hostpath, cleanup := setupDockerVolumes(t, cfg, tmpvol)
  2210  	defer cleanup()
  2211  
  2212  	_, _, err = driver.StartTask(task)
  2213  	require.NoError(t, err)
  2214  	defer driver.DestroyTask(task.ID, true)
  2215  
  2216  	waitCh, err := driver.WaitTask(context.Background(), task.ID)
  2217  	require.NoError(t, err)
  2218  	select {
  2219  	case res := <-waitCh:
  2220  		if !res.Successful() {
  2221  			t.Fatalf("unexpected err: %v", res)
  2222  		}
  2223  	case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second):
  2224  		t.Fatalf("timeout")
  2225  	}
  2226  
  2227  	if _, err := ioutil.ReadFile(hostpath); err != nil {
  2228  		t.Fatalf("unexpected error reading %s: %v", hostpath, err)
  2229  	}
  2230  }
  2231  
  2232  func TestDockerDriver_Mounts(t *testing.T) {
  2233  	ci.Parallel(t)
  2234  	testutil.DockerCompatible(t)
  2235  
  2236  	goodMount := DockerMount{
  2237  		Target: "/nomad",
  2238  		VolumeOptions: DockerVolumeOptions{
  2239  			Labels: map[string]string{"foo": "bar"},
  2240  			DriverConfig: DockerVolumeDriverConfig{
  2241  				Name: "local",
  2242  			},
  2243  		},
  2244  		ReadOnly: true,
  2245  		Source:   "test",
  2246  	}
  2247  
  2248  	if runtime.GOOS == "windows" {
  2249  		goodMount.Target = "C:\\nomad"
  2250  	}
  2251  
  2252  	cases := []struct {
  2253  		Name   string
  2254  		Mounts []DockerMount
  2255  		Error  string
  2256  	}{
  2257  		{
  2258  			Name:   "good-one",
  2259  			Error:  "",
  2260  			Mounts: []DockerMount{goodMount},
  2261  		},
  2262  		{
  2263  			Name:   "duplicate",
  2264  			Error:  "Duplicate mount point",
  2265  			Mounts: []DockerMount{goodMount, goodMount, goodMount},
  2266  		},
  2267  	}
  2268  
  2269  	for _, c := range cases {
  2270  		t.Run(c.Name, func(t *testing.T) {
  2271  			d := dockerDriverHarness(t, nil)
  2272  			driver := d.Impl().(*Driver)
  2273  			driver.config.Volumes.Enabled = true
  2274  
  2275  			// Build the task
  2276  			task, cfg, ports := dockerTask(t)
  2277  			defer freeport.Return(ports)
  2278  			cfg.Command = "sleep"
  2279  			cfg.Args = []string{"10000"}
  2280  			cfg.Mounts = c.Mounts
  2281  			require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2282  			cleanup := d.MkAllocDir(task, true)
  2283  			defer cleanup()
  2284  
  2285  			copyImage(t, task.TaskDir(), "busybox.tar")
  2286  
  2287  			_, _, err := d.StartTask(task)
  2288  			defer d.DestroyTask(task.ID, true)
  2289  			if err == nil && c.Error != "" {
  2290  				t.Fatalf("expected error: %v", c.Error)
  2291  			} else if err != nil {
  2292  				if c.Error == "" {
  2293  					t.Fatalf("unexpected error in prestart: %v", err)
  2294  				} else if !strings.Contains(err.Error(), c.Error) {
  2295  					t.Fatalf("expected error %q; got %v", c.Error, err)
  2296  				}
  2297  			}
  2298  		})
  2299  	}
  2300  }
  2301  
  2302  func TestDockerDriver_AuthConfiguration(t *testing.T) {
  2303  	ci.Parallel(t)
  2304  	testutil.DockerCompatible(t)
  2305  
  2306  	path := "./test-resources/docker/auth.json"
  2307  	cases := []struct {
  2308  		Repo       string
  2309  		AuthConfig *docker.AuthConfiguration
  2310  	}{
  2311  		{
  2312  			Repo:       "lolwhat.com/what:1337",
  2313  			AuthConfig: nil,
  2314  		},
  2315  		{
  2316  			Repo: "redis:7",
  2317  			AuthConfig: &docker.AuthConfiguration{
  2318  				Username:      "test",
  2319  				Password:      "1234",
  2320  				Email:         "",
  2321  				ServerAddress: "https://index.docker.io/v1/",
  2322  			},
  2323  		},
  2324  		{
  2325  			Repo: "quay.io/redis:7",
  2326  			AuthConfig: &docker.AuthConfiguration{
  2327  				Username:      "test",
  2328  				Password:      "5678",
  2329  				Email:         "",
  2330  				ServerAddress: "quay.io",
  2331  			},
  2332  		},
  2333  		{
  2334  			Repo: "other.io/redis:7",
  2335  			AuthConfig: &docker.AuthConfiguration{
  2336  				Username:      "test",
  2337  				Password:      "abcd",
  2338  				Email:         "",
  2339  				ServerAddress: "https://other.io/v1/",
  2340  			},
  2341  		},
  2342  	}
  2343  
  2344  	for _, c := range cases {
  2345  		act, err := authFromDockerConfig(path)(c.Repo)
  2346  		require.NoError(t, err)
  2347  		require.Exactly(t, c.AuthConfig, act)
  2348  	}
  2349  }
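
// For reference, a Docker config file in the shape authFromDockerConfig
// consumes is assumed to look roughly like the following, with each
// "auth" entry a base64-encoded "user:password" pair (e.g. "dGVzdDoxMjM0"
// decodes to "test:1234"); the authoritative fixture lives in
// test-resources/docker/auth.json:
//
//	{
//	  "auths": {
//	    "https://index.docker.io/v1/": {"auth": "dGVzdDoxMjM0"},
//	    "quay.io":                     {"auth": "dGVzdDo1Njc4"},
//	    "https://other.io/v1/":        {"auth": "dGVzdDphYmNk"}
//	  }
//	}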
  2350  
  2351  func TestDockerDriver_AuthFromTaskConfig(t *testing.T) {
  2352  	ci.Parallel(t)
  2353  
  2354  	cases := []struct {
  2355  		Auth       DockerAuth
  2356  		AuthConfig *docker.AuthConfiguration
  2357  		Desc       string
  2358  	}{
  2359  		{
  2360  			Auth:       DockerAuth{},
  2361  			AuthConfig: nil,
  2362  			Desc:       "Empty Config",
  2363  		},
  2364  		{
  2365  			Auth: DockerAuth{
  2366  				Username:   "foo",
  2367  				Password:   "bar",
  2368  				Email:      "foo@bar.com",
  2369  				ServerAddr: "www.foobar.com",
  2370  			},
  2371  			AuthConfig: &docker.AuthConfiguration{
  2372  				Username:      "foo",
  2373  				Password:      "bar",
  2374  				Email:         "foo@bar.com",
  2375  				ServerAddress: "www.foobar.com",
  2376  			},
  2377  			Desc: "All fields set",
  2378  		},
  2379  		{
  2380  			Auth: DockerAuth{
  2381  				Username:   "foo",
  2382  				Password:   "bar",
  2383  				ServerAddr: "www.foobar.com",
  2384  			},
  2385  			AuthConfig: &docker.AuthConfiguration{
  2386  				Username:      "foo",
  2387  				Password:      "bar",
  2388  				ServerAddress: "www.foobar.com",
  2389  			},
  2390  			Desc: "Email not set",
  2391  		},
  2392  	}
  2393  
  2394  	for _, c := range cases {
  2395  		t.Run(c.Desc, func(t *testing.T) {
  2396  			act, err := authFromTaskConfig(&TaskConfig{Auth: c.Auth})("test")
  2397  			require.NoError(t, err)
  2398  			require.Exactly(t, c.AuthConfig, act)
  2399  		})
  2400  	}
  2401  }
  2402  
  2403  func TestDockerDriver_OOMKilled(t *testing.T) {
  2404  	ci.Parallel(t)
  2405  	testutil.DockerCompatible(t)
  2406  
  2407  	// waiting on upstream fix for cgroups v2
  2408  	// see https://github.com/hashicorp/nomad/issues/13119
  2409  	testutil.CgroupsCompatibleV1(t)
  2410  
  2411  	taskCfg := newTaskConfig("", []string{"sh", "-c", `sleep 2 && x=a && while true; do x="$x$x"; done`})
  2412  	task := &drivers.TaskConfig{
  2413  		ID:        uuid.Generate(),
  2414  		Name:      "oom-killed",
  2415  		AllocID:   uuid.Generate(),
  2416  		Resources: basicResources,
  2417  	}
  2418  	task.Resources.LinuxResources.MemoryLimitBytes = 10 * 1024 * 1024
  2419  	task.Resources.NomadResources.Memory.MemoryMB = 10
  2420  
  2421  	require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
  2422  
  2423  	d := dockerDriverHarness(t, nil)
  2424  	cleanup := d.MkAllocDir(task, true)
  2425  	defer cleanup()
  2426  	copyImage(t, task.TaskDir(), "busybox.tar")
  2427  
  2428  	_, _, err := d.StartTask(task)
  2429  	require.NoError(t, err)
  2430  
  2431  	defer d.DestroyTask(task.ID, true)
  2432  
  2433  	waitCh, err := d.WaitTask(context.Background(), task.ID)
  2434  	require.NoError(t, err)
  2435  	select {
  2436  	case res := <-waitCh:
  2437  		if res.Successful() {
  2438  			t.Fatalf("expected error, but container exited successfully")
  2439  		}
  2440  
  2441  		if !res.OOMKilled {
  2442  			t.Fatalf("not killed by OOM killer: %s", res.Err)
  2443  		}
  2444  
  2445  		t.Logf("Successfully killed by OOM killer")
  2446  
  2447  	case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second):
  2448  		t.Fatalf("timeout")
  2449  	}
  2450  }
  2451  
  2452  func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) {
  2453  	ci.Parallel(t)
  2454  	testutil.DockerCompatible(t)
  2455  
  2456  	brokenConfigs := []DockerDevice{
  2457  		{
  2458  			HostPath: "",
  2459  		},
  2460  		{
  2461  			HostPath:          "/dev/sda1",
  2462  			CgroupPermissions: "rxb",
  2463  		},
  2464  	}
  2465  
  2466  	testCases := []struct {
  2467  		deviceConfig []DockerDevice
  2468  		err          error
  2469  	}{
  2470  		{brokenConfigs[:1], fmt.Errorf("host path must be set in configuration for devices")},
  2471  		{brokenConfigs[1:], fmt.Errorf("invalid cgroup permission string: \"rxb\"")},
  2472  	}
  2473  
  2474  	for _, tc := range testCases {
  2475  		task, cfg, ports := dockerTask(t)
  2476  		cfg.Devices = tc.deviceConfig
  2477  		require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2478  		d := dockerDriverHarness(t, nil)
  2479  		cleanup := d.MkAllocDir(task, true)
  2480  		copyImage(t, task.TaskDir(), "busybox.tar")
  2481  		defer cleanup()
  2482  
  2483  		_, _, err := d.StartTask(task)
  2484  		require.Error(t, err)
  2485  		require.Contains(t, err.Error(), tc.err.Error())
  2486  		freeport.Return(ports)
  2487  	}
  2488  }
  2489  
  2490  func TestDockerDriver_Device_Success(t *testing.T) {
  2491  	ci.Parallel(t)
  2492  	testutil.DockerCompatible(t)
  2493  
  2494  	if runtime.GOOS != "linux" {
  2495  		t.Skip("test device mounts only on linux")
  2496  	}
  2497  
  2498  	hostPath := "/dev/random"
  2499  	containerPath := "/dev/myrandom"
  2500  	perms := "rwm"
  2501  
  2502  	expectedDevice := docker.Device{
  2503  		PathOnHost:        hostPath,
  2504  		PathInContainer:   containerPath,
  2505  		CgroupPermissions: perms,
  2506  	}
  2507  	config := DockerDevice{
  2508  		HostPath:      hostPath,
  2509  		ContainerPath: containerPath,
  2510  	}
  2511  
  2512  	task, cfg, ports := dockerTask(t)
  2513  	defer freeport.Return(ports)
  2514  	cfg.Devices = []DockerDevice{config}
  2515  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2516  
  2517  	client, driver, handle, cleanup := dockerSetup(t, task, nil)
  2518  	defer cleanup()
  2519  	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
  2520  
  2521  	container, err := client.InspectContainer(handle.containerID)
  2522  	require.NoError(t, err)
  2523  
  2524  	require.NotEmpty(t, container.HostConfig.Devices, "Expected at least one device")
  2525  	require.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device")
  2526  }
  2527  
  2528  func TestDockerDriver_Entrypoint(t *testing.T) {
  2529  	ci.Parallel(t)
  2530  	testutil.DockerCompatible(t)
  2531  
  2532  	entrypoint := []string{"sh", "-c"}
  2533  	task, cfg, ports := dockerTask(t)
  2534  	defer freeport.Return(ports)
  2535  	cfg.Entrypoint = entrypoint
  2536  	cfg.Command = strings.Join(busyboxLongRunningCmd, " ")
  2537  	cfg.Args = []string{}
  2538  
  2539  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2540  
  2541  	client, driver, handle, cleanup := dockerSetup(t, task, nil)
  2542  	defer cleanup()
  2543  
  2544  	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
  2545  
  2546  	container, err := client.InspectContainer(handle.containerID)
  2547  	require.NoError(t, err)
  2548  
  2549  	require.Len(t, container.Config.Entrypoint, 2, "Expected a two-element entrypoint")
  2550  	require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint")
  2551  }
  2552  
  2553  func TestDockerDriver_ReadonlyRootfs(t *testing.T) {
  2554  	ci.Parallel(t)
  2555  	testutil.DockerCompatible(t)
  2556  
  2557  	if runtime.GOOS == "windows" {
  2558  		t.Skip("Windows Docker does not support root filesystem in read-only mode")
  2559  	}
  2560  
  2561  	task, cfg, ports := dockerTask(t)
  2562  	defer freeport.Return(ports)
  2563  	cfg.ReadonlyRootfs = true
  2564  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2565  
  2566  	client, driver, handle, cleanup := dockerSetup(t, task, nil)
  2567  	defer cleanup()
  2568  	require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second))
  2569  
  2570  	container, err := client.InspectContainer(handle.containerID)
  2571  	require.NoError(t, err)
  2572  
  2573  	require.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set")
  2574  }
  2575  
  2576  // fakeDockerClient can be used in places that accept an interface for the
  2577  // docker client such as createContainer.
  2578  type fakeDockerClient struct{}
  2579  
  2580  func (fakeDockerClient) CreateContainer(docker.CreateContainerOptions) (*docker.Container, error) {
  2581  	return nil, fmt.Errorf("volume is attached on another node")
  2582  }
  2583  func (fakeDockerClient) InspectContainer(id string) (*docker.Container, error) {
  2584  	panic("not implemented")
  2585  }
  2586  func (fakeDockerClient) ListContainers(docker.ListContainersOptions) ([]docker.APIContainers, error) {
  2587  	panic("not implemented")
  2588  }
  2589  func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) error {
  2590  	panic("not implemented")
  2591  }
  2592  
  2593  // TestDockerDriver_VolumeError asserts volume related errors when creating a
  2594  // container are recoverable.
  2595  func TestDockerDriver_VolumeError(t *testing.T) {
  2596  	ci.Parallel(t)
  2597  
  2598  	// setup
  2599  	_, cfg, ports := dockerTask(t)
  2600  	defer freeport.Return(ports)
  2601  	driver := dockerDriverHarness(t, nil)
  2602  
  2603  	// assert volume error is recoverable
  2604  	_, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg.Image)
  2605  	require.True(t, structs.IsRecoverable(err))
  2606  }
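
// TestStructsRecoverableError_Sketch is a minimal sketch, independent of
// Docker, of the recoverability contract TestDockerDriver_VolumeError
// relies on: errors wrapped with structs.NewRecoverableError report true
// via structs.IsRecoverable, so the client retries placement instead of
// failing the task permanently.
func TestStructsRecoverableError_Sketch(t *testing.T) {
	ci.Parallel(t)

	err := structs.NewRecoverableError(fmt.Errorf("volume is attached on another node"), true)
	require.True(t, structs.IsRecoverable(err))
}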
  2607  
  2608  func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) {
  2609  	ci.Parallel(t)
  2610  	testutil.DockerCompatible(t)
  2611  
  2612  	expectedPrefix := "2001:db8:1::242:ac11"
  2613  	expectedAdvertise := true
  2614  	task, cfg, ports := dockerTask(t)
  2615  	defer freeport.Return(ports)
  2616  	cfg.AdvertiseIPv6Addr = expectedAdvertise
  2617  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2618  
  2619  	client := newTestDockerClient(t)
  2620  
  2621  	// Make sure IPv6 is enabled
  2622  	net, err := client.NetworkInfo("bridge")
  2623  	if err != nil {
  2624  		t.Skip("error retrieving bridge network information, skipping")
  2625  	}
  2626  	if net == nil || !net.EnableIPv6 {
  2627  		t.Skip("IPv6 not enabled on bridge network, skipping")
  2628  	}
  2629  
  2630  	driver := dockerDriverHarness(t, nil)
  2631  	cleanup := driver.MkAllocDir(task, true)
  2632  	copyImage(t, task.TaskDir(), "busybox.tar")
  2633  	defer cleanup()
  2634  
  2635  	_, network, err := driver.StartTask(task)
  2636  	defer driver.DestroyTask(task.ID, true)
  2637  	require.NoError(t, err)
  2638  
  2639  	require.Equal(t, expectedAdvertise, network.AutoAdvertise, "Wrong autoadvertise. Expect: %v, got: %v", expectedAdvertise, network.AutoAdvertise)
  2640  
  2641  	if !strings.HasPrefix(network.IP, expectedPrefix) {
  2642  		t.Fatalf("Got IP address %q want ip address with prefix %q", network.IP, expectedPrefix)
  2643  	}
  2644  
  2645  	handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID)
  2646  	require.True(t, ok)
  2647  
  2648  	require.NoError(t, driver.WaitUntilStarted(task.ID, time.Second))
  2649  
  2650  	container, err := client.InspectContainer(handle.containerID)
  2651  	require.NoError(t, err)
  2652  
  2653  	if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) {
  2654  		t.Fatalf("Got GlobalIPv6Address %q want GlobalIPv6Address with prefix %q", container.NetworkSettings.GlobalIPv6Address, expectedPrefix)
  2655  	}
  2656  }
  2657  
  2658  func TestParseDockerImage(t *testing.T) {
  2659  	ci.Parallel(t)
  2660  
  2661  	tests := []struct {
  2662  		Image string
  2663  		Repo  string
  2664  		Tag   string
  2665  	}{
  2666  		{"library/hello-world:1.0", "library/hello-world", "1.0"},
  2667  		{"library/hello-world", "library/hello-world", "latest"},
  2668  		{"library/hello-world:latest", "library/hello-world", "latest"},
  2669  		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
  2670  	}
  2671  	for _, test := range tests {
  2672  		t.Run(test.Image, func(t *testing.T) {
  2673  			repo, tag := parseDockerImage(test.Image)
  2674  			require.Equal(t, test.Repo, repo)
  2675  			require.Equal(t, test.Tag, tag)
  2676  		})
  2677  	}
  2678  }
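
// parseDockerImageReference is an illustrative re-derivation of the
// behavior the table above pins down (the real logic lives in
// parseDockerImage): a digest reference ("...@sha256:...") is kept whole
// with an empty tag, otherwise the tag is whatever follows the last ':',
// defaulting to "latest". It deliberately ignores registry host:port
// forms, which the table does not cover.
func parseDockerImageReference(image string) (repo, tag string) {
	if strings.Contains(image, "@") {
		return image, ""
	}
	if i := strings.LastIndex(image, ":"); i != -1 {
		return image[:i], image[i+1:]
	}
	return image, "latest"
}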
  2679  
  2680  func TestDockerImageRef(t *testing.T) {
  2681  	ci.Parallel(t)
  2682  	tests := []struct {
  2683  		Image string
  2684  		Repo  string
  2685  		Tag   string
  2686  	}{
  2687  		{"library/hello-world:1.0", "library/hello-world", "1.0"},
  2688  		{"library/hello-world:latest", "library/hello-world", "latest"},
  2689  		{"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""},
  2690  	}
  2691  	for _, test := range tests {
  2692  		t.Run(test.Image, func(t *testing.T) {
  2693  			image := dockerImageRef(test.Repo, test.Tag)
  2694  			require.Equal(t, test.Image, image)
  2695  		})
  2696  	}
  2697  }
  2698  
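// waitForExist polls InspectContainer until the container with the given
// ID exists, tolerating transient NoSuchContainer errors along the way.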
  2699  func waitForExist(t *testing.T, client *docker.Client, containerID string) {
  2700  	tu.WaitForResult(func() (bool, error) {
  2701  		container, err := client.InspectContainer(containerID)
  2702  		if err != nil {
  2703  			if _, ok := err.(*docker.NoSuchContainer); !ok {
  2704  				return false, err
  2705  			}
  2706  		}
  2707  
  2708  		return container != nil, nil
  2709  	}, func(err error) {
  2710  		require.NoError(t, err)
  2711  	})
  2712  }
  2713  
  2714  // TestDockerDriver_CreationIdempotent asserts that the createContainer
  2715  // and startContainer functions are idempotent, as we have some retry
  2716  // logic there without ensuring we delete/destroy containers.
  2717  func TestDockerDriver_CreationIdempotent(t *testing.T) {
  2718  	ci.Parallel(t)
  2719  	testutil.DockerCompatible(t)
  2720  
  2721  	task, cfg, ports := dockerTask(t)
  2722  	defer freeport.Return(ports)
  2723  	require.NoError(t, task.EncodeConcreteDriverConfig(cfg))
  2724  
  2725  	client := newTestDockerClient(t)
  2726  	driver := dockerDriverHarness(t, nil)
  2727  	cleanup := driver.MkAllocDir(task, true)
  2728  	defer cleanup()
  2729  
  2730  	copyImage(t, task.TaskDir(), "busybox.tar")
  2731  
  2732  	d, ok := driver.Impl().(*Driver)
  2733  	require.True(t, ok)
  2734  
  2735  	_, err := d.createImage(task, cfg, client)
  2736  	require.NoError(t, err)
  2737  
  2738  	containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image)
  2739  	require.NoError(t, err)
  2740  
  2741  	c, err := d.createContainer(client, containerCfg, cfg.Image)
  2742  	require.NoError(t, err)
  2743  	defer client.RemoveContainer(docker.RemoveContainerOptions{
  2744  		ID:    c.ID,
  2745  		Force: true,
  2746  	})
  2747  
  2748  	// calling createContainer again creates a new container and removes the old one
  2749  	c2, err := d.createContainer(client, containerCfg, cfg.Image)
  2750  	require.NoError(t, err)
  2751  	defer client.RemoveContainer(docker.RemoveContainerOptions{
  2752  		ID:    c2.ID,
  2753  		Force: true,
  2754  	})
  2755  
  2756  	require.NotEqual(t, c.ID, c2.ID)
  2757  	// old container was destroyed
  2758  	{
  2759  		_, err := client.InspectContainer(c.ID)
  2760  		require.Error(t, err)
  2761  		require.Contains(t, err.Error(), NoSuchContainerError)
  2762  	}
  2763  
  2764  	// now start container twice
  2765  	require.NoError(t, d.startContainer(c2))
  2766  	require.NoError(t, d.startContainer(c2))
  2767  
  2768  	tu.WaitForResult(func() (bool, error) {
  2769  		c, err := client.InspectContainer(c2.ID)
  2770  		if err != nil {
  2771  			return false, fmt.Errorf("failed to get container status: %v", err)
  2772  		}
  2773  
  2774  		if !c.State.Running {
  2775  			return false, fmt.Errorf("container is not running, state: %v", c.State)
  2776  		}
  2777  
  2778  		return true, nil
  2779  	}, func(err error) {
  2780  		require.NoError(t, err)
  2781  	})
  2782  }
  2783  
  2784  // TestDockerDriver_CreateContainerConfig_CPUHardLimit asserts that a default
  2785  // CPU quota and period are set when cpu_hard_limit = true.
  2786  func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) {
  2787  	ci.Parallel(t)
  2788  
  2789  	task, _, ports := dockerTask(t)
  2790  	defer freeport.Return(ports)
  2791  
  2792  	dh := dockerDriverHarness(t, nil)
  2793  	driver := dh.Impl().(*Driver)
  2794  	schema, _ := driver.TaskConfigSchema()
  2795  	spec, _ := hclspecutils.Convert(schema)
  2796  
  2797  	val, _, _ := hclutils.ParseHclInterface(map[string]interface{}{
  2798  		"image":          "foo/bar",
  2799  		"cpu_hard_limit": true,
  2800  	}, spec, nil)
  2801  
  2802  	require.NoError(t, task.EncodeDriverConfig(val))
  2803  	cfg := &TaskConfig{}
  2804  	require.NoError(t, task.DecodeDriverConfig(cfg))
  2805  	c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1")
  2806  	require.NoError(t, err)
  2807  
  2808  	require.NotZero(t, c.HostConfig.CPUQuota)
  2809  	require.NotZero(t, c.HostConfig.CPUPeriod)
  2810  }
  2811  
  2812  func TestDockerDriver_memoryLimits(t *testing.T) {
  2813  	ci.Parallel(t)
  2814  
  2815  	cases := []struct {
  2816  		name           string
  2817  		driverMemoryMB int64
  2818  		taskResources  drivers.MemoryResources
  2819  		expectedHard   int64
  2820  		expectedSoft   int64
  2821  	}{
  2822  		{
  2823  			"plain request",
  2824  			0,
  2825  			drivers.MemoryResources{MemoryMB: 10},
  2826  			10 * 1024 * 1024,
  2827  			0,
  2828  		},
  2829  		{
  2830  			"with driver max",
  2831  			20,
  2832  			drivers.MemoryResources{MemoryMB: 10},
  2833  			20 * 1024 * 1024,
  2834  			10 * 1024 * 1024,
  2835  		},
  2836  		{
  2837  			"with resources max",
  2838  			20,
  2839  			drivers.MemoryResources{MemoryMB: 10, MemoryMaxMB: 20},
  2840  			20 * 1024 * 1024,
  2841  			10 * 1024 * 1024,
  2842  		},
  2843  		{
  2844  			"with driver and resources max: higher driver",
  2845  			30,
  2846  			drivers.MemoryResources{MemoryMB: 10, MemoryMaxMB: 20},
  2847  			30 * 1024 * 1024,
  2848  			10 * 1024 * 1024,
  2849  		},
  2850  		{
  2851  			"with driver and resources max: higher resources",
  2852  			20,
  2853  			drivers.MemoryResources{MemoryMB: 10, MemoryMaxMB: 30},
  2854  			30 * 1024 * 1024,
  2855  			10 * 1024 * 1024,
  2856  		},
  2857  	}
  2858  
  2859  	for _, c := range cases {
  2860  		t.Run(c.name, func(t *testing.T) {
  2861  			hard, soft := memoryLimits(c.driverMemoryMB, c.taskResources)
  2862  			require.Equal(t, c.expectedHard, hard)
  2863  			require.Equal(t, c.expectedSoft, soft)
  2864  		})
  2865  	}
  2866  }
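
// memoryLimitsReference is an illustrative re-derivation of the rules the
// table above encodes, not the driver's implementation: the hard limit is
// the larger of the driver-level memory_hard_limit and the task's
// MemoryMaxMB (falling back to MemoryMB when neither is set), and a soft
// limit is only set when a distinct hard limit exists.
func memoryLimitsReference(driverMemoryMB int64, r drivers.MemoryResources) (hard, soft int64) {
	hardMB := driverMemoryMB
	if r.MemoryMaxMB > hardMB {
		hardMB = r.MemoryMaxMB
	}
	if hardMB == 0 {
		return r.MemoryMB * 1024 * 1024, 0
	}
	return hardMB * 1024 * 1024, r.MemoryMB * 1024 * 1024
}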
  2867  
  2868  func TestDockerDriver_cgroupParent(t *testing.T) {
  2869  	ci.Parallel(t)
  2870  
  2871  	t.Run("v1", func(t *testing.T) {
  2872  		testutil.CgroupsCompatibleV1(t)
  2873  
  2874  		parent := cgroupParent(&drivers.Resources{
  2875  			LinuxResources: &drivers.LinuxResources{
  2876  				CpusetCgroupPath: "/sys/fs/cgroup/cpuset/nomad",
  2877  			},
  2878  		})
  2879  		require.Equal(t, "", parent)
  2880  	})
  2881  
  2882  	t.Run("v2", func(t *testing.T) {
  2883  		testutil.CgroupsCompatibleV2(t)
  2884  
  2885  		parent := cgroupParent(&drivers.Resources{
  2886  			LinuxResources: &drivers.LinuxResources{
  2887  				CpusetCgroupPath: "/sys/fs/cgroup/nomad.slice",
  2888  			},
  2889  		})
  2890  		require.Equal(t, "nomad.slice", parent)
  2891  	})
  2892  }
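
// cgroupParentReference mirrors what the two subtests above assert. It is
// an illustrative sketch keyed off the path shape rather than the runtime
// cgroups-version detection the driver performs: on cgroups v2 the parent
// is the final ".slice" element of the cpuset cgroup path, while on v1 it
// stays empty and Docker's default parent applies.
func cgroupParentReference(cpusetCgroupPath string) string {
	if strings.HasSuffix(cpusetCgroupPath, ".slice") {
		return filepath.Base(cpusetCgroupPath)
	}
	return ""
}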
  2893  
  2894  func TestDockerDriver_parseSignal(t *testing.T) {
  2895  	ci.Parallel(t)
  2896  
  2897  	tests := []struct {
  2898  		name            string
  2899  		runtime         string
  2900  		specifiedSignal string
  2901  		expectedSignal  string
  2902  	}{
  2903  		{
  2904  			name:            "default",
  2905  			runtime:         runtime.GOOS,
  2906  			specifiedSignal: "",
  2907  			expectedSignal:  "SIGTERM",
  2908  		},
  2909  		{
  2910  			name:            "set",
  2911  			runtime:         runtime.GOOS,
  2912  			specifiedSignal: "SIGHUP",
  2913  			expectedSignal:  "SIGHUP",
  2914  		},
  2915  		{
  2916  			name:            "windows conversion",
  2917  			runtime:         "windows",
  2918  			specifiedSignal: "SIGINT",
  2919  			expectedSignal:  "SIGTERM",
  2920  		},
  2921  		{
  2922  			name:            "not signal",
  2923  			runtime:         runtime.GOOS,
  2924  			specifiedSignal: "SIGDOESNOTEXIST",
  2925  			expectedSignal:  "", // returns an error
  2926  		},
  2927  	}
  2928  
  2929  	for _, tc := range tests {
  2930  		t.Run(tc.name, func(t *testing.T) {
  2931  			s, err := parseSignal(tc.runtime, tc.specifiedSignal)
  2932  			if tc.expectedSignal == "" {
  2933  				require.Error(t, err, "invalid signal")
  2934  			} else {
  2935  				require.NoError(t, err)
  2936  				require.IsType(t, syscall.Signal(0), s)
  2937  			}
  2938  		})
  2939  	}
  2940  }
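
// The "windows conversion" case above encodes a platform constraint:
// Docker on Windows reportedly cannot deliver SIGINT to a container, so
// the driver substitutes SIGTERM. A hedged one-line sketch of that rule,
// with goos and signal as hypothetical names:
//
//	if goos == "windows" && signal == "SIGINT" { signal = "SIGTERM" }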
  2941  
  2942  // This test asserts that Nomad isn't overriding the STOPSIGNAL in a Dockerfile
  2943  func TestDockerDriver_StopSignal(t *testing.T) {
  2944  	ci.Parallel(t)
  2945  	testutil.DockerCompatible(t)
  2946  	if runtime.GOOS == "windows" {
  2947  		t.Skip("Skipped on Windows, we don't have image variants available")
  2948  	}
  2949  
  2950  	cases := []struct {
  2951  		name            string
  2952  		variant         string
  2953  		jobKillSignal   string
  2954  		expectedSignals []string
  2955  	}{
  2956  		{
  2957  			name:            "stopsignal-only",
  2958  			variant:         "stopsignal",
  2959  			jobKillSignal:   "",
  2960  			expectedSignals: []string{"19", "9"},
  2961  		},
  2962  		{
  2963  			name:            "stopsignal-killsignal",
  2964  			variant:         "stopsignal",
  2965  			jobKillSignal:   "SIGTERM",
  2966  			expectedSignals: []string{"15", "19", "9"},
  2967  		},
  2968  		{
  2969  			name:            "killsignal-only",
  2970  			variant:         "",
  2971  			jobKillSignal:   "SIGTERM",
  2972  			expectedSignals: []string{"15", "15", "9"},
  2973  		},
  2974  		{
  2975  			name:            "nosignals-default",
  2976  			variant:         "",
  2977  			jobKillSignal:   "",
  2978  			expectedSignals: []string{"15", "9"},
  2979  		},
  2980  	}
  2981  
  2982  	for i := range cases {
  2983  		c := cases[i]
  2984  		t.Run(c.name, func(t *testing.T) {
  2985  			taskCfg := newTaskConfig(c.variant, []string{"sleep", "9901"})
  2986  
  2987  			task := &drivers.TaskConfig{
  2988  				ID:        uuid.Generate(),
  2989  				Name:      c.name,
  2990  				AllocID:   uuid.Generate(),
  2991  				Resources: basicResources,
  2992  			}
  2993  			require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg))
  2994  
  2995  			d := dockerDriverHarness(t, nil)
  2996  			cleanup := d.MkAllocDir(task, true)
  2997  			defer cleanup()
  2998  
  2999  			if c.variant == "stopsignal" {
  3000  				copyImage(t, task.TaskDir(), "busybox_stopsignal.tar") // Default busybox image with STOPSIGNAL 19 added
  3001  			} else {
  3002  				copyImage(t, task.TaskDir(), "busybox.tar")
  3003  			}
  3004  
  3005  			client := newTestDockerClient(t)
  3006  
  3007  			listener := make(chan *docker.APIEvents)
  3008  			err := client.AddEventListener(listener)
  3009  			require.NoError(t, err)
  3010  
  3011  			defer func() {
  3012  				err := client.RemoveEventListener(listener)
  3013  				require.NoError(t, err)
  3014  			}()
  3015  
  3016  			_, _, err = d.StartTask(task)
  3017  			require.NoError(t, err)
  3018  			require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second))
  3019  
  3020  			stopErr := make(chan error, 1)
  3021  			go func() {
  3022  				err := d.StopTask(task.ID, 1*time.Second, c.jobKillSignal)
  3023  				stopErr <- err
  3024  			}()
  3025  
  3026  			timeout := time.After(10 * time.Second)
  3027  			var receivedSignals []string
  3028  		WAIT:
  3029  			for {
  3030  				select {
  3031  				case msg := <-listener:
  3032  					// Only add kill signals
  3033  					if msg.Action == "kill" {
  3034  						sig := msg.Actor.Attributes["signal"]
  3035  						receivedSignals = append(receivedSignals, sig)
  3036  
  3037  						if reflect.DeepEqual(receivedSignals, c.expectedSignals) {
  3038  							break WAIT
  3039  						}
  3040  					}
  3041  				case err := <-stopErr:
  3042  					require.NoError(t, err, "stop task failed")
  3043  				case <-timeout:
  3044  					// timeout waiting for signals
  3045  					require.Equal(t, c.expectedSignals, receivedSignals, "timed out waiting for expected signals")
  3046  				}
  3047  			}
  3048  		})
  3049  	}
  3050  }