github.com/janma/nomad@v0.11.3/drivers/docker/driver_test.go (about) 1 package docker 2 3 import ( 4 "context" 5 "fmt" 6 "io/ioutil" 7 "math/rand" 8 "path/filepath" 9 "reflect" 10 "runtime" 11 "runtime/debug" 12 "strings" 13 "testing" 14 "time" 15 16 docker "github.com/fsouza/go-dockerclient" 17 hclog "github.com/hashicorp/go-hclog" 18 "github.com/hashicorp/nomad/client/taskenv" 19 "github.com/hashicorp/nomad/client/testutil" 20 "github.com/hashicorp/nomad/devices/gpu/nvidia" 21 "github.com/hashicorp/nomad/helper/freeport" 22 "github.com/hashicorp/nomad/helper/pluginutils/hclspecutils" 23 "github.com/hashicorp/nomad/helper/pluginutils/hclutils" 24 "github.com/hashicorp/nomad/helper/pluginutils/loader" 25 "github.com/hashicorp/nomad/helper/testlog" 26 "github.com/hashicorp/nomad/helper/uuid" 27 "github.com/hashicorp/nomad/nomad/structs" 28 "github.com/hashicorp/nomad/plugins/base" 29 "github.com/hashicorp/nomad/plugins/drivers" 30 dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils" 31 tu "github.com/hashicorp/nomad/testutil" 32 "github.com/stretchr/testify/assert" 33 "github.com/stretchr/testify/require" 34 ) 35 36 var ( 37 basicResources = &drivers.Resources{ 38 NomadResources: &structs.AllocatedTaskResources{ 39 Memory: structs.AllocatedMemoryResources{ 40 MemoryMB: 256, 41 }, 42 Cpu: structs.AllocatedCpuResources{ 43 CpuShares: 250, 44 }, 45 }, 46 LinuxResources: &drivers.LinuxResources{ 47 CPUShares: 512, 48 MemoryLimitBytes: 256 * 1024 * 1024, 49 }, 50 } 51 ) 52 53 func dockerIsRemote(t *testing.T) bool { 54 client, err := docker.NewClientFromEnv() 55 if err != nil { 56 return false 57 } 58 59 // Technically this could be a local tcp socket but for testing purposes 60 // we'll just assume that tcp is only used for remote connections. 61 if client.Endpoint()[0:3] == "tcp" { 62 return true 63 } 64 return false 65 } 66 67 var ( 68 // busyboxLongRunningCmd is a busybox command that runs indefinitely, and 69 // ideally responds to SIGINT/SIGTERM. Sadly, busybox:1.29.3 /bin/sleep doesn't. 70 busyboxLongRunningCmd = []string{"nc", "-l", "-p", "3000", "127.0.0.1"} 71 ) 72 73 // Returns a task with a reserved and dynamic port. The ports are returned 74 // respectively, and should be reclaimed with freeport.Return at the end of a test. 75 func dockerTask(t *testing.T) (*drivers.TaskConfig, *TaskConfig, []int) { 76 ports := freeport.MustTake(2) 77 dockerReserved := ports[0] 78 dockerDynamic := ports[1] 79 80 cfg := newTaskConfig("", busyboxLongRunningCmd) 81 task := &drivers.TaskConfig{ 82 ID: uuid.Generate(), 83 Name: "redis-demo", 84 AllocID: uuid.Generate(), 85 Env: map[string]string{ 86 "test": t.Name(), 87 }, 88 DeviceEnv: make(map[string]string), 89 Resources: &drivers.Resources{ 90 NomadResources: &structs.AllocatedTaskResources{ 91 Memory: structs.AllocatedMemoryResources{ 92 MemoryMB: 256, 93 }, 94 Cpu: structs.AllocatedCpuResources{ 95 CpuShares: 512, 96 }, 97 Networks: []*structs.NetworkResource{ 98 { 99 IP: "127.0.0.1", 100 ReservedPorts: []structs.Port{{Label: "main", Value: dockerReserved}}, 101 DynamicPorts: []structs.Port{{Label: "REDIS", Value: dockerDynamic}}, 102 }, 103 }, 104 }, 105 LinuxResources: &drivers.LinuxResources{ 106 CPUShares: 512, 107 MemoryLimitBytes: 256 * 1024 * 1024, 108 PercentTicks: float64(512) / float64(4096), 109 }, 110 }, 111 } 112 113 require.NoError(t, task.EncodeConcreteDriverConfig(&cfg)) 114 115 return task, &cfg, ports 116 } 117 118 // dockerSetup does all of the basic setup you need to get a running docker 119 // process up and running for testing. Use like: 120 // 121 // task := taskTemplate() 122 // // do custom task configuration 123 // client, handle, cleanup := dockerSetup(t, task, nil) 124 // defer cleanup() 125 // // do test stuff 126 // 127 // If there is a problem during setup this function will abort or skip the test 128 // and indicate the reason. 129 func dockerSetup(t *testing.T, task *drivers.TaskConfig, driverCfg map[string]interface{}) (*docker.Client, *dtestutil.DriverHarness, *taskHandle, func()) { 130 client := newTestDockerClient(t) 131 driver := dockerDriverHarness(t, driverCfg) 132 cleanup := driver.MkAllocDir(task, true) 133 134 copyImage(t, task.TaskDir(), "busybox.tar") 135 _, _, err := driver.StartTask(task) 136 require.NoError(t, err) 137 138 dockerDriver, ok := driver.Impl().(*Driver) 139 require.True(t, ok) 140 handle, ok := dockerDriver.tasks.Get(task.ID) 141 require.True(t, ok) 142 143 return client, driver, handle, func() { 144 driver.DestroyTask(task.ID, true) 145 cleanup() 146 } 147 } 148 149 // cleanSlate removes the specified docker image, including potentially stopping/removing any 150 // containers based on that image. This is used to decouple tests that would be coupled 151 // by using the same container image. 152 func cleanSlate(client *docker.Client, imageID string) { 153 if img, _ := client.InspectImage(imageID); img == nil { 154 return 155 } 156 containers, _ := client.ListContainers(docker.ListContainersOptions{ 157 All: true, 158 Filters: map[string][]string{ 159 "ancestor": {imageID}, 160 }, 161 }) 162 for _, c := range containers { 163 client.RemoveContainer(docker.RemoveContainerOptions{ 164 Force: true, 165 ID: c.ID, 166 }) 167 } 168 client.RemoveImageExtended(imageID, docker.RemoveImageOptions{ 169 Force: true, 170 }) 171 return 172 } 173 174 // dockerDriverHarness wires up everything needed to launch a task with a docker driver. 175 // A driver plugin interface and cleanup function is returned 176 func dockerDriverHarness(t *testing.T, cfg map[string]interface{}) *dtestutil.DriverHarness { 177 logger := testlog.HCLogger(t) 178 ctx, cancel := context.WithCancel(context.Background()) 179 t.Cleanup(func() { cancel() }) 180 harness := dtestutil.NewDriverHarness(t, NewDockerDriver(ctx, logger)) 181 if cfg == nil { 182 cfg = map[string]interface{}{ 183 "gc": map[string]interface{}{ 184 "image": false, 185 "image_delay": "1s", 186 }, 187 } 188 } 189 plugLoader, err := loader.NewPluginLoader(&loader.PluginLoaderConfig{ 190 Logger: logger, 191 PluginDir: "./plugins", 192 SupportedVersions: loader.AgentSupportedApiVersions, 193 InternalPlugins: map[loader.PluginID]*loader.InternalPluginConfig{ 194 PluginID: { 195 Config: cfg, 196 Factory: func(context.Context, hclog.Logger) interface{} { 197 return harness 198 }, 199 }, 200 }, 201 }) 202 203 require.NoError(t, err) 204 instance, err := plugLoader.Dispense(pluginName, base.PluginTypeDriver, nil, logger) 205 require.NoError(t, err) 206 driver, ok := instance.Plugin().(*dtestutil.DriverHarness) 207 if !ok { 208 t.Fatal("plugin instance is not a driver... wat?") 209 } 210 211 return driver 212 } 213 214 func newTestDockerClient(t *testing.T) *docker.Client { 215 t.Helper() 216 testutil.DockerCompatible(t) 217 218 client, err := docker.NewClientFromEnv() 219 if err != nil { 220 t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack()) 221 } 222 return client 223 } 224 225 /* 226 // This test should always pass, even if docker daemon is not available 227 func TestDockerDriver_Fingerprint(t *testing.T) { 228 if !tu.IsCI() { 229 t.Parallel() 230 } 231 232 ctx := testDockerDriverContexts(t, &structs.Task{Name: "foo", Driver: "docker", Resources: basicResources}) 233 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 234 defer ctx.Destroy() 235 d := NewDockerDriver(ctx.DriverCtx) 236 node := &structs.Node{ 237 Attributes: make(map[string]string), 238 } 239 240 request := &fingerprint.FingerprintRequest{Config: &config.Config{}, Node: node} 241 var response fingerprint.FingerprintResponse 242 err := d.Fingerprint(request, &response) 243 if err != nil { 244 t.Fatalf("err: %v", err) 245 } 246 247 attributes := response.Attributes 248 if testutil.DockerIsConnected(t) && attributes["driver.docker"] == "" { 249 t.Fatalf("Fingerprinter should detect when docker is available") 250 } 251 252 if attributes["driver.docker"] != "1" { 253 t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.") 254 } else { 255 256 // if docker is available, make sure that the response is tagged as 257 // applicable 258 if !response.Detected { 259 t.Fatalf("expected response to be applicable") 260 } 261 } 262 263 t.Logf("Found docker version %s", attributes["driver.docker.version"]) 264 } 265 266 // TestDockerDriver_Fingerprint_Bridge asserts that if Docker is running we set 267 // the bridge network's IP as a node attribute. See #2785 268 func TestDockerDriver_Fingerprint_Bridge(t *testing.T) { 269 if !tu.IsCI() { 270 t.Parallel() 271 } 272 testutil.DockerCompatible(t) 273 if runtime.GOOS != "linux" { 274 t.Skip("expect only on linux") 275 } 276 277 // This seems fragile, so we might need to reconsider this test if it 278 // proves flaky 279 expectedAddr, err := sockaddr.GetInterfaceIP("docker0") 280 if err != nil { 281 t.Fatalf("unable to get ip for docker0: %v", err) 282 } 283 if expectedAddr == "" { 284 t.Fatalf("unable to get ip for docker bridge") 285 } 286 287 conf := testConfig(t) 288 conf.Node = mock.Node() 289 dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil)) 290 291 request := &fingerprint.FingerprintRequest{Config: conf, Node: conf.Node} 292 var response fingerprint.FingerprintResponse 293 294 err = dd.Fingerprint(request, &response) 295 if err != nil { 296 t.Fatalf("error fingerprinting docker: %v", err) 297 } 298 299 if !response.Detected { 300 t.Fatalf("expected response to be applicable") 301 } 302 303 attributes := response.Attributes 304 if attributes == nil { 305 t.Fatalf("expected attributes to be set") 306 } 307 308 if attributes["driver.docker"] == "" { 309 t.Fatalf("expected Docker to be enabled but false was returned") 310 } 311 312 if found := attributes["driver.docker.bridge_ip"]; found != expectedAddr { 313 t.Fatalf("expected bridge ip %q but found: %q", expectedAddr, found) 314 } 315 t.Logf("docker bridge ip: %q", attributes["driver.docker.bridge_ip"]) 316 } 317 318 func TestDockerDriver_Check_DockerHealthStatus(t *testing.T) { 319 if !tu.IsCI() { 320 t.Parallel() 321 } 322 testutil.DockerCompatible(t) 323 if runtime.GOOS != "linux" { 324 t.Skip("expect only on linux") 325 } 326 327 require := require.New(t) 328 329 expectedAddr, err := sockaddr.GetInterfaceIP("docker0") 330 if err != nil { 331 t.Fatalf("unable to get ip for docker0: %v", err) 332 } 333 if expectedAddr == "" { 334 t.Fatalf("unable to get ip for docker bridge") 335 } 336 337 conf := testConfig(t) 338 conf.Node = mock.Node() 339 dd := NewDockerDriver(NewDriverContext("", "", "", "", conf, conf.Node, testlog.Logger(t), nil)) 340 341 request := &cstructs.HealthCheckRequest{} 342 var response cstructs.HealthCheckResponse 343 344 dc, ok := dd.(fingerprint.HealthCheck) 345 require.True(ok) 346 err = dc.HealthCheck(request, &response) 347 require.Nil(err) 348 349 driverInfo := response.Drivers["docker"] 350 require.NotNil(driverInfo) 351 require.True(driverInfo.Healthy) 352 }*/ 353 354 func TestDockerDriver_Start_Wait(t *testing.T) { 355 if !tu.IsCI() { 356 t.Parallel() 357 } 358 testutil.DockerCompatible(t) 359 360 taskCfg := newTaskConfig("", busyboxLongRunningCmd) 361 task := &drivers.TaskConfig{ 362 ID: uuid.Generate(), 363 Name: "nc-demo", 364 AllocID: uuid.Generate(), 365 Resources: basicResources, 366 } 367 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 368 369 d := dockerDriverHarness(t, nil) 370 cleanup := d.MkAllocDir(task, true) 371 defer cleanup() 372 copyImage(t, task.TaskDir(), "busybox.tar") 373 374 _, _, err := d.StartTask(task) 375 require.NoError(t, err) 376 377 defer d.DestroyTask(task.ID, true) 378 379 // Attempt to wait 380 waitCh, err := d.WaitTask(context.Background(), task.ID) 381 require.NoError(t, err) 382 383 select { 384 case <-waitCh: 385 t.Fatalf("wait channel should not have received an exit result") 386 case <-time.After(time.Duration(tu.TestMultiplier()*1) * time.Second): 387 } 388 } 389 390 func TestDockerDriver_Start_WaitFinish(t *testing.T) { 391 if !tu.IsCI() { 392 t.Parallel() 393 } 394 testutil.DockerCompatible(t) 395 396 taskCfg := newTaskConfig("", []string{"echo", "hello"}) 397 task := &drivers.TaskConfig{ 398 ID: uuid.Generate(), 399 Name: "nc-demo", 400 AllocID: uuid.Generate(), 401 Resources: basicResources, 402 } 403 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 404 405 d := dockerDriverHarness(t, nil) 406 cleanup := d.MkAllocDir(task, true) 407 defer cleanup() 408 copyImage(t, task.TaskDir(), "busybox.tar") 409 410 _, _, err := d.StartTask(task) 411 require.NoError(t, err) 412 413 defer d.DestroyTask(task.ID, true) 414 415 // Attempt to wait 416 waitCh, err := d.WaitTask(context.Background(), task.ID) 417 require.NoError(t, err) 418 419 select { 420 case res := <-waitCh: 421 if !res.Successful() { 422 require.Fail(t, "ExitResult should be successful: %v", res) 423 } 424 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 425 require.Fail(t, "timeout") 426 } 427 } 428 429 // TestDockerDriver_Start_StoppedContainer asserts that Nomad will detect a 430 // stopped task container, remove it, and start a new container. 431 // 432 // See https://github.com/hashicorp/nomad/issues/3419 433 func TestDockerDriver_Start_StoppedContainer(t *testing.T) { 434 if !tu.IsCI() { 435 t.Parallel() 436 } 437 testutil.DockerCompatible(t) 438 439 taskCfg := newTaskConfig("", []string{"sleep", "9001"}) 440 task := &drivers.TaskConfig{ 441 ID: uuid.Generate(), 442 Name: "nc-demo", 443 AllocID: uuid.Generate(), 444 Resources: basicResources, 445 } 446 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 447 448 d := dockerDriverHarness(t, nil) 449 cleanup := d.MkAllocDir(task, true) 450 defer cleanup() 451 copyImage(t, task.TaskDir(), "busybox.tar") 452 453 client := newTestDockerClient(t) 454 455 var imageID string 456 var err error 457 458 if runtime.GOOS != "windows" { 459 imageID, err = d.Impl().(*Driver).loadImage(task, &taskCfg, client) 460 } else { 461 image, lErr := client.InspectImage("hashicorpnomad/busybox-windows:server2016-0.1") 462 err = lErr 463 if image != nil { 464 imageID = image.ID 465 } 466 } 467 require.NoError(t, err) 468 require.NotEmpty(t, imageID) 469 470 // Create a container of the same name but don't start it. This mimics 471 // the case of dockerd getting restarted and stopping containers while 472 // Nomad is watching them. 473 opts := docker.CreateContainerOptions{ 474 Name: strings.Replace(task.ID, "/", "_", -1), 475 Config: &docker.Config{ 476 Image: taskCfg.Image, 477 Cmd: []string{"sleep", "9000"}, 478 Env: []string{fmt.Sprintf("test=%s", t.Name())}, 479 }, 480 } 481 482 if _, err := client.CreateContainer(opts); err != nil { 483 t.Fatalf("error creating initial container: %v", err) 484 } 485 486 _, _, err = d.StartTask(task) 487 defer d.DestroyTask(task.ID, true) 488 require.NoError(t, err) 489 490 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 491 require.NoError(t, d.DestroyTask(task.ID, true)) 492 } 493 494 func TestDockerDriver_Start_LoadImage(t *testing.T) { 495 if !tu.IsCI() { 496 t.Parallel() 497 } 498 testutil.DockerCompatible(t) 499 500 taskCfg := newTaskConfig("", []string{"sh", "-c", "echo hello > $NOMAD_TASK_DIR/output"}) 501 task := &drivers.TaskConfig{ 502 ID: uuid.Generate(), 503 Name: "busybox-demo", 504 AllocID: uuid.Generate(), 505 Resources: basicResources, 506 } 507 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 508 509 d := dockerDriverHarness(t, nil) 510 cleanup := d.MkAllocDir(task, true) 511 defer cleanup() 512 copyImage(t, task.TaskDir(), "busybox.tar") 513 514 _, _, err := d.StartTask(task) 515 require.NoError(t, err) 516 517 defer d.DestroyTask(task.ID, true) 518 519 waitCh, err := d.WaitTask(context.Background(), task.ID) 520 require.NoError(t, err) 521 select { 522 case res := <-waitCh: 523 if !res.Successful() { 524 require.Fail(t, "ExitResult should be successful: %v", res) 525 } 526 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 527 require.Fail(t, "timeout") 528 } 529 530 // Check that data was written to the shared alloc directory. 531 outputFile := filepath.Join(task.TaskDir().LocalDir, "output") 532 act, err := ioutil.ReadFile(outputFile) 533 if err != nil { 534 t.Fatalf("Couldn't read expected output: %v", err) 535 } 536 537 exp := "hello" 538 if strings.TrimSpace(string(act)) != exp { 539 t.Fatalf("Command outputted %v; want %v", act, exp) 540 } 541 542 } 543 544 // Tests that starting a task without an image fails 545 func TestDockerDriver_Start_NoImage(t *testing.T) { 546 if !tu.IsCI() { 547 t.Parallel() 548 } 549 testutil.DockerCompatible(t) 550 551 taskCfg := TaskConfig{ 552 Command: "echo", 553 Args: []string{"foo"}, 554 } 555 task := &drivers.TaskConfig{ 556 ID: uuid.Generate(), 557 Name: "echo", 558 AllocID: uuid.Generate(), 559 Resources: basicResources, 560 } 561 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 562 563 d := dockerDriverHarness(t, nil) 564 cleanup := d.MkAllocDir(task, false) 565 defer cleanup() 566 567 _, _, err := d.StartTask(task) 568 require.Error(t, err) 569 require.Contains(t, err.Error(), "image name required") 570 571 d.DestroyTask(task.ID, true) 572 } 573 574 func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) { 575 if !tu.IsCI() { 576 t.Parallel() 577 } 578 testutil.DockerCompatible(t) 579 580 taskCfg := TaskConfig{ 581 Image: "127.0.0.1:32121/foo", // bad path 582 Command: "echo", 583 Args: []string{ 584 "hello", 585 }, 586 } 587 task := &drivers.TaskConfig{ 588 ID: uuid.Generate(), 589 Name: "busybox-demo", 590 AllocID: uuid.Generate(), 591 Resources: basicResources, 592 } 593 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 594 595 d := dockerDriverHarness(t, nil) 596 cleanup := d.MkAllocDir(task, true) 597 defer cleanup() 598 599 _, _, err := d.StartTask(task) 600 require.Error(t, err) 601 602 defer d.DestroyTask(task.ID, true) 603 604 if rerr, ok := err.(*structs.RecoverableError); !ok { 605 t.Fatalf("want recoverable error: %+v", err) 606 } else if !rerr.IsRecoverable() { 607 t.Fatalf("error not recoverable: %+v", err) 608 } 609 } 610 611 func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) { 612 if !tu.IsCI() { 613 t.Parallel() 614 } 615 // This test requires that the alloc dir be mounted into docker as a volume. 616 // Because this cannot happen when docker is run remotely, e.g. when running 617 // docker in a VM, we skip this when we detect Docker is being run remotely. 618 if !testutil.DockerIsConnected(t) || dockerIsRemote(t) { 619 t.Skip("Docker not connected") 620 } 621 622 exp := []byte{'w', 'i', 'n'} 623 file := "output.txt" 624 625 taskCfg := newTaskConfig("", []string{ 626 "sh", 627 "-c", 628 fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`, 629 string(exp), taskenv.AllocDir, file), 630 }) 631 task := &drivers.TaskConfig{ 632 ID: uuid.Generate(), 633 Name: "busybox-demo", 634 AllocID: uuid.Generate(), 635 Resources: basicResources, 636 } 637 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 638 639 d := dockerDriverHarness(t, nil) 640 cleanup := d.MkAllocDir(task, true) 641 defer cleanup() 642 copyImage(t, task.TaskDir(), "busybox.tar") 643 644 _, _, err := d.StartTask(task) 645 require.NoError(t, err) 646 647 defer d.DestroyTask(task.ID, true) 648 649 // Attempt to wait 650 waitCh, err := d.WaitTask(context.Background(), task.ID) 651 require.NoError(t, err) 652 653 select { 654 case res := <-waitCh: 655 if !res.Successful() { 656 require.Fail(t, fmt.Sprintf("ExitResult should be successful: %v", res)) 657 } 658 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 659 require.Fail(t, "timeout") 660 } 661 662 // Check that data was written to the shared alloc directory. 663 outputFile := filepath.Join(task.TaskDir().SharedAllocDir, file) 664 act, err := ioutil.ReadFile(outputFile) 665 if err != nil { 666 t.Fatalf("Couldn't read expected output: %v", err) 667 } 668 669 if !reflect.DeepEqual(act, exp) { 670 t.Fatalf("Command outputted %v; want %v", act, exp) 671 } 672 } 673 674 func TestDockerDriver_Start_Kill_Wait(t *testing.T) { 675 if !tu.IsCI() { 676 t.Parallel() 677 } 678 testutil.DockerCompatible(t) 679 680 taskCfg := newTaskConfig("", busyboxLongRunningCmd) 681 task := &drivers.TaskConfig{ 682 ID: uuid.Generate(), 683 Name: "busybox-demo", 684 AllocID: uuid.Generate(), 685 Resources: basicResources, 686 } 687 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 688 689 d := dockerDriverHarness(t, nil) 690 cleanup := d.MkAllocDir(task, true) 691 defer cleanup() 692 copyImage(t, task.TaskDir(), "busybox.tar") 693 694 _, _, err := d.StartTask(task) 695 require.NoError(t, err) 696 697 defer d.DestroyTask(task.ID, true) 698 699 go func(t *testing.T) { 700 time.Sleep(100 * time.Millisecond) 701 signal := "SIGINT" 702 if runtime.GOOS == "windows" { 703 signal = "SIGKILL" 704 } 705 require.NoError(t, d.StopTask(task.ID, time.Second, signal)) 706 }(t) 707 708 // Attempt to wait 709 waitCh, err := d.WaitTask(context.Background(), task.ID) 710 require.NoError(t, err) 711 712 select { 713 case res := <-waitCh: 714 if res.Successful() { 715 require.Fail(t, "ExitResult should err: %v", res) 716 } 717 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 718 require.Fail(t, "timeout") 719 } 720 } 721 722 func TestDockerDriver_Start_KillTimeout(t *testing.T) { 723 if !tu.IsCI() { 724 t.Parallel() 725 } 726 testutil.DockerCompatible(t) 727 728 if runtime.GOOS == "windows" { 729 t.Skip("Windows Docker does not support SIGUSR1") 730 } 731 732 timeout := 2 * time.Second 733 taskCfg := newTaskConfig("", []string{"sleep", "10"}) 734 task := &drivers.TaskConfig{ 735 ID: uuid.Generate(), 736 Name: "busybox-demo", 737 AllocID: uuid.Generate(), 738 Resources: basicResources, 739 } 740 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 741 742 d := dockerDriverHarness(t, nil) 743 cleanup := d.MkAllocDir(task, true) 744 defer cleanup() 745 copyImage(t, task.TaskDir(), "busybox.tar") 746 747 _, _, err := d.StartTask(task) 748 require.NoError(t, err) 749 750 defer d.DestroyTask(task.ID, true) 751 752 var killSent time.Time 753 go func() { 754 time.Sleep(100 * time.Millisecond) 755 killSent = time.Now() 756 require.NoError(t, d.StopTask(task.ID, timeout, "SIGUSR1")) 757 }() 758 759 // Attempt to wait 760 waitCh, err := d.WaitTask(context.Background(), task.ID) 761 require.NoError(t, err) 762 763 var killed time.Time 764 select { 765 case <-waitCh: 766 killed = time.Now() 767 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 768 require.Fail(t, "timeout") 769 } 770 771 require.True(t, killed.Sub(killSent) > timeout) 772 } 773 774 func TestDockerDriver_StartN(t *testing.T) { 775 if runtime.GOOS == "windows" { 776 t.Skip("Windows Docker does not support SIGINT") 777 } 778 if !tu.IsCI() { 779 t.Parallel() 780 } 781 testutil.DockerCompatible(t) 782 require := require.New(t) 783 784 task1, _, ports1 := dockerTask(t) 785 defer freeport.Return(ports1) 786 787 task2, _, ports2 := dockerTask(t) 788 defer freeport.Return(ports2) 789 790 task3, _, ports3 := dockerTask(t) 791 defer freeport.Return(ports3) 792 793 taskList := []*drivers.TaskConfig{task1, task2, task3} 794 795 t.Logf("Starting %d tasks", len(taskList)) 796 797 d := dockerDriverHarness(t, nil) 798 // Let's spin up a bunch of things 799 for _, task := range taskList { 800 cleanup := d.MkAllocDir(task, true) 801 defer cleanup() 802 copyImage(t, task.TaskDir(), "busybox.tar") 803 _, _, err := d.StartTask(task) 804 require.NoError(err) 805 806 } 807 808 defer d.DestroyTask(task3.ID, true) 809 defer d.DestroyTask(task2.ID, true) 810 defer d.DestroyTask(task1.ID, true) 811 812 t.Log("All tasks are started. Terminating...") 813 for _, task := range taskList { 814 require.NoError(d.StopTask(task.ID, time.Second, "SIGINT")) 815 816 // Attempt to wait 817 waitCh, err := d.WaitTask(context.Background(), task.ID) 818 require.NoError(err) 819 820 select { 821 case <-waitCh: 822 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 823 require.Fail("timeout waiting on task") 824 } 825 } 826 827 t.Log("Test complete!") 828 } 829 830 func TestDockerDriver_StartNVersions(t *testing.T) { 831 if runtime.GOOS == "windows" { 832 t.Skip("Skipped on windows, we don't have image variants available") 833 } 834 if !tu.IsCI() { 835 t.Parallel() 836 } 837 testutil.DockerCompatible(t) 838 require := require.New(t) 839 840 task1, cfg1, ports1 := dockerTask(t) 841 defer freeport.Return(ports1) 842 tcfg1 := newTaskConfig("", []string{"echo", "hello"}) 843 cfg1.Image = tcfg1.Image 844 cfg1.LoadImage = tcfg1.LoadImage 845 require.NoError(task1.EncodeConcreteDriverConfig(cfg1)) 846 847 task2, cfg2, ports2 := dockerTask(t) 848 defer freeport.Return(ports2) 849 tcfg2 := newTaskConfig("musl", []string{"echo", "hello"}) 850 cfg2.Image = tcfg2.Image 851 cfg2.LoadImage = tcfg2.LoadImage 852 require.NoError(task2.EncodeConcreteDriverConfig(cfg2)) 853 854 task3, cfg3, ports3 := dockerTask(t) 855 defer freeport.Return(ports3) 856 tcfg3 := newTaskConfig("glibc", []string{"echo", "hello"}) 857 cfg3.Image = tcfg3.Image 858 cfg3.LoadImage = tcfg3.LoadImage 859 require.NoError(task3.EncodeConcreteDriverConfig(cfg3)) 860 861 taskList := []*drivers.TaskConfig{task1, task2, task3} 862 863 t.Logf("Starting %d tasks", len(taskList)) 864 d := dockerDriverHarness(t, nil) 865 866 // Let's spin up a bunch of things 867 for _, task := range taskList { 868 cleanup := d.MkAllocDir(task, true) 869 defer cleanup() 870 copyImage(t, task.TaskDir(), "busybox.tar") 871 copyImage(t, task.TaskDir(), "busybox_musl.tar") 872 copyImage(t, task.TaskDir(), "busybox_glibc.tar") 873 _, _, err := d.StartTask(task) 874 require.NoError(err) 875 876 require.NoError(d.WaitUntilStarted(task.ID, 5*time.Second)) 877 } 878 879 defer d.DestroyTask(task3.ID, true) 880 defer d.DestroyTask(task2.ID, true) 881 defer d.DestroyTask(task1.ID, true) 882 883 t.Log("All tasks are started. Terminating...") 884 for _, task := range taskList { 885 require.NoError(d.StopTask(task.ID, time.Second, "SIGINT")) 886 887 // Attempt to wait 888 waitCh, err := d.WaitTask(context.Background(), task.ID) 889 require.NoError(err) 890 891 select { 892 case <-waitCh: 893 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 894 require.Fail("timeout waiting on task") 895 } 896 } 897 898 t.Log("Test complete!") 899 } 900 901 func TestDockerDriver_Labels(t *testing.T) { 902 if !tu.IsCI() { 903 t.Parallel() 904 } 905 testutil.DockerCompatible(t) 906 907 task, cfg, ports := dockerTask(t) 908 defer freeport.Return(ports) 909 910 cfg.Labels = map[string]string{ 911 "label1": "value1", 912 "label2": "value2", 913 } 914 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 915 916 client, d, handle, cleanup := dockerSetup(t, task, nil) 917 defer cleanup() 918 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 919 920 container, err := client.InspectContainer(handle.containerID) 921 if err != nil { 922 t.Fatalf("err: %v", err) 923 } 924 925 // expect to see 1 additional standard labels 926 require.Equal(t, len(cfg.Labels)+1, len(container.Config.Labels)) 927 for k, v := range cfg.Labels { 928 require.Equal(t, v, container.Config.Labels[k]) 929 } 930 } 931 932 func TestDockerDriver_ForcePull(t *testing.T) { 933 if !tu.IsCI() { 934 t.Parallel() 935 } 936 testutil.DockerCompatible(t) 937 938 task, cfg, ports := dockerTask(t) 939 defer freeport.Return(ports) 940 941 cfg.ForcePull = true 942 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 943 944 client, d, handle, cleanup := dockerSetup(t, task, nil) 945 defer cleanup() 946 947 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 948 949 _, err := client.InspectContainer(handle.containerID) 950 if err != nil { 951 t.Fatalf("err: %v", err) 952 } 953 } 954 955 func TestDockerDriver_ForcePull_RepoDigest(t *testing.T) { 956 if runtime.GOOS == "windows" { 957 t.Skip("TODO: Skipped digest test on Windows") 958 } 959 960 if !tu.IsCI() { 961 t.Parallel() 962 } 963 testutil.DockerCompatible(t) 964 965 task, cfg, ports := dockerTask(t) 966 defer freeport.Return(ports) 967 cfg.LoadImage = "" 968 cfg.Image = "library/busybox@sha256:58ac43b2cc92c687a32c8be6278e50a063579655fe3090125dcb2af0ff9e1a64" 969 localDigest := "sha256:8ac48589692a53a9b8c2d1ceaa6b402665aa7fe667ba51ccc03002300856d8c7" 970 cfg.ForcePull = true 971 cfg.Command = busyboxLongRunningCmd[0] 972 cfg.Args = busyboxLongRunningCmd[1:] 973 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 974 975 client, d, handle, cleanup := dockerSetup(t, task, nil) 976 defer cleanup() 977 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 978 979 container, err := client.InspectContainer(handle.containerID) 980 require.NoError(t, err) 981 require.Equal(t, localDigest, container.Image) 982 } 983 984 func TestDockerDriver_SecurityOptUnconfined(t *testing.T) { 985 if runtime.GOOS == "windows" { 986 t.Skip("Windows does not support seccomp") 987 } 988 if !tu.IsCI() { 989 t.Parallel() 990 } 991 testutil.DockerCompatible(t) 992 993 task, cfg, ports := dockerTask(t) 994 defer freeport.Return(ports) 995 cfg.SecurityOpt = []string{"seccomp=unconfined"} 996 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 997 998 client, d, handle, cleanup := dockerSetup(t, task, nil) 999 defer cleanup() 1000 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1001 1002 container, err := client.InspectContainer(handle.containerID) 1003 if err != nil { 1004 t.Fatalf("err: %v", err) 1005 } 1006 1007 require.Exactly(t, cfg.SecurityOpt, container.HostConfig.SecurityOpt) 1008 } 1009 1010 func TestDockerDriver_SecurityOptFromFile(t *testing.T) { 1011 1012 if runtime.GOOS == "windows" { 1013 t.Skip("Windows does not support seccomp") 1014 } 1015 if !tu.IsCI() { 1016 t.Parallel() 1017 } 1018 testutil.DockerCompatible(t) 1019 1020 task, cfg, ports := dockerTask(t) 1021 defer freeport.Return(ports) 1022 cfg.SecurityOpt = []string{"seccomp=./test-resources/docker/seccomp.json"} 1023 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1024 1025 client, d, handle, cleanup := dockerSetup(t, task, nil) 1026 defer cleanup() 1027 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1028 1029 container, err := client.InspectContainer(handle.containerID) 1030 require.NoError(t, err) 1031 1032 require.Contains(t, container.HostConfig.SecurityOpt[0], "reboot") 1033 } 1034 1035 func TestDockerDriver_Runtime(t *testing.T) { 1036 if !tu.IsCI() { 1037 t.Parallel() 1038 } 1039 testutil.DockerCompatible(t) 1040 1041 task, cfg, ports := dockerTask(t) 1042 defer freeport.Return(ports) 1043 cfg.Runtime = "runc" 1044 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1045 1046 client, d, handle, cleanup := dockerSetup(t, task, nil) 1047 defer cleanup() 1048 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1049 1050 container, err := client.InspectContainer(handle.containerID) 1051 if err != nil { 1052 t.Fatalf("err: %v", err) 1053 } 1054 1055 require.Exactly(t, cfg.Runtime, container.HostConfig.Runtime) 1056 } 1057 1058 func TestDockerDriver_CreateContainerConfig(t *testing.T) { 1059 t.Parallel() 1060 1061 task, cfg, ports := dockerTask(t) 1062 defer freeport.Return(ports) 1063 opt := map[string]string{"size": "120G"} 1064 1065 cfg.StorageOpt = opt 1066 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1067 1068 dh := dockerDriverHarness(t, nil) 1069 driver := dh.Impl().(*Driver) 1070 1071 c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 1072 require.NoError(t, err) 1073 1074 require.Equal(t, "org/repo:0.1", c.Config.Image) 1075 require.EqualValues(t, opt, c.HostConfig.StorageOpt) 1076 1077 // Container name should be /<task_name>-<alloc_id> for backward compat 1078 containerName := fmt.Sprintf("%s-%s", strings.Replace(task.Name, "/", "_", -1), task.AllocID) 1079 require.Equal(t, containerName, c.Name) 1080 } 1081 1082 func TestDockerDriver_CreateContainerConfig_RuntimeConflict(t *testing.T) { 1083 t.Parallel() 1084 1085 task, cfg, ports := dockerTask(t) 1086 defer freeport.Return(ports) 1087 task.DeviceEnv[nvidia.NvidiaVisibleDevices] = "GPU_UUID_1" 1088 1089 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1090 1091 dh := dockerDriverHarness(t, nil) 1092 driver := dh.Impl().(*Driver) 1093 driver.gpuRuntime = true 1094 1095 // Should error if a runtime was explicitly set that doesn't match gpu runtime 1096 cfg.Runtime = "nvidia" 1097 c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 1098 require.NoError(t, err) 1099 require.Equal(t, "nvidia", c.HostConfig.Runtime) 1100 1101 cfg.Runtime = "custom" 1102 _, err = driver.createContainerConfig(task, cfg, "org/repo:0.1") 1103 require.Error(t, err) 1104 require.Contains(t, err.Error(), "conflicting runtime requests") 1105 } 1106 1107 func TestDockerDriver_CreateContainerConfig_ChecksAllowRuntimes(t *testing.T) { 1108 t.Parallel() 1109 1110 dh := dockerDriverHarness(t, nil) 1111 driver := dh.Impl().(*Driver) 1112 driver.gpuRuntime = true 1113 driver.config.allowRuntimes = map[string]struct{}{ 1114 "runc": struct{}{}, 1115 "custom": struct{}{}, 1116 } 1117 1118 allowRuntime := []string{ 1119 "", // default always works 1120 "runc", 1121 "custom", 1122 } 1123 1124 task, cfg, ports := dockerTask(t) 1125 defer freeport.Return(ports) 1126 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1127 1128 for _, runtime := range allowRuntime { 1129 t.Run(runtime, func(t *testing.T) { 1130 cfg.Runtime = runtime 1131 c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 1132 require.NoError(t, err) 1133 require.Equal(t, runtime, c.HostConfig.Runtime) 1134 }) 1135 } 1136 1137 t.Run("not allowed: denied", func(t *testing.T) { 1138 cfg.Runtime = "denied" 1139 _, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 1140 require.Error(t, err) 1141 require.Contains(t, err.Error(), `runtime "denied" is not allowed`) 1142 }) 1143 1144 } 1145 1146 func TestDockerDriver_CreateContainerConfig_User(t *testing.T) { 1147 t.Parallel() 1148 1149 task, cfg, ports := dockerTask(t) 1150 defer freeport.Return(ports) 1151 task.User = "random-user-1" 1152 1153 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1154 1155 dh := dockerDriverHarness(t, nil) 1156 driver := dh.Impl().(*Driver) 1157 1158 c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 1159 require.NoError(t, err) 1160 1161 require.Equal(t, task.User, c.Config.User) 1162 } 1163 1164 func TestDockerDriver_CreateContainerConfig_Labels(t *testing.T) { 1165 t.Parallel() 1166 1167 task, cfg, ports := dockerTask(t) 1168 defer freeport.Return(ports) 1169 task.AllocID = uuid.Generate() 1170 task.JobName = "redis-demo-job" 1171 1172 cfg.Labels = map[string]string{ 1173 "user_label": "user_value", 1174 1175 // com.hashicorp.nomad. labels are reserved and 1176 // cannot be overridden 1177 "com.hashicorp.nomad.alloc_id": "bad_value", 1178 } 1179 1180 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1181 1182 dh := dockerDriverHarness(t, nil) 1183 driver := dh.Impl().(*Driver) 1184 1185 c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 1186 require.NoError(t, err) 1187 1188 expectedLabels := map[string]string{ 1189 // user provided labels 1190 "user_label": "user_value", 1191 // default labels 1192 "com.hashicorp.nomad.alloc_id": task.AllocID, 1193 } 1194 1195 require.Equal(t, expectedLabels, c.Config.Labels) 1196 } 1197 1198 func TestDockerDriver_CreateContainerConfig_Logging(t *testing.T) { 1199 t.Parallel() 1200 1201 cases := []struct { 1202 name string 1203 loggingConfig DockerLogging 1204 expectedConfig DockerLogging 1205 }{ 1206 { 1207 "simple type", 1208 DockerLogging{Type: "fluentd"}, 1209 DockerLogging{ 1210 Type: "fluentd", 1211 Config: map[string]string{}, 1212 }, 1213 }, 1214 { 1215 "simple driver", 1216 DockerLogging{Driver: "fluentd"}, 1217 DockerLogging{ 1218 Type: "fluentd", 1219 Config: map[string]string{}, 1220 }, 1221 }, 1222 { 1223 "type takes precedence", 1224 DockerLogging{ 1225 Type: "json-file", 1226 Driver: "fluentd", 1227 }, 1228 DockerLogging{ 1229 Type: "json-file", 1230 Config: map[string]string{}, 1231 }, 1232 }, 1233 { 1234 "user config takes precedence, even if no type provided", 1235 DockerLogging{ 1236 Type: "", 1237 Config: map[string]string{"max-file": "3", "max-size": "10m"}, 1238 }, 1239 DockerLogging{ 1240 Type: "", 1241 Config: map[string]string{"max-file": "3", "max-size": "10m"}, 1242 }, 1243 }, 1244 { 1245 "defaults to json-file w/ log rotation", 1246 DockerLogging{ 1247 Type: "", 1248 }, 1249 DockerLogging{ 1250 Type: "json-file", 1251 Config: map[string]string{"max-file": "2", "max-size": "2m"}, 1252 }, 1253 }, 1254 } 1255 1256 for _, c := range cases { 1257 t.Run(c.name, func(t *testing.T) { 1258 task, cfg, ports := dockerTask(t) 1259 defer freeport.Return(ports) 1260 1261 cfg.Logging = c.loggingConfig 1262 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1263 1264 dh := dockerDriverHarness(t, nil) 1265 driver := dh.Impl().(*Driver) 1266 1267 cc, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 1268 require.NoError(t, err) 1269 1270 require.Equal(t, c.expectedConfig.Type, cc.HostConfig.LogConfig.Type) 1271 require.Equal(t, c.expectedConfig.Config["max-file"], cc.HostConfig.LogConfig.Config["max-file"]) 1272 require.Equal(t, c.expectedConfig.Config["max-size"], cc.HostConfig.LogConfig.Config["max-size"]) 1273 }) 1274 } 1275 } 1276 1277 func TestDockerDriver_CreateContainerConfigWithRuntimes(t *testing.T) { 1278 if !tu.IsCI() { 1279 t.Parallel() 1280 } 1281 testCases := []struct { 1282 description string 1283 gpuRuntimeSet bool 1284 expectToReturnError bool 1285 expectedRuntime string 1286 nvidiaDevicesProvided bool 1287 }{ 1288 { 1289 description: "gpu devices are provided, docker driver was able to detect nvidia-runtime 1", 1290 gpuRuntimeSet: true, 1291 expectToReturnError: false, 1292 expectedRuntime: "nvidia", 1293 nvidiaDevicesProvided: true, 1294 }, 1295 { 1296 description: "gpu devices are provided, docker driver was able to detect nvidia-runtime 2", 1297 gpuRuntimeSet: true, 1298 expectToReturnError: false, 1299 expectedRuntime: "nvidia-runtime-modified-name", 1300 nvidiaDevicesProvided: true, 1301 }, 1302 { 1303 description: "no gpu devices provided - no runtime should be set", 1304 gpuRuntimeSet: true, 1305 expectToReturnError: false, 1306 expectedRuntime: "nvidia", 1307 nvidiaDevicesProvided: false, 1308 }, 1309 { 1310 description: "no gpuRuntime supported by docker driver", 1311 gpuRuntimeSet: false, 1312 expectToReturnError: true, 1313 expectedRuntime: "nvidia", 1314 nvidiaDevicesProvided: true, 1315 }, 1316 } 1317 for _, testCase := range testCases { 1318 t.Run(testCase.description, func(t *testing.T) { 1319 task, cfg, ports := dockerTask(t) 1320 defer freeport.Return(ports) 1321 1322 dh := dockerDriverHarness(t, map[string]interface{}{ 1323 "allow_runtimes": []string{"runc", "nvidia", "nvidia-runtime-modified-name"}, 1324 }) 1325 driver := dh.Impl().(*Driver) 1326 1327 driver.gpuRuntime = testCase.gpuRuntimeSet 1328 driver.config.GPURuntimeName = testCase.expectedRuntime 1329 if testCase.nvidiaDevicesProvided { 1330 task.DeviceEnv[nvidia.NvidiaVisibleDevices] = "GPU_UUID_1" 1331 } 1332 1333 c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 1334 if testCase.expectToReturnError { 1335 require.NotNil(t, err) 1336 } else { 1337 require.NoError(t, err) 1338 if testCase.nvidiaDevicesProvided { 1339 require.Equal(t, testCase.expectedRuntime, c.HostConfig.Runtime) 1340 } else { 1341 // no nvidia devices provided -> no point to use nvidia runtime 1342 require.Equal(t, "", c.HostConfig.Runtime) 1343 } 1344 } 1345 }) 1346 } 1347 } 1348 1349 func TestDockerDriver_Capabilities(t *testing.T) { 1350 if !tu.IsCI() { 1351 t.Parallel() 1352 } 1353 testutil.DockerCompatible(t) 1354 if runtime.GOOS == "windows" { 1355 t.Skip("Capabilities not supported on windows") 1356 } 1357 1358 testCases := []struct { 1359 Name string 1360 CapAdd []string 1361 CapDrop []string 1362 Whitelist string 1363 StartError string 1364 }{ 1365 { 1366 Name: "default-whitelist-add-allowed", 1367 CapAdd: []string{"fowner", "mknod"}, 1368 CapDrop: []string{"all"}, 1369 }, 1370 { 1371 Name: "default-whitelist-add-forbidden", 1372 CapAdd: []string{"net_admin"}, 1373 StartError: "net_admin", 1374 }, 1375 { 1376 Name: "default-whitelist-drop-existing", 1377 CapDrop: []string{"fowner", "mknod"}, 1378 }, 1379 { 1380 Name: "restrictive-whitelist-drop-all", 1381 CapDrop: []string{"all"}, 1382 Whitelist: "fowner,mknod", 1383 }, 1384 { 1385 Name: "restrictive-whitelist-add-allowed", 1386 CapAdd: []string{"fowner", "mknod"}, 1387 CapDrop: []string{"all"}, 1388 Whitelist: "fowner,mknod", 1389 }, 1390 { 1391 Name: "restrictive-whitelist-add-forbidden", 1392 CapAdd: []string{"net_admin", "mknod"}, 1393 CapDrop: []string{"all"}, 1394 Whitelist: "fowner,mknod", 1395 StartError: "net_admin", 1396 }, 1397 { 1398 Name: "permissive-whitelist", 1399 CapAdd: []string{"net_admin", "mknod"}, 1400 Whitelist: "all", 1401 }, 1402 { 1403 Name: "permissive-whitelist-add-all", 1404 CapAdd: []string{"all"}, 1405 Whitelist: "all", 1406 }, 1407 } 1408 1409 for _, tc := range testCases { 1410 t.Run(tc.Name, func(t *testing.T) { 1411 client := newTestDockerClient(t) 1412 task, cfg, ports := dockerTask(t) 1413 defer freeport.Return(ports) 1414 1415 if len(tc.CapAdd) > 0 { 1416 cfg.CapAdd = tc.CapAdd 1417 } 1418 if len(tc.CapDrop) > 0 { 1419 cfg.CapDrop = tc.CapDrop 1420 } 1421 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1422 1423 d := dockerDriverHarness(t, nil) 1424 dockerDriver, ok := d.Impl().(*Driver) 1425 require.True(t, ok) 1426 if tc.Whitelist != "" { 1427 dockerDriver.config.AllowCaps = strings.Split(tc.Whitelist, ",") 1428 } 1429 1430 cleanup := d.MkAllocDir(task, true) 1431 defer cleanup() 1432 copyImage(t, task.TaskDir(), "busybox.tar") 1433 1434 _, _, err := d.StartTask(task) 1435 defer d.DestroyTask(task.ID, true) 1436 if err == nil && tc.StartError != "" { 1437 t.Fatalf("Expected error in start: %v", tc.StartError) 1438 } else if err != nil { 1439 if tc.StartError == "" { 1440 require.NoError(t, err) 1441 } else { 1442 require.Contains(t, err.Error(), tc.StartError) 1443 } 1444 return 1445 } 1446 1447 handle, ok := dockerDriver.tasks.Get(task.ID) 1448 require.True(t, ok) 1449 1450 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1451 1452 container, err := client.InspectContainer(handle.containerID) 1453 require.NoError(t, err) 1454 1455 require.Exactly(t, tc.CapAdd, container.HostConfig.CapAdd) 1456 require.Exactly(t, tc.CapDrop, container.HostConfig.CapDrop) 1457 }) 1458 } 1459 } 1460 1461 func TestDockerDriver_DNS(t *testing.T) { 1462 if !tu.IsCI() { 1463 t.Parallel() 1464 } 1465 testutil.DockerCompatible(t) 1466 1467 task, cfg, ports := dockerTask(t) 1468 defer freeport.Return(ports) 1469 cfg.DNSServers = []string{"8.8.8.8", "8.8.4.4"} 1470 cfg.DNSSearchDomains = []string{"example.com", "example.org", "example.net"} 1471 cfg.DNSOptions = []string{"ndots:1"} 1472 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1473 1474 client, d, handle, cleanup := dockerSetup(t, task, nil) 1475 defer cleanup() 1476 1477 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1478 1479 container, err := client.InspectContainer(handle.containerID) 1480 require.NoError(t, err) 1481 1482 require.Exactly(t, cfg.DNSServers, container.HostConfig.DNS) 1483 require.Exactly(t, cfg.DNSSearchDomains, container.HostConfig.DNSSearch) 1484 require.Exactly(t, cfg.DNSOptions, container.HostConfig.DNSOptions) 1485 } 1486 1487 func TestDockerDriver_MemoryHardLimit(t *testing.T) { 1488 if !tu.IsCI() { 1489 t.Parallel() 1490 } 1491 testutil.DockerCompatible(t) 1492 if runtime.GOOS == "windows" { 1493 t.Skip("Windows does not support MemoryReservation") 1494 } 1495 1496 task, cfg, ports := dockerTask(t) 1497 defer freeport.Return(ports) 1498 1499 cfg.MemoryHardLimit = 300 1500 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1501 1502 client, d, handle, cleanup := dockerSetup(t, task, nil) 1503 defer cleanup() 1504 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1505 1506 container, err := client.InspectContainer(handle.containerID) 1507 require.NoError(t, err) 1508 1509 require.Equal(t, task.Resources.LinuxResources.MemoryLimitBytes, container.HostConfig.MemoryReservation) 1510 require.Equal(t, cfg.MemoryHardLimit*1024*1024, container.HostConfig.Memory) 1511 } 1512 1513 func TestDockerDriver_MACAddress(t *testing.T) { 1514 if !tu.IsCI() { 1515 t.Parallel() 1516 } 1517 testutil.DockerCompatible(t) 1518 if runtime.GOOS == "windows" { 1519 t.Skip("Windows docker does not support setting MacAddress") 1520 } 1521 1522 task, cfg, ports := dockerTask(t) 1523 defer freeport.Return(ports) 1524 cfg.MacAddress = "00:16:3e:00:00:00" 1525 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1526 1527 client, d, handle, cleanup := dockerSetup(t, task, nil) 1528 defer cleanup() 1529 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1530 1531 container, err := client.InspectContainer(handle.containerID) 1532 require.NoError(t, err) 1533 1534 require.Equal(t, cfg.MacAddress, container.NetworkSettings.MacAddress) 1535 } 1536 1537 func TestDockerWorkDir(t *testing.T) { 1538 if !tu.IsCI() { 1539 t.Parallel() 1540 } 1541 testutil.DockerCompatible(t) 1542 1543 task, cfg, ports := dockerTask(t) 1544 defer freeport.Return(ports) 1545 cfg.WorkDir = "/some/path" 1546 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1547 1548 client, d, handle, cleanup := dockerSetup(t, task, nil) 1549 defer cleanup() 1550 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1551 1552 container, err := client.InspectContainer(handle.containerID) 1553 require.NoError(t, err) 1554 require.Equal(t, cfg.WorkDir, filepath.ToSlash(container.Config.WorkingDir)) 1555 } 1556 1557 func inSlice(needle string, haystack []string) bool { 1558 for _, h := range haystack { 1559 if h == needle { 1560 return true 1561 } 1562 } 1563 return false 1564 } 1565 1566 func TestDockerDriver_PortsNoMap(t *testing.T) { 1567 if !tu.IsCI() { 1568 t.Parallel() 1569 } 1570 testutil.DockerCompatible(t) 1571 1572 task, _, ports := dockerTask(t) 1573 defer freeport.Return(ports) 1574 res := ports[0] 1575 dyn := ports[1] 1576 1577 client, d, handle, cleanup := dockerSetup(t, task, nil) 1578 defer cleanup() 1579 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1580 1581 container, err := client.InspectContainer(handle.containerID) 1582 require.NoError(t, err) 1583 1584 // Verify that the correct ports are EXPOSED 1585 expectedExposedPorts := map[docker.Port]struct{}{ 1586 docker.Port(fmt.Sprintf("%d/tcp", res)): {}, 1587 docker.Port(fmt.Sprintf("%d/udp", res)): {}, 1588 docker.Port(fmt.Sprintf("%d/tcp", dyn)): {}, 1589 docker.Port(fmt.Sprintf("%d/udp", dyn)): {}, 1590 } 1591 1592 require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts) 1593 1594 hostIP := "127.0.0.1" 1595 if runtime.GOOS == "windows" { 1596 hostIP = "" 1597 } 1598 1599 // Verify that the correct ports are FORWARDED 1600 expectedPortBindings := map[docker.Port][]docker.PortBinding{ 1601 docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, 1602 docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, 1603 docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, 1604 docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, 1605 } 1606 1607 require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings) 1608 } 1609 1610 func TestDockerDriver_PortsMapping(t *testing.T) { 1611 if !tu.IsCI() { 1612 t.Parallel() 1613 } 1614 testutil.DockerCompatible(t) 1615 1616 task, cfg, ports := dockerTask(t) 1617 defer freeport.Return(ports) 1618 res := ports[0] 1619 dyn := ports[1] 1620 cfg.PortMap = map[string]int{ 1621 "main": 8080, 1622 "REDIS": 6379, 1623 } 1624 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1625 1626 client, d, handle, cleanup := dockerSetup(t, task, nil) 1627 defer cleanup() 1628 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1629 1630 container, err := client.InspectContainer(handle.containerID) 1631 require.NoError(t, err) 1632 1633 // Verify that the port environment variables are set 1634 require.Contains(t, container.Config.Env, "NOMAD_PORT_main=8080") 1635 require.Contains(t, container.Config.Env, "NOMAD_PORT_REDIS=6379") 1636 1637 // Verify that the correct ports are EXPOSED 1638 expectedExposedPorts := map[docker.Port]struct{}{ 1639 docker.Port("8080/tcp"): {}, 1640 docker.Port("8080/udp"): {}, 1641 docker.Port("6379/tcp"): {}, 1642 docker.Port("6379/udp"): {}, 1643 } 1644 1645 require.Exactly(t, expectedExposedPorts, container.Config.ExposedPorts) 1646 1647 hostIP := "127.0.0.1" 1648 if runtime.GOOS == "windows" { 1649 hostIP = "" 1650 } 1651 1652 // Verify that the correct ports are FORWARDED 1653 expectedPortBindings := map[docker.Port][]docker.PortBinding{ 1654 docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, 1655 docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, 1656 docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, 1657 docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, 1658 } 1659 require.Exactly(t, expectedPortBindings, container.HostConfig.PortBindings) 1660 } 1661 1662 func TestDockerDriver_CreateContainerConfig_PortsMapping(t *testing.T) { 1663 t.Parallel() 1664 1665 task, cfg, ports := dockerTask(t) 1666 defer freeport.Return(ports) 1667 res := ports[0] 1668 dyn := ports[1] 1669 cfg.PortMap = map[string]int{ 1670 "main": 8080, 1671 "REDIS": 6379, 1672 } 1673 dh := dockerDriverHarness(t, nil) 1674 driver := dh.Impl().(*Driver) 1675 1676 c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 1677 require.NoError(t, err) 1678 1679 require.Equal(t, "org/repo:0.1", c.Config.Image) 1680 require.Contains(t, c.Config.Env, "NOMAD_PORT_main=8080") 1681 require.Contains(t, c.Config.Env, "NOMAD_PORT_REDIS=6379") 1682 1683 // Verify that the correct ports are FORWARDED 1684 hostIP := "127.0.0.1" 1685 if runtime.GOOS == "windows" { 1686 hostIP = "" 1687 } 1688 expectedPortBindings := map[docker.Port][]docker.PortBinding{ 1689 docker.Port("8080/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, 1690 docker.Port("8080/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", res)}}, 1691 docker.Port("6379/tcp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, 1692 docker.Port("6379/udp"): {{HostIP: hostIP, HostPort: fmt.Sprintf("%d", dyn)}}, 1693 } 1694 require.Exactly(t, expectedPortBindings, c.HostConfig.PortBindings) 1695 1696 } 1697 1698 func TestDockerDriver_CleanupContainer(t *testing.T) { 1699 if !tu.IsCI() { 1700 t.Parallel() 1701 } 1702 testutil.DockerCompatible(t) 1703 1704 task, cfg, ports := dockerTask(t) 1705 defer freeport.Return(ports) 1706 cfg.Command = "echo" 1707 cfg.Args = []string{"hello"} 1708 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1709 1710 client, d, handle, cleanup := dockerSetup(t, task, nil) 1711 defer cleanup() 1712 1713 waitCh, err := d.WaitTask(context.Background(), task.ID) 1714 require.NoError(t, err) 1715 1716 select { 1717 case res := <-waitCh: 1718 if !res.Successful() { 1719 t.Fatalf("err: %v", res) 1720 } 1721 1722 err = d.DestroyTask(task.ID, false) 1723 require.NoError(t, err) 1724 1725 time.Sleep(3 * time.Second) 1726 1727 // Ensure that the container isn't present 1728 _, err := client.InspectContainer(handle.containerID) 1729 if err == nil { 1730 t.Fatalf("expected to not get container") 1731 } 1732 1733 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 1734 t.Fatalf("timeout") 1735 } 1736 } 1737 1738 func TestDockerDriver_EnableImageGC(t *testing.T) { 1739 testutil.DockerCompatible(t) 1740 1741 task, cfg, ports := dockerTask(t) 1742 defer freeport.Return(ports) 1743 cfg.Command = "echo" 1744 cfg.Args = []string{"hello"} 1745 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1746 1747 client := newTestDockerClient(t) 1748 driver := dockerDriverHarness(t, map[string]interface{}{ 1749 "gc": map[string]interface{}{ 1750 "container": true, 1751 "image": true, 1752 "image_delay": "2s", 1753 }, 1754 }) 1755 cleanup := driver.MkAllocDir(task, true) 1756 defer cleanup() 1757 1758 cleanSlate(client, cfg.Image) 1759 1760 copyImage(t, task.TaskDir(), "busybox.tar") 1761 _, _, err := driver.StartTask(task) 1762 require.NoError(t, err) 1763 1764 dockerDriver, ok := driver.Impl().(*Driver) 1765 require.True(t, ok) 1766 _, ok = dockerDriver.tasks.Get(task.ID) 1767 require.True(t, ok) 1768 1769 waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID) 1770 require.NoError(t, err) 1771 select { 1772 case res := <-waitCh: 1773 if !res.Successful() { 1774 t.Fatalf("err: %v", res) 1775 } 1776 1777 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 1778 t.Fatalf("timeout") 1779 } 1780 1781 // we haven't called DestroyTask, image should be present 1782 _, err = client.InspectImage(cfg.Image) 1783 require.NoError(t, err) 1784 1785 err = dockerDriver.DestroyTask(task.ID, false) 1786 require.NoError(t, err) 1787 1788 // image_delay is 3s, so image should still be around for a bit 1789 _, err = client.InspectImage(cfg.Image) 1790 require.NoError(t, err) 1791 1792 // Ensure image was removed 1793 tu.WaitForResult(func() (bool, error) { 1794 if _, err := client.InspectImage(cfg.Image); err == nil { 1795 return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image) 1796 } 1797 1798 return true, nil 1799 }, func(err error) { 1800 require.NoError(t, err) 1801 }) 1802 } 1803 1804 func TestDockerDriver_DisableImageGC(t *testing.T) { 1805 testutil.DockerCompatible(t) 1806 1807 task, cfg, ports := dockerTask(t) 1808 defer freeport.Return(ports) 1809 cfg.Command = "echo" 1810 cfg.Args = []string{"hello"} 1811 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1812 1813 client := newTestDockerClient(t) 1814 driver := dockerDriverHarness(t, map[string]interface{}{ 1815 "gc": map[string]interface{}{ 1816 "container": true, 1817 "image": false, 1818 "image_delay": "1s", 1819 }, 1820 }) 1821 cleanup := driver.MkAllocDir(task, true) 1822 defer cleanup() 1823 1824 cleanSlate(client, cfg.Image) 1825 1826 copyImage(t, task.TaskDir(), "busybox.tar") 1827 _, _, err := driver.StartTask(task) 1828 require.NoError(t, err) 1829 1830 dockerDriver, ok := driver.Impl().(*Driver) 1831 require.True(t, ok) 1832 handle, ok := dockerDriver.tasks.Get(task.ID) 1833 require.True(t, ok) 1834 1835 waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID) 1836 require.NoError(t, err) 1837 select { 1838 case res := <-waitCh: 1839 if !res.Successful() { 1840 t.Fatalf("err: %v", res) 1841 } 1842 1843 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 1844 t.Fatalf("timeout") 1845 } 1846 1847 // we haven't called DestroyTask, image should be present 1848 _, err = client.InspectImage(handle.containerImage) 1849 require.NoError(t, err) 1850 1851 err = dockerDriver.DestroyTask(task.ID, false) 1852 require.NoError(t, err) 1853 1854 // image_delay is 1s, wait a little longer 1855 time.Sleep(3 * time.Second) 1856 1857 // image should not have been removed or scheduled to be removed 1858 _, err = client.InspectImage(cfg.Image) 1859 require.NoError(t, err) 1860 dockerDriver.coordinator.imageLock.Lock() 1861 _, ok = dockerDriver.coordinator.deleteFuture[handle.containerImage] 1862 require.False(t, ok, "image should not be registered for deletion") 1863 dockerDriver.coordinator.imageLock.Unlock() 1864 } 1865 1866 func TestDockerDriver_MissingContainer_Cleanup(t *testing.T) { 1867 testutil.DockerCompatible(t) 1868 1869 task, cfg, ports := dockerTask(t) 1870 defer freeport.Return(ports) 1871 cfg.Command = "echo" 1872 cfg.Args = []string{"hello"} 1873 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1874 1875 client := newTestDockerClient(t) 1876 driver := dockerDriverHarness(t, map[string]interface{}{ 1877 "gc": map[string]interface{}{ 1878 "container": true, 1879 "image": true, 1880 "image_delay": "0s", 1881 }, 1882 }) 1883 cleanup := driver.MkAllocDir(task, true) 1884 defer cleanup() 1885 1886 cleanSlate(client, cfg.Image) 1887 1888 copyImage(t, task.TaskDir(), "busybox.tar") 1889 _, _, err := driver.StartTask(task) 1890 require.NoError(t, err) 1891 1892 dockerDriver, ok := driver.Impl().(*Driver) 1893 require.True(t, ok) 1894 h, ok := dockerDriver.tasks.Get(task.ID) 1895 require.True(t, ok) 1896 1897 waitCh, err := dockerDriver.WaitTask(context.Background(), task.ID) 1898 require.NoError(t, err) 1899 select { 1900 case res := <-waitCh: 1901 if !res.Successful() { 1902 t.Fatalf("err: %v", res) 1903 } 1904 1905 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 1906 t.Fatalf("timeout") 1907 } 1908 1909 // remove the container out-of-band 1910 require.NoError(t, client.RemoveContainer(docker.RemoveContainerOptions{ 1911 ID: h.containerID, 1912 })) 1913 1914 require.NoError(t, dockerDriver.DestroyTask(task.ID, false)) 1915 1916 // Ensure image was removed 1917 tu.WaitForResult(func() (bool, error) { 1918 if _, err := client.InspectImage(cfg.Image); err == nil { 1919 return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", cfg.Image) 1920 } 1921 1922 return true, nil 1923 }, func(err error) { 1924 require.NoError(t, err) 1925 }) 1926 1927 // Ensure that task handle was removed 1928 _, ok = dockerDriver.tasks.Get(task.ID) 1929 require.False(t, ok) 1930 } 1931 1932 func TestDockerDriver_Stats(t *testing.T) { 1933 if !tu.IsCI() { 1934 t.Parallel() 1935 } 1936 testutil.DockerCompatible(t) 1937 1938 task, cfg, ports := dockerTask(t) 1939 defer freeport.Return(ports) 1940 cfg.Command = "sleep" 1941 cfg.Args = []string{"1000"} 1942 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 1943 1944 _, d, handle, cleanup := dockerSetup(t, task, nil) 1945 defer cleanup() 1946 require.NoError(t, d.WaitUntilStarted(task.ID, 5*time.Second)) 1947 1948 go func() { 1949 defer d.DestroyTask(task.ID, true) 1950 ctx, cancel := context.WithCancel(context.Background()) 1951 defer cancel() 1952 ch, err := handle.Stats(ctx, 1*time.Second) 1953 assert.NoError(t, err) 1954 select { 1955 case ru := <-ch: 1956 assert.NotNil(t, ru.ResourceUsage) 1957 case <-time.After(3 * time.Second): 1958 assert.Fail(t, "stats timeout") 1959 } 1960 }() 1961 1962 waitCh, err := d.WaitTask(context.Background(), task.ID) 1963 require.NoError(t, err) 1964 select { 1965 case res := <-waitCh: 1966 if res.Successful() { 1967 t.Fatalf("should err: %v", res) 1968 } 1969 case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second): 1970 t.Fatalf("timeout") 1971 } 1972 } 1973 1974 func setupDockerVolumes(t *testing.T, cfg map[string]interface{}, hostpath string) (*drivers.TaskConfig, *dtestutil.DriverHarness, *TaskConfig, string, func()) { 1975 testutil.DockerCompatible(t) 1976 1977 randfn := fmt.Sprintf("test-%d", rand.Int()) 1978 hostfile := filepath.Join(hostpath, randfn) 1979 var containerPath string 1980 if runtime.GOOS == "windows" { 1981 containerPath = "C:\\data" 1982 } else { 1983 containerPath = "/mnt/vol" 1984 } 1985 containerFile := filepath.Join(containerPath, randfn) 1986 1987 taskCfg := newTaskConfig("", []string{"touch", containerFile}) 1988 taskCfg.Volumes = []string{fmt.Sprintf("%s:%s", hostpath, containerPath)} 1989 1990 task := &drivers.TaskConfig{ 1991 ID: uuid.Generate(), 1992 Name: "ls", 1993 AllocID: uuid.Generate(), 1994 Env: map[string]string{"VOL_PATH": containerPath}, 1995 Resources: basicResources, 1996 } 1997 require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg)) 1998 1999 d := dockerDriverHarness(t, cfg) 2000 cleanup := d.MkAllocDir(task, true) 2001 2002 copyImage(t, task.TaskDir(), "busybox.tar") 2003 2004 return task, d, &taskCfg, hostfile, cleanup 2005 } 2006 2007 func TestDockerDriver_VolumesDisabled(t *testing.T) { 2008 if !tu.IsCI() { 2009 t.Parallel() 2010 } 2011 testutil.DockerCompatible(t) 2012 2013 cfg := map[string]interface{}{ 2014 "volumes": map[string]interface{}{ 2015 "enabled": false, 2016 }, 2017 "gc": map[string]interface{}{ 2018 "image": false, 2019 }, 2020 } 2021 2022 { 2023 tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesdisabled") 2024 if err != nil { 2025 t.Fatalf("error creating temporary dir: %v", err) 2026 } 2027 2028 task, driver, _, _, cleanup := setupDockerVolumes(t, cfg, tmpvol) 2029 defer cleanup() 2030 2031 _, _, err = driver.StartTask(task) 2032 defer driver.DestroyTask(task.ID, true) 2033 if err == nil { 2034 require.Fail(t, "Started driver successfully when volumes should have been disabled.") 2035 } 2036 } 2037 2038 // Relative paths should still be allowed 2039 { 2040 task, driver, _, fn, cleanup := setupDockerVolumes(t, cfg, ".") 2041 defer cleanup() 2042 2043 _, _, err := driver.StartTask(task) 2044 require.NoError(t, err) 2045 defer driver.DestroyTask(task.ID, true) 2046 2047 waitCh, err := driver.WaitTask(context.Background(), task.ID) 2048 require.NoError(t, err) 2049 select { 2050 case res := <-waitCh: 2051 if !res.Successful() { 2052 t.Fatalf("unexpected err: %v", res) 2053 } 2054 case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second): 2055 t.Fatalf("timeout") 2056 } 2057 2058 if _, err := ioutil.ReadFile(filepath.Join(task.TaskDir().Dir, fn)); err != nil { 2059 t.Fatalf("unexpected error reading %s: %v", fn, err) 2060 } 2061 } 2062 2063 // Volume Drivers should be rejected (error) 2064 { 2065 task, driver, taskCfg, _, cleanup := setupDockerVolumes(t, cfg, "fake_flocker_vol") 2066 defer cleanup() 2067 2068 taskCfg.VolumeDriver = "flocker" 2069 require.NoError(t, task.EncodeConcreteDriverConfig(taskCfg)) 2070 2071 _, _, err := driver.StartTask(task) 2072 defer driver.DestroyTask(task.ID, true) 2073 if err == nil { 2074 require.Fail(t, "Started driver successfully when volume drivers should have been disabled.") 2075 } 2076 } 2077 } 2078 2079 func TestDockerDriver_VolumesEnabled(t *testing.T) { 2080 if !tu.IsCI() { 2081 t.Parallel() 2082 } 2083 testutil.DockerCompatible(t) 2084 2085 tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesenabled") 2086 require.NoError(t, err) 2087 2088 // Evaluate symlinks so it works on MacOS 2089 tmpvol, err = filepath.EvalSymlinks(tmpvol) 2090 require.NoError(t, err) 2091 2092 task, driver, _, hostpath, cleanup := setupDockerVolumes(t, nil, tmpvol) 2093 defer cleanup() 2094 2095 _, _, err = driver.StartTask(task) 2096 require.NoError(t, err) 2097 defer driver.DestroyTask(task.ID, true) 2098 2099 waitCh, err := driver.WaitTask(context.Background(), task.ID) 2100 require.NoError(t, err) 2101 select { 2102 case res := <-waitCh: 2103 if !res.Successful() { 2104 t.Fatalf("unexpected err: %v", res) 2105 } 2106 case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second): 2107 t.Fatalf("timeout") 2108 } 2109 2110 if _, err := ioutil.ReadFile(hostpath); err != nil { 2111 t.Fatalf("unexpected error reading %s: %v", hostpath, err) 2112 } 2113 } 2114 2115 func TestDockerDriver_Mounts(t *testing.T) { 2116 if !tu.IsCI() { 2117 t.Parallel() 2118 } 2119 testutil.DockerCompatible(t) 2120 2121 goodMount := DockerMount{ 2122 Target: "/nomad", 2123 VolumeOptions: DockerVolumeOptions{ 2124 Labels: map[string]string{"foo": "bar"}, 2125 DriverConfig: DockerVolumeDriverConfig{ 2126 Name: "local", 2127 }, 2128 }, 2129 ReadOnly: true, 2130 Source: "test", 2131 } 2132 2133 if runtime.GOOS == "windows" { 2134 goodMount.Target = "C:\\nomad" 2135 } 2136 2137 cases := []struct { 2138 Name string 2139 Mounts []DockerMount 2140 Error string 2141 }{ 2142 { 2143 Name: "good-one", 2144 Error: "", 2145 Mounts: []DockerMount{goodMount}, 2146 }, 2147 { 2148 Name: "duplicate", 2149 Error: "Duplicate mount point", 2150 Mounts: []DockerMount{goodMount, goodMount, goodMount}, 2151 }, 2152 } 2153 2154 for _, c := range cases { 2155 t.Run(c.Name, func(t *testing.T) { 2156 d := dockerDriverHarness(t, nil) 2157 // Build the task 2158 task, cfg, ports := dockerTask(t) 2159 defer freeport.Return(ports) 2160 cfg.Command = "sleep" 2161 cfg.Args = []string{"10000"} 2162 cfg.Mounts = c.Mounts 2163 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 2164 cleanup := d.MkAllocDir(task, true) 2165 defer cleanup() 2166 2167 copyImage(t, task.TaskDir(), "busybox.tar") 2168 2169 _, _, err := d.StartTask(task) 2170 defer d.DestroyTask(task.ID, true) 2171 if err == nil && c.Error != "" { 2172 t.Fatalf("expected error: %v", c.Error) 2173 } else if err != nil { 2174 if c.Error == "" { 2175 t.Fatalf("unexpected error in prestart: %v", err) 2176 } else if !strings.Contains(err.Error(), c.Error) { 2177 t.Fatalf("expected error %q; got %v", c.Error, err) 2178 } 2179 } 2180 }) 2181 } 2182 } 2183 2184 func TestDockerDriver_AuthConfiguration(t *testing.T) { 2185 if !tu.IsCI() { 2186 t.Parallel() 2187 } 2188 testutil.DockerCompatible(t) 2189 2190 path := "./test-resources/docker/auth.json" 2191 cases := []struct { 2192 Repo string 2193 AuthConfig *docker.AuthConfiguration 2194 }{ 2195 { 2196 Repo: "lolwhat.com/what:1337", 2197 AuthConfig: nil, 2198 }, 2199 { 2200 Repo: "redis:3.2", 2201 AuthConfig: &docker.AuthConfiguration{ 2202 Username: "test", 2203 Password: "1234", 2204 Email: "", 2205 ServerAddress: "https://index.docker.io/v1/", 2206 }, 2207 }, 2208 { 2209 Repo: "quay.io/redis:3.2", 2210 AuthConfig: &docker.AuthConfiguration{ 2211 Username: "test", 2212 Password: "5678", 2213 Email: "", 2214 ServerAddress: "quay.io", 2215 }, 2216 }, 2217 { 2218 Repo: "other.io/redis:3.2", 2219 AuthConfig: &docker.AuthConfiguration{ 2220 Username: "test", 2221 Password: "abcd", 2222 Email: "", 2223 ServerAddress: "https://other.io/v1/", 2224 }, 2225 }, 2226 } 2227 2228 for _, c := range cases { 2229 act, err := authFromDockerConfig(path)(c.Repo) 2230 require.NoError(t, err) 2231 require.Exactly(t, c.AuthConfig, act) 2232 } 2233 } 2234 2235 func TestDockerDriver_AuthFromTaskConfig(t *testing.T) { 2236 if !tu.IsCI() { 2237 t.Parallel() 2238 } 2239 2240 cases := []struct { 2241 Auth DockerAuth 2242 AuthConfig *docker.AuthConfiguration 2243 Desc string 2244 }{ 2245 { 2246 Auth: DockerAuth{}, 2247 AuthConfig: nil, 2248 Desc: "Empty Config", 2249 }, 2250 { 2251 Auth: DockerAuth{ 2252 Username: "foo", 2253 Password: "bar", 2254 Email: "foo@bar.com", 2255 ServerAddr: "www.foobar.com", 2256 }, 2257 AuthConfig: &docker.AuthConfiguration{ 2258 Username: "foo", 2259 Password: "bar", 2260 Email: "foo@bar.com", 2261 ServerAddress: "www.foobar.com", 2262 }, 2263 Desc: "All fields set", 2264 }, 2265 { 2266 Auth: DockerAuth{ 2267 Username: "foo", 2268 Password: "bar", 2269 ServerAddr: "www.foobar.com", 2270 }, 2271 AuthConfig: &docker.AuthConfiguration{ 2272 Username: "foo", 2273 Password: "bar", 2274 ServerAddress: "www.foobar.com", 2275 }, 2276 Desc: "Email not set", 2277 }, 2278 } 2279 2280 for _, c := range cases { 2281 t.Run(c.Desc, func(t *testing.T) { 2282 act, err := authFromTaskConfig(&TaskConfig{Auth: c.Auth})("test") 2283 require.NoError(t, err) 2284 require.Exactly(t, c.AuthConfig, act) 2285 }) 2286 } 2287 } 2288 2289 func TestDockerDriver_OOMKilled(t *testing.T) { 2290 if !tu.IsCI() { 2291 t.Parallel() 2292 } 2293 testutil.DockerCompatible(t) 2294 2295 if runtime.GOOS == "windows" { 2296 t.Skip("Windows does not support OOM Killer") 2297 } 2298 2299 taskCfg := newTaskConfig("", []string{"sh", "-c", `sleep 2 && x=a && while true; do x="$x$x"; done`}) 2300 task := &drivers.TaskConfig{ 2301 ID: uuid.Generate(), 2302 Name: "oom-killed", 2303 AllocID: uuid.Generate(), 2304 Resources: basicResources, 2305 } 2306 task.Resources.LinuxResources.MemoryLimitBytes = 10 * 1024 * 1024 2307 task.Resources.NomadResources.Memory.MemoryMB = 10 2308 2309 require.NoError(t, task.EncodeConcreteDriverConfig(&taskCfg)) 2310 2311 d := dockerDriverHarness(t, nil) 2312 cleanup := d.MkAllocDir(task, true) 2313 defer cleanup() 2314 copyImage(t, task.TaskDir(), "busybox.tar") 2315 2316 _, _, err := d.StartTask(task) 2317 require.NoError(t, err) 2318 2319 defer d.DestroyTask(task.ID, true) 2320 2321 waitCh, err := d.WaitTask(context.Background(), task.ID) 2322 require.NoError(t, err) 2323 select { 2324 case res := <-waitCh: 2325 if res.Successful() { 2326 t.Fatalf("expected error, but container exited successful") 2327 } 2328 2329 if !res.OOMKilled { 2330 t.Fatalf("not killed by OOM killer: %s", res.Err) 2331 } 2332 2333 t.Logf("Successfully killed by OOM killer") 2334 2335 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 2336 t.Fatalf("timeout") 2337 } 2338 } 2339 2340 func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) { 2341 if !tu.IsCI() { 2342 t.Parallel() 2343 } 2344 testutil.DockerCompatible(t) 2345 2346 brokenConfigs := []DockerDevice{ 2347 { 2348 HostPath: "", 2349 }, 2350 { 2351 HostPath: "/dev/sda1", 2352 CgroupPermissions: "rxb", 2353 }, 2354 } 2355 2356 testCases := []struct { 2357 deviceConfig []DockerDevice 2358 err error 2359 }{ 2360 {brokenConfigs[:1], fmt.Errorf("host path must be set in configuration for devices")}, 2361 {brokenConfigs[1:], fmt.Errorf("invalid cgroup permission string: \"rxb\"")}, 2362 } 2363 2364 for _, tc := range testCases { 2365 task, cfg, ports := dockerTask(t) 2366 cfg.Devices = tc.deviceConfig 2367 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 2368 d := dockerDriverHarness(t, nil) 2369 cleanup := d.MkAllocDir(task, true) 2370 copyImage(t, task.TaskDir(), "busybox.tar") 2371 defer cleanup() 2372 2373 _, _, err := d.StartTask(task) 2374 require.Error(t, err) 2375 require.Contains(t, err.Error(), tc.err.Error()) 2376 freeport.Return(ports) 2377 } 2378 } 2379 2380 func TestDockerDriver_Device_Success(t *testing.T) { 2381 if !tu.IsCI() { 2382 t.Parallel() 2383 } 2384 testutil.DockerCompatible(t) 2385 2386 if runtime.GOOS != "linux" { 2387 t.Skip("test device mounts only on linux") 2388 } 2389 2390 hostPath := "/dev/random" 2391 containerPath := "/dev/myrandom" 2392 perms := "rwm" 2393 2394 expectedDevice := docker.Device{ 2395 PathOnHost: hostPath, 2396 PathInContainer: containerPath, 2397 CgroupPermissions: perms, 2398 } 2399 config := DockerDevice{ 2400 HostPath: hostPath, 2401 ContainerPath: containerPath, 2402 } 2403 2404 task, cfg, ports := dockerTask(t) 2405 defer freeport.Return(ports) 2406 cfg.Devices = []DockerDevice{config} 2407 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 2408 2409 client, driver, handle, cleanup := dockerSetup(t, task, nil) 2410 defer cleanup() 2411 require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second)) 2412 2413 container, err := client.InspectContainer(handle.containerID) 2414 require.NoError(t, err) 2415 2416 require.NotEmpty(t, container.HostConfig.Devices, "Expected one device") 2417 require.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device ") 2418 } 2419 2420 func TestDockerDriver_Entrypoint(t *testing.T) { 2421 if !tu.IsCI() { 2422 t.Parallel() 2423 } 2424 testutil.DockerCompatible(t) 2425 2426 entrypoint := []string{"sh", "-c"} 2427 task, cfg, ports := dockerTask(t) 2428 defer freeport.Return(ports) 2429 cfg.Entrypoint = entrypoint 2430 cfg.Command = strings.Join(busyboxLongRunningCmd, " ") 2431 cfg.Args = []string{} 2432 2433 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 2434 2435 client, driver, handle, cleanup := dockerSetup(t, task, nil) 2436 defer cleanup() 2437 2438 require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second)) 2439 2440 container, err := client.InspectContainer(handle.containerID) 2441 require.NoError(t, err) 2442 2443 require.Len(t, container.Config.Entrypoint, 2, "Expected one entrypoint") 2444 require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint ") 2445 } 2446 2447 func TestDockerDriver_ReadonlyRootfs(t *testing.T) { 2448 if !tu.IsCI() { 2449 t.Parallel() 2450 } 2451 testutil.DockerCompatible(t) 2452 2453 if runtime.GOOS == "windows" { 2454 t.Skip("Windows Docker does not support root filesystem in read-only mode") 2455 } 2456 2457 task, cfg, ports := dockerTask(t) 2458 defer freeport.Return(ports) 2459 cfg.ReadonlyRootfs = true 2460 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 2461 2462 client, driver, handle, cleanup := dockerSetup(t, task, nil) 2463 defer cleanup() 2464 require.NoError(t, driver.WaitUntilStarted(task.ID, 5*time.Second)) 2465 2466 container, err := client.InspectContainer(handle.containerID) 2467 require.NoError(t, err) 2468 2469 require.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set") 2470 } 2471 2472 // fakeDockerClient can be used in places that accept an interface for the 2473 // docker client such as createContainer. 2474 type fakeDockerClient struct{} 2475 2476 func (fakeDockerClient) CreateContainer(docker.CreateContainerOptions) (*docker.Container, error) { 2477 return nil, fmt.Errorf("volume is attached on another node") 2478 } 2479 func (fakeDockerClient) InspectContainer(id string) (*docker.Container, error) { 2480 panic("not implemented") 2481 } 2482 func (fakeDockerClient) ListContainers(docker.ListContainersOptions) ([]docker.APIContainers, error) { 2483 panic("not implemented") 2484 } 2485 func (fakeDockerClient) RemoveContainer(opts docker.RemoveContainerOptions) error { 2486 panic("not implemented") 2487 } 2488 2489 // TestDockerDriver_VolumeError asserts volume related errors when creating a 2490 // container are recoverable. 2491 func TestDockerDriver_VolumeError(t *testing.T) { 2492 if !tu.IsCI() { 2493 t.Parallel() 2494 } 2495 2496 // setup 2497 _, cfg, ports := dockerTask(t) 2498 defer freeport.Return(ports) 2499 driver := dockerDriverHarness(t, nil) 2500 2501 // assert volume error is recoverable 2502 _, err := driver.Impl().(*Driver).createContainer(fakeDockerClient{}, docker.CreateContainerOptions{Config: &docker.Config{}}, cfg.Image) 2503 require.True(t, structs.IsRecoverable(err)) 2504 } 2505 2506 func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) { 2507 if !tu.IsCI() { 2508 t.Parallel() 2509 } 2510 testutil.DockerCompatible(t) 2511 2512 expectedPrefix := "2001:db8:1::242:ac11" 2513 expectedAdvertise := true 2514 task, cfg, ports := dockerTask(t) 2515 defer freeport.Return(ports) 2516 cfg.AdvertiseIPv6Addr = expectedAdvertise 2517 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 2518 2519 client := newTestDockerClient(t) 2520 2521 // Make sure IPv6 is enabled 2522 net, err := client.NetworkInfo("bridge") 2523 if err != nil { 2524 t.Skip("error retrieving bridge network information, skipping") 2525 } 2526 if net == nil || !net.EnableIPv6 { 2527 t.Skip("IPv6 not enabled on bridge network, skipping") 2528 } 2529 2530 driver := dockerDriverHarness(t, nil) 2531 cleanup := driver.MkAllocDir(task, true) 2532 copyImage(t, task.TaskDir(), "busybox.tar") 2533 defer cleanup() 2534 2535 _, network, err := driver.StartTask(task) 2536 defer driver.DestroyTask(task.ID, true) 2537 require.NoError(t, err) 2538 2539 require.Equal(t, expectedAdvertise, network.AutoAdvertise, "Wrong autoadvertise. Expect: %s, got: %s", expectedAdvertise, network.AutoAdvertise) 2540 2541 if !strings.HasPrefix(network.IP, expectedPrefix) { 2542 t.Fatalf("Got IP address %q want ip address with prefix %q", network.IP, expectedPrefix) 2543 } 2544 2545 handle, ok := driver.Impl().(*Driver).tasks.Get(task.ID) 2546 require.True(t, ok) 2547 2548 require.NoError(t, driver.WaitUntilStarted(task.ID, time.Second)) 2549 2550 container, err := client.InspectContainer(handle.containerID) 2551 require.NoError(t, err) 2552 2553 if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) { 2554 t.Fatalf("Got GlobalIPv6address %s want GlobalIPv6address with prefix %s", expectedPrefix, container.NetworkSettings.GlobalIPv6Address) 2555 } 2556 } 2557 2558 func TestParseDockerImage(t *testing.T) { 2559 tests := []struct { 2560 Image string 2561 Repo string 2562 Tag string 2563 }{ 2564 {"library/hello-world:1.0", "library/hello-world", "1.0"}, 2565 {"library/hello-world", "library/hello-world", "latest"}, 2566 {"library/hello-world:latest", "library/hello-world", "latest"}, 2567 {"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""}, 2568 } 2569 for _, test := range tests { 2570 t.Run(test.Image, func(t *testing.T) { 2571 repo, tag := parseDockerImage(test.Image) 2572 require.Equal(t, test.Repo, repo) 2573 require.Equal(t, test.Tag, tag) 2574 }) 2575 } 2576 } 2577 2578 func TestDockerImageRef(t *testing.T) { 2579 tests := []struct { 2580 Image string 2581 Repo string 2582 Tag string 2583 }{ 2584 {"library/hello-world:1.0", "library/hello-world", "1.0"}, 2585 {"library/hello-world:latest", "library/hello-world", "latest"}, 2586 {"library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", "library/hello-world@sha256:f5233545e43561214ca4891fd1157e1c3c563316ed8e237750d59bde73361e77", ""}, 2587 } 2588 for _, test := range tests { 2589 t.Run(test.Image, func(t *testing.T) { 2590 image := dockerImageRef(test.Repo, test.Tag) 2591 require.Equal(t, test.Image, image) 2592 }) 2593 } 2594 } 2595 2596 func waitForExist(t *testing.T, client *docker.Client, containerID string) { 2597 tu.WaitForResult(func() (bool, error) { 2598 container, err := client.InspectContainer(containerID) 2599 if err != nil { 2600 if _, ok := err.(*docker.NoSuchContainer); !ok { 2601 return false, err 2602 } 2603 } 2604 2605 return container != nil, nil 2606 }, func(err error) { 2607 require.NoError(t, err) 2608 }) 2609 } 2610 2611 // TestDockerDriver_CreationIdempotent asserts that createContainer and 2612 // and startContainers functions are idempotent, as we have some retry 2613 // logic there without ensureing we delete/destroy containers 2614 func TestDockerDriver_CreationIdempotent(t *testing.T) { 2615 if !tu.IsCI() { 2616 t.Parallel() 2617 } 2618 testutil.DockerCompatible(t) 2619 2620 task, cfg, ports := dockerTask(t) 2621 defer freeport.Return(ports) 2622 require.NoError(t, task.EncodeConcreteDriverConfig(cfg)) 2623 2624 client := newTestDockerClient(t) 2625 driver := dockerDriverHarness(t, nil) 2626 cleanup := driver.MkAllocDir(task, true) 2627 defer cleanup() 2628 2629 copyImage(t, task.TaskDir(), "busybox.tar") 2630 2631 d, ok := driver.Impl().(*Driver) 2632 require.True(t, ok) 2633 2634 _, err := d.createImage(task, cfg, client) 2635 require.NoError(t, err) 2636 2637 containerCfg, err := d.createContainerConfig(task, cfg, cfg.Image) 2638 require.NoError(t, err) 2639 2640 c, err := d.createContainer(client, containerCfg, cfg.Image) 2641 require.NoError(t, err) 2642 defer client.RemoveContainer(docker.RemoveContainerOptions{ 2643 ID: c.ID, 2644 Force: true, 2645 }) 2646 2647 // calling createContainer again creates a new one and remove old one 2648 c2, err := d.createContainer(client, containerCfg, cfg.Image) 2649 require.NoError(t, err) 2650 defer client.RemoveContainer(docker.RemoveContainerOptions{ 2651 ID: c2.ID, 2652 Force: true, 2653 }) 2654 2655 require.NotEqual(t, c.ID, c2.ID) 2656 // old container was destroyed 2657 { 2658 _, err := client.InspectContainer(c.ID) 2659 require.Error(t, err) 2660 require.Contains(t, err.Error(), NoSuchContainerError) 2661 } 2662 2663 // now start container twice 2664 require.NoError(t, d.startContainer(c2)) 2665 require.NoError(t, d.startContainer(c2)) 2666 2667 tu.WaitForResult(func() (bool, error) { 2668 c, err := client.InspectContainer(c2.ID) 2669 if err != nil { 2670 return false, fmt.Errorf("failed to get container status: %v", err) 2671 } 2672 2673 if !c.State.Running { 2674 return false, fmt.Errorf("container is not running but %v", c.State) 2675 } 2676 2677 return true, nil 2678 }, func(err error) { 2679 require.NoError(t, err) 2680 }) 2681 } 2682 2683 // TestDockerDriver_CreateContainerConfig_CPUHardLimit asserts that a default 2684 // CPU quota and period are set when cpu_hard_limit = true. 2685 func TestDockerDriver_CreateContainerConfig_CPUHardLimit(t *testing.T) { 2686 t.Parallel() 2687 2688 task, _, ports := dockerTask(t) 2689 defer freeport.Return(ports) 2690 2691 dh := dockerDriverHarness(t, nil) 2692 driver := dh.Impl().(*Driver) 2693 schema, _ := driver.TaskConfigSchema() 2694 spec, _ := hclspecutils.Convert(schema) 2695 2696 val, _, _ := hclutils.ParseHclInterface(map[string]interface{}{ 2697 "image": "foo/bar", 2698 "cpu_hard_limit": true, 2699 }, spec, nil) 2700 2701 require.NoError(t, task.EncodeDriverConfig(val)) 2702 cfg := &TaskConfig{} 2703 require.NoError(t, task.DecodeDriverConfig(cfg)) 2704 c, err := driver.createContainerConfig(task, cfg, "org/repo:0.1") 2705 require.NoError(t, err) 2706 2707 require.NotZero(t, c.HostConfig.CPUQuota) 2708 require.NotZero(t, c.HostConfig.CPUPeriod) 2709 } 2710 2711 func TestDockerDriver_memoryLimits(t *testing.T) { 2712 t.Parallel() 2713 2714 t.Run("driver hard limit not set", func(t *testing.T) { 2715 memory, memoryReservation := new(Driver).memoryLimits(0, 256*1024*1024) 2716 require.Equal(t, int64(256*1024*1024), memory) 2717 require.Equal(t, int64(0), memoryReservation) 2718 }) 2719 2720 t.Run("driver hard limit is set", func(t *testing.T) { 2721 memory, memoryReservation := new(Driver).memoryLimits(512, 256*1024*1024) 2722 require.Equal(t, int64(512*1024*1024), memory) 2723 require.Equal(t, int64(256*1024*1024), memoryReservation) 2724 }) 2725 }