github.com/dkerwin/nomad@v0.3.3-0.20160525181927-74554135514b/client/driver/docker_test.go (about) 1 package driver 2 3 import ( 4 "fmt" 5 "io/ioutil" 6 "math/rand" 7 "os" 8 "os/exec" 9 "path/filepath" 10 "reflect" 11 "runtime/debug" 12 "strings" 13 "testing" 14 "time" 15 16 docker "github.com/fsouza/go-dockerclient" 17 "github.com/hashicorp/go-plugin" 18 "github.com/hashicorp/nomad/client/allocdir" 19 "github.com/hashicorp/nomad/client/config" 20 "github.com/hashicorp/nomad/client/driver/env" 21 cstructs "github.com/hashicorp/nomad/client/driver/structs" 22 "github.com/hashicorp/nomad/client/testutil" 23 "github.com/hashicorp/nomad/helper/discover" 24 "github.com/hashicorp/nomad/nomad/structs" 25 tu "github.com/hashicorp/nomad/testutil" 26 ) 27 28 func dockerIsRemote(t *testing.T) bool { 29 client, err := docker.NewClientFromEnv() 30 if err != nil { 31 return false 32 } 33 34 // Technically this could be a local tcp socket but for testing purposes 35 // we'll just assume that tcp is only used for remote connections. 36 if client.Endpoint()[0:3] == "tcp" { 37 return true 38 } 39 return false 40 } 41 42 // Ports used by tests 43 var ( 44 docker_reserved = 32768 + int(rand.Int31n(25000)) 45 docker_dynamic = 32768 + int(rand.Int31n(25000)) 46 ) 47 48 // Returns a task with a reserved and dynamic port. The ports are returned 49 // respectively. 50 func dockerTask() (*structs.Task, int, int) { 51 docker_reserved += 1 52 docker_dynamic += 1 53 return &structs.Task{ 54 Name: "redis-demo", 55 Config: map[string]interface{}{ 56 "image": "redis", 57 }, 58 LogConfig: &structs.LogConfig{ 59 MaxFiles: 10, 60 MaxFileSizeMB: 10, 61 }, 62 Resources: &structs.Resources{ 63 MemoryMB: 256, 64 CPU: 512, 65 Networks: []*structs.NetworkResource{ 66 &structs.NetworkResource{ 67 IP: "127.0.0.1", 68 ReservedPorts: []structs.Port{{"main", docker_reserved}}, 69 DynamicPorts: []structs.Port{{"REDIS", docker_dynamic}}, 70 }, 71 }, 72 }, 73 }, docker_reserved, docker_dynamic 74 } 75 76 // dockerSetup does all of the basic setup you need to get a running docker 77 // process up and running for testing. Use like: 78 // 79 // task := taskTemplate() 80 // // do custom task configuration 81 // client, handle, cleanup := dockerSetup(t, task) 82 // defer cleanup() 83 // // do test stuff 84 // 85 // If there is a problem during setup this function will abort or skip the test 86 // and indicate the reason. 87 func dockerSetup(t *testing.T, task *structs.Task) (*docker.Client, DriverHandle, func()) { 88 if !testutil.DockerIsConnected(t) { 89 t.SkipNow() 90 } 91 92 client, err := docker.NewClientFromEnv() 93 if err != nil { 94 t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack()) 95 } 96 97 driverCtx, execCtx := testDriverContexts(task) 98 driver := NewDockerDriver(driverCtx) 99 100 handle, err := driver.Start(execCtx, task) 101 if err != nil { 102 execCtx.AllocDir.Destroy() 103 t.Fatalf("Failed to start driver: %s\nStack\n%s", err, debug.Stack()) 104 } 105 if handle == nil { 106 execCtx.AllocDir.Destroy() 107 t.Fatalf("handle is nil\nStack\n%s", debug.Stack()) 108 } 109 110 cleanup := func() { 111 handle.Kill() 112 execCtx.AllocDir.Destroy() 113 } 114 115 return client, handle, cleanup 116 } 117 118 func TestDockerDriver_Handle(t *testing.T) { 119 t.Parallel() 120 121 bin, err := discover.NomadExecutable() 122 if err != nil { 123 t.Fatalf("got an err: %v", err) 124 } 125 126 f, _ := ioutil.TempFile(os.TempDir(), "") 127 defer f.Close() 128 defer os.Remove(f.Name()) 129 pluginConfig := &plugin.ClientConfig{ 130 Cmd: exec.Command(bin, "syslog", f.Name()), 131 } 132 exec, pluginClient, err := createExecutor(pluginConfig, os.Stdout, &config.Config{}) 133 if err != nil { 134 t.Fatalf("got an err: %v", err) 135 } 136 defer pluginClient.Kill() 137 138 h := &DockerHandle{ 139 version: "version", 140 imageID: "imageid", 141 executor: exec, 142 pluginClient: pluginClient, 143 containerID: "containerid", 144 killTimeout: 5 * time.Nanosecond, 145 maxKillTimeout: 15 * time.Nanosecond, 146 doneCh: make(chan struct{}), 147 waitCh: make(chan *cstructs.WaitResult, 1), 148 } 149 150 actual := h.ID() 151 expected := fmt.Sprintf("DOCKER:{\"Version\":\"version\",\"ImageID\":\"imageid\",\"ContainerID\":\"containerid\",\"KillTimeout\":5,\"MaxKillTimeout\":15,\"PluginConfig\":{\"Pid\":%d,\"AddrNet\":\"unix\",\"AddrName\":\"%s\"}}", 152 pluginClient.ReattachConfig().Pid, pluginClient.ReattachConfig().Addr.String()) 153 if actual != expected { 154 t.Errorf("Expected `%s`, found `%s`", expected, actual) 155 } 156 } 157 158 // This test should always pass, even if docker daemon is not available 159 func TestDockerDriver_Fingerprint(t *testing.T) { 160 t.Parallel() 161 driverCtx, _ := testDriverContexts(&structs.Task{Name: "foo"}) 162 d := NewDockerDriver(driverCtx) 163 node := &structs.Node{ 164 Attributes: make(map[string]string), 165 } 166 apply, err := d.Fingerprint(&config.Config{}, node) 167 if err != nil { 168 t.Fatalf("err: %v", err) 169 } 170 if apply != testutil.DockerIsConnected(t) { 171 t.Fatalf("Fingerprinter should detect when docker is available") 172 } 173 if node.Attributes["driver.docker"] != "1" { 174 t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.") 175 } 176 t.Logf("Found docker version %s", node.Attributes["driver.docker.version"]) 177 } 178 179 func TestDockerDriver_StartOpen_Wait(t *testing.T) { 180 t.Parallel() 181 if !testutil.DockerIsConnected(t) { 182 t.SkipNow() 183 } 184 185 task := &structs.Task{ 186 Name: "redis-demo", 187 Config: map[string]interface{}{ 188 "image": "redis", 189 }, 190 LogConfig: &structs.LogConfig{ 191 MaxFiles: 10, 192 MaxFileSizeMB: 10, 193 }, 194 Resources: basicResources, 195 } 196 197 driverCtx, execCtx := testDriverContexts(task) 198 defer execCtx.AllocDir.Destroy() 199 d := NewDockerDriver(driverCtx) 200 201 handle, err := d.Start(execCtx, task) 202 if err != nil { 203 t.Fatalf("err: %v", err) 204 } 205 if handle == nil { 206 t.Fatalf("missing handle") 207 } 208 defer handle.Kill() 209 210 // Attempt to open 211 handle2, err := d.Open(execCtx, handle.ID()) 212 if err != nil { 213 t.Fatalf("err: %v", err) 214 } 215 if handle2 == nil { 216 t.Fatalf("missing handle") 217 } 218 } 219 220 func TestDockerDriver_Start_Wait(t *testing.T) { 221 t.Parallel() 222 task := &structs.Task{ 223 Name: "redis-demo", 224 Config: map[string]interface{}{ 225 "image": "redis", 226 "command": "/usr/local/bin/redis-server", 227 "args": []string{"-v"}, 228 }, 229 Resources: &structs.Resources{ 230 MemoryMB: 256, 231 CPU: 512, 232 }, 233 LogConfig: &structs.LogConfig{ 234 MaxFiles: 10, 235 MaxFileSizeMB: 10, 236 }, 237 } 238 239 _, handle, cleanup := dockerSetup(t, task) 240 defer cleanup() 241 242 // Update should be a no-op 243 err := handle.Update(task) 244 if err != nil { 245 t.Fatalf("err: %v", err) 246 } 247 248 select { 249 case res := <-handle.WaitCh(): 250 if !res.Successful() { 251 t.Fatalf("err: %v", res) 252 } 253 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 254 t.Fatalf("timeout") 255 } 256 } 257 258 func TestDockerDriver_Start_LoadImage(t *testing.T) { 259 task := &structs.Task{ 260 Name: "busybox-demo", 261 Config: map[string]interface{}{ 262 "image": "busybox", 263 "load": []string{"busybox.tar"}, 264 "command": "/bin/echo", 265 "args": []string{ 266 "hello", 267 }, 268 }, 269 LogConfig: &structs.LogConfig{ 270 MaxFiles: 10, 271 MaxFileSizeMB: 10, 272 }, 273 Resources: &structs.Resources{ 274 MemoryMB: 256, 275 CPU: 512, 276 }, 277 } 278 279 driverCtx, execCtx := testDriverContexts(task) 280 defer execCtx.AllocDir.Destroy() 281 d := NewDockerDriver(driverCtx) 282 283 // Copy the test jar into the task's directory 284 taskDir, _ := execCtx.AllocDir.TaskDirs[task.Name] 285 dst := filepath.Join(taskDir, allocdir.TaskLocal, "busybox.tar") 286 copyFile("./test-resources/docker/busybox.tar", dst, t) 287 288 handle, err := d.Start(execCtx, task) 289 if err != nil { 290 t.Fatalf("err: %v", err) 291 } 292 if handle == nil { 293 t.Fatalf("missing handle") 294 } 295 defer handle.Kill() 296 297 select { 298 case res := <-handle.WaitCh(): 299 if !res.Successful() { 300 t.Fatalf("err: %v", res) 301 } 302 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 303 t.Fatalf("timeout") 304 } 305 306 // Check that data was written to the shared alloc directory. 307 outputFile := filepath.Join(execCtx.AllocDir.LogDir(), "busybox-demo.stdout.0") 308 act, err := ioutil.ReadFile(outputFile) 309 if err != nil { 310 t.Fatalf("Couldn't read expected output: %v", err) 311 } 312 313 exp := "hello" 314 if strings.TrimSpace(string(act)) != exp { 315 t.Fatalf("Command outputted %v; want %v", act, exp) 316 } 317 318 } 319 320 func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) { 321 t.Parallel() 322 // This test requires that the alloc dir be mounted into docker as a volume. 323 // Because this cannot happen when docker is run remotely, e.g. when running 324 // docker in a VM, we skip this when we detect Docker is being run remotely. 325 if !testutil.DockerIsConnected(t) || dockerIsRemote(t) { 326 t.SkipNow() 327 } 328 329 exp := []byte{'w', 'i', 'n'} 330 file := "output.txt" 331 task := &structs.Task{ 332 Name: "redis-demo", 333 Config: map[string]interface{}{ 334 "image": "redis", 335 "command": "/bin/bash", 336 "args": []string{ 337 "-c", 338 fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`, 339 string(exp), env.AllocDir, file), 340 }, 341 }, 342 LogConfig: &structs.LogConfig{ 343 MaxFiles: 10, 344 MaxFileSizeMB: 10, 345 }, 346 Resources: &structs.Resources{ 347 MemoryMB: 256, 348 CPU: 512, 349 }, 350 } 351 352 driverCtx, execCtx := testDriverContexts(task) 353 defer execCtx.AllocDir.Destroy() 354 d := NewDockerDriver(driverCtx) 355 356 handle, err := d.Start(execCtx, task) 357 if err != nil { 358 t.Fatalf("err: %v", err) 359 } 360 if handle == nil { 361 t.Fatalf("missing handle") 362 } 363 defer handle.Kill() 364 365 select { 366 case res := <-handle.WaitCh(): 367 if !res.Successful() { 368 t.Fatalf("err: %v", res) 369 } 370 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 371 t.Fatalf("timeout") 372 } 373 374 // Check that data was written to the shared alloc directory. 375 outputFile := filepath.Join(execCtx.AllocDir.SharedDir, file) 376 act, err := ioutil.ReadFile(outputFile) 377 if err != nil { 378 t.Fatalf("Couldn't read expected output: %v", err) 379 } 380 381 if !reflect.DeepEqual(act, exp) { 382 t.Fatalf("Command outputted %v; want %v", act, exp) 383 } 384 } 385 386 func TestDockerDriver_Start_Kill_Wait(t *testing.T) { 387 t.Parallel() 388 task := &structs.Task{ 389 Name: "redis-demo", 390 Config: map[string]interface{}{ 391 "image": "redis", 392 "command": "/bin/sleep", 393 "args": []string{"10"}, 394 }, 395 LogConfig: &structs.LogConfig{ 396 MaxFiles: 10, 397 MaxFileSizeMB: 10, 398 }, 399 Resources: basicResources, 400 } 401 402 _, handle, cleanup := dockerSetup(t, task) 403 defer cleanup() 404 405 go func() { 406 time.Sleep(100 * time.Millisecond) 407 err := handle.Kill() 408 if err != nil { 409 t.Fatalf("err: %v", err) 410 } 411 }() 412 413 select { 414 case res := <-handle.WaitCh(): 415 if res.Successful() { 416 t.Fatalf("should err: %v", res) 417 } 418 case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second): 419 t.Fatalf("timeout") 420 } 421 } 422 423 func TestDocker_StartN(t *testing.T) { 424 t.Parallel() 425 if !testutil.DockerIsConnected(t) { 426 t.SkipNow() 427 } 428 429 task1, _, _ := dockerTask() 430 task2, _, _ := dockerTask() 431 task3, _, _ := dockerTask() 432 taskList := []*structs.Task{task1, task2, task3} 433 434 handles := make([]DriverHandle, len(taskList)) 435 436 t.Logf("==> Starting %d tasks", len(taskList)) 437 438 // Let's spin up a bunch of things 439 var err error 440 for idx, task := range taskList { 441 driverCtx, execCtx := testDriverContexts(task) 442 defer execCtx.AllocDir.Destroy() 443 d := NewDockerDriver(driverCtx) 444 445 handles[idx], err = d.Start(execCtx, task) 446 if err != nil { 447 t.Errorf("Failed starting task #%d: %s", idx+1, err) 448 } 449 } 450 451 t.Log("==> All tasks are started. Terminating...") 452 453 for idx, handle := range handles { 454 if handle == nil { 455 t.Errorf("Bad handle for task #%d", idx+1) 456 continue 457 } 458 459 err := handle.Kill() 460 if err != nil { 461 t.Errorf("Failed stopping task #%d: %s", idx+1, err) 462 } 463 } 464 465 t.Log("==> Test complete!") 466 } 467 468 func TestDocker_StartNVersions(t *testing.T) { 469 t.Parallel() 470 if !testutil.DockerIsConnected(t) { 471 t.SkipNow() 472 } 473 474 task1, _, _ := dockerTask() 475 task1.Config["image"] = "redis" 476 477 task2, _, _ := dockerTask() 478 task2.Config["image"] = "redis:latest" 479 480 task3, _, _ := dockerTask() 481 task3.Config["image"] = "redis:3.0" 482 483 taskList := []*structs.Task{task1, task2, task3} 484 485 handles := make([]DriverHandle, len(taskList)) 486 487 t.Logf("==> Starting %d tasks", len(taskList)) 488 489 // Let's spin up a bunch of things 490 var err error 491 for idx, task := range taskList { 492 driverCtx, execCtx := testDriverContexts(task) 493 defer execCtx.AllocDir.Destroy() 494 d := NewDockerDriver(driverCtx) 495 496 handles[idx], err = d.Start(execCtx, task) 497 if err != nil { 498 t.Errorf("Failed starting task #%d: %s", idx+1, err) 499 } 500 } 501 502 t.Log("==> All tasks are started. Terminating...") 503 504 for idx, handle := range handles { 505 if handle == nil { 506 t.Errorf("Bad handle for task #%d", idx+1) 507 continue 508 } 509 510 err := handle.Kill() 511 if err != nil { 512 t.Errorf("Failed stopping task #%d: %s", idx+1, err) 513 } 514 } 515 516 t.Log("==> Test complete!") 517 } 518 519 func TestDockerHostNet(t *testing.T) { 520 t.Parallel() 521 expected := "host" 522 523 task := &structs.Task{ 524 Name: "redis-demo", 525 Config: map[string]interface{}{ 526 "image": "redis", 527 "network_mode": expected, 528 }, 529 Resources: &structs.Resources{ 530 MemoryMB: 256, 531 CPU: 512, 532 }, 533 LogConfig: &structs.LogConfig{ 534 MaxFiles: 10, 535 MaxFileSizeMB: 10, 536 }, 537 } 538 539 client, handle, cleanup := dockerSetup(t, task) 540 defer cleanup() 541 542 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 543 if err != nil { 544 t.Fatalf("err: %v", err) 545 } 546 547 actual := container.HostConfig.NetworkMode 548 if actual != expected { 549 t.Errorf("DNS Network mode doesn't match.\nExpected:\n%s\nGot:\n%s\n", expected, actual) 550 } 551 } 552 553 func TestDockerLabels(t *testing.T) { 554 t.Parallel() 555 task, _, _ := dockerTask() 556 task.Config["labels"] = []map[string]string{ 557 map[string]string{ 558 "label1": "value1", 559 "label2": "value2", 560 }, 561 } 562 563 client, handle, cleanup := dockerSetup(t, task) 564 defer cleanup() 565 566 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 567 if err != nil { 568 t.Fatalf("err: %v", err) 569 } 570 571 if want, got := 2, len(container.Config.Labels); want != got { 572 t.Errorf("Wrong labels count for docker job. Expect: %d, got: %d", want, got) 573 } 574 575 if want, got := "value1", container.Config.Labels["label1"]; want != got { 576 t.Errorf("Wrong label value docker job. Expect: %s, got: %s", want, got) 577 } 578 } 579 580 func TestDockerDNS(t *testing.T) { 581 t.Parallel() 582 task, _, _ := dockerTask() 583 task.Config["dns_servers"] = []string{"8.8.8.8", "8.8.4.4"} 584 task.Config["dns_search_domains"] = []string{"example.com", "example.org", "example.net"} 585 586 client, handle, cleanup := dockerSetup(t, task) 587 defer cleanup() 588 589 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 590 if err != nil { 591 t.Fatalf("err: %v", err) 592 } 593 594 if !reflect.DeepEqual(task.Config["dns_servers"], container.HostConfig.DNS) { 595 t.Errorf("DNS Servers don't match.\nExpected:\n%s\nGot:\n%s\n", task.Config["dns_servers"], container.HostConfig.DNS) 596 } 597 598 if !reflect.DeepEqual(task.Config["dns_search_domains"], container.HostConfig.DNSSearch) { 599 t.Errorf("DNS Servers don't match.\nExpected:\n%s\nGot:\n%s\n", task.Config["dns_search_domains"], container.HostConfig.DNSSearch) 600 } 601 } 602 603 func inSlice(needle string, haystack []string) bool { 604 for _, h := range haystack { 605 if h == needle { 606 return true 607 } 608 } 609 return false 610 } 611 612 func TestDockerPortsNoMap(t *testing.T) { 613 t.Parallel() 614 task, res, dyn := dockerTask() 615 616 client, handle, cleanup := dockerSetup(t, task) 617 defer cleanup() 618 619 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 620 if err != nil { 621 t.Fatalf("err: %v", err) 622 } 623 624 // Verify that the correct ports are EXPOSED 625 expectedExposedPorts := map[docker.Port]struct{}{ 626 docker.Port(fmt.Sprintf("%d/tcp", res)): struct{}{}, 627 docker.Port(fmt.Sprintf("%d/udp", res)): struct{}{}, 628 docker.Port(fmt.Sprintf("%d/tcp", dyn)): struct{}{}, 629 docker.Port(fmt.Sprintf("%d/udp", dyn)): struct{}{}, 630 // This one comes from the redis container 631 docker.Port("6379/tcp"): struct{}{}, 632 } 633 634 if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) { 635 t.Errorf("Exposed ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedExposedPorts, container.Config.ExposedPorts) 636 } 637 638 // Verify that the correct ports are FORWARDED 639 expectedPortBindings := map[docker.Port][]docker.PortBinding{ 640 docker.Port(fmt.Sprintf("%d/tcp", res)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 641 docker.Port(fmt.Sprintf("%d/udp", res)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 642 docker.Port(fmt.Sprintf("%d/tcp", dyn)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 643 docker.Port(fmt.Sprintf("%d/udp", dyn)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 644 } 645 646 if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) { 647 t.Errorf("Forwarded ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedPortBindings, container.HostConfig.PortBindings) 648 } 649 650 expectedEnvironment := map[string]string{ 651 "NOMAD_ADDR_main": fmt.Sprintf("127.0.0.1:%d", res), 652 "NOMAD_ADDR_REDIS": fmt.Sprintf("127.0.0.1:%d", dyn), 653 } 654 655 for key, val := range expectedEnvironment { 656 search := fmt.Sprintf("%s=%s", key, val) 657 if !inSlice(search, container.Config.Env) { 658 t.Errorf("Expected to find %s in container environment: %+v", search, container.Config.Env) 659 } 660 } 661 } 662 663 func TestDockerPortsMapping(t *testing.T) { 664 t.Parallel() 665 task, res, dyn := dockerTask() 666 task.Config["port_map"] = []map[string]string{ 667 map[string]string{ 668 "main": "8080", 669 "REDIS": "6379", 670 }, 671 } 672 673 client, handle, cleanup := dockerSetup(t, task) 674 defer cleanup() 675 676 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 677 if err != nil { 678 t.Fatalf("err: %v", err) 679 } 680 681 // Verify that the correct ports are EXPOSED 682 expectedExposedPorts := map[docker.Port]struct{}{ 683 docker.Port("8080/tcp"): struct{}{}, 684 docker.Port("8080/udp"): struct{}{}, 685 docker.Port("6379/tcp"): struct{}{}, 686 docker.Port("6379/udp"): struct{}{}, 687 } 688 689 if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) { 690 t.Errorf("Exposed ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedExposedPorts, container.Config.ExposedPorts) 691 } 692 693 // Verify that the correct ports are FORWARDED 694 expectedPortBindings := map[docker.Port][]docker.PortBinding{ 695 docker.Port("8080/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 696 docker.Port("8080/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 697 docker.Port("6379/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 698 docker.Port("6379/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 699 } 700 701 if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) { 702 t.Errorf("Forwarded ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedPortBindings, container.HostConfig.PortBindings) 703 } 704 705 expectedEnvironment := map[string]string{ 706 "NOMAD_ADDR_main": "127.0.0.1:8080", 707 "NOMAD_ADDR_REDIS": "127.0.0.1:6379", 708 "NOMAD_HOST_PORT_main": "8080", 709 } 710 711 for key, val := range expectedEnvironment { 712 search := fmt.Sprintf("%s=%s", key, val) 713 if !inSlice(search, container.Config.Env) { 714 t.Errorf("Expected to find %s in container environment: %+v", search, container.Config.Env) 715 } 716 } 717 } 718 719 func TestDockerUser(t *testing.T) { 720 t.Parallel() 721 722 task := &structs.Task{ 723 Name: "redis-demo", 724 User: "alice", 725 Config: map[string]interface{}{ 726 "image": "redis", 727 "command": "sleep", 728 "args": []string{"10000"}, 729 }, 730 Resources: &structs.Resources{ 731 MemoryMB: 256, 732 CPU: 512, 733 }, 734 LogConfig: &structs.LogConfig{ 735 MaxFiles: 10, 736 MaxFileSizeMB: 10, 737 }, 738 } 739 740 if !testutil.DockerIsConnected(t) { 741 t.SkipNow() 742 } 743 744 driverCtx, execCtx := testDriverContexts(task) 745 driver := NewDockerDriver(driverCtx) 746 defer execCtx.AllocDir.Destroy() 747 748 // It should fail because the user "alice" does not exist on the given 749 // image. 750 handle, err := driver.Start(execCtx, task) 751 if err == nil { 752 handle.Kill() 753 t.Fatalf("Should've failed") 754 } 755 756 msgs := []string{ 757 "System error: Unable to find user alice", 758 "linux spec user: Unable to find user alice", 759 } 760 var found bool 761 for _, msg := range msgs { 762 if strings.Contains(err.Error(), msg) { 763 found = true 764 break 765 } 766 } 767 if !found { 768 t.Fatalf("Expected failure string not found, found %q instead", err.Error()) 769 } 770 } 771 772 func TestDockerDriver_CleanupContainer(t *testing.T) { 773 t.Parallel() 774 task := &structs.Task{ 775 Name: "redis-demo", 776 Config: map[string]interface{}{ 777 "image": "busybox", 778 "command": "/bin/echo", 779 "args": []string{"hello"}, 780 }, 781 Resources: &structs.Resources{ 782 MemoryMB: 256, 783 CPU: 512, 784 }, 785 LogConfig: &structs.LogConfig{ 786 MaxFiles: 10, 787 MaxFileSizeMB: 10, 788 }, 789 } 790 791 _, handle, cleanup := dockerSetup(t, task) 792 defer cleanup() 793 794 // Update should be a no-op 795 err := handle.Update(task) 796 if err != nil { 797 t.Fatalf("err: %v", err) 798 } 799 800 select { 801 case res := <-handle.WaitCh(): 802 if !res.Successful() { 803 t.Fatalf("err: %v", res) 804 } 805 806 time.Sleep(3 * time.Second) 807 808 // Ensure that the container isn't present 809 _, err := client.InspectContainer(handle.(*DockerHandle).containerID) 810 if err == nil { 811 t.Fatalf("expected to not get container") 812 } 813 814 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 815 t.Fatalf("timeout") 816 } 817 818 }