github.com/hspak/nomad@v0.7.2-0.20180309000617-bc4ae22a39a5/client/driver/docker_test.go (about) 1 package driver 2 3 import ( 4 "fmt" 5 "io/ioutil" 6 "math/rand" 7 "os" 8 "path/filepath" 9 "reflect" 10 "runtime" 11 "runtime/debug" 12 "sort" 13 "strconv" 14 "strings" 15 "testing" 16 "time" 17 18 docker "github.com/fsouza/go-dockerclient" 19 "github.com/hashicorp/consul/lib/freeport" 20 sockaddr "github.com/hashicorp/go-sockaddr" 21 "github.com/hashicorp/nomad/client/allocdir" 22 "github.com/hashicorp/nomad/client/config" 23 "github.com/hashicorp/nomad/client/driver/env" 24 cstructs "github.com/hashicorp/nomad/client/structs" 25 "github.com/hashicorp/nomad/client/testutil" 26 "github.com/hashicorp/nomad/helper/uuid" 27 "github.com/hashicorp/nomad/nomad/mock" 28 "github.com/hashicorp/nomad/nomad/structs" 29 tu "github.com/hashicorp/nomad/testutil" 30 "github.com/stretchr/testify/assert" 31 "github.com/stretchr/testify/require" 32 ) 33 34 func dockerIsRemote(t *testing.T) bool { 35 client, err := docker.NewClientFromEnv() 36 if err != nil { 37 return false 38 } 39 40 // Technically this could be a local tcp socket but for testing purposes 41 // we'll just assume that tcp is only used for remote connections. 42 if client.Endpoint()[0:3] == "tcp" { 43 return true 44 } 45 return false 46 } 47 48 // Returns a task with a reserved and dynamic port. The ports are returned 49 // respectively. 50 func dockerTask(t *testing.T) (*structs.Task, int, int) { 51 ports := freeport.GetT(t, 2) 52 dockerReserved := ports[0] 53 dockerDynamic := ports[1] 54 return &structs.Task{ 55 Name: "redis-demo", 56 Driver: "docker", 57 Config: map[string]interface{}{ 58 "image": "busybox", 59 "load": "busybox.tar", 60 "command": "/bin/nc", 61 "args": []string{"-l", "127.0.0.1", "-p", "0"}, 62 }, 63 LogConfig: &structs.LogConfig{ 64 MaxFiles: 10, 65 MaxFileSizeMB: 10, 66 }, 67 Resources: &structs.Resources{ 68 MemoryMB: 256, 69 CPU: 512, 70 Networks: []*structs.NetworkResource{ 71 { 72 IP: "127.0.0.1", 73 ReservedPorts: []structs.Port{{Label: "main", Value: dockerReserved}}, 74 DynamicPorts: []structs.Port{{Label: "REDIS", Value: dockerDynamic}}, 75 }, 76 }, 77 }, 78 }, dockerReserved, dockerDynamic 79 } 80 81 // dockerSetup does all of the basic setup you need to get a running docker 82 // process up and running for testing. Use like: 83 // 84 // task := taskTemplate() 85 // // do custom task configuration 86 // client, handle, cleanup := dockerSetup(t, task) 87 // defer cleanup() 88 // // do test stuff 89 // 90 // If there is a problem during setup this function will abort or skip the test 91 // and indicate the reason. 92 func dockerSetup(t *testing.T, task *structs.Task) (*docker.Client, *DockerHandle, func()) { 93 client := newTestDockerClient(t) 94 return dockerSetupWithClient(t, task, client) 95 } 96 97 func testDockerDriverContexts(t *testing.T, task *structs.Task) *testContext { 98 tctx := testDriverContexts(t, task) 99 100 // Drop the delay 101 tctx.DriverCtx.config.Options = make(map[string]string) 102 tctx.DriverCtx.config.Options[dockerImageRemoveDelayConfigOption] = "1s" 103 104 return tctx 105 } 106 107 func dockerSetupWithClient(t *testing.T, task *structs.Task, client *docker.Client) (*docker.Client, *DockerHandle, func()) { 108 t.Helper() 109 tctx := testDockerDriverContexts(t, task) 110 driver := NewDockerDriver(tctx.DriverCtx) 111 copyImage(t, tctx.ExecCtx.TaskDir, "busybox.tar") 112 113 presp, err := driver.Prestart(tctx.ExecCtx, task) 114 if err != nil { 115 if presp != nil && presp.CreatedResources != nil { 116 driver.Cleanup(tctx.ExecCtx, presp.CreatedResources) 117 } 118 tctx.AllocDir.Destroy() 119 t.Fatalf("error in prestart: %v", err) 120 } 121 // Update the exec ctx with the driver network env vars 122 tctx.ExecCtx.TaskEnv = tctx.EnvBuilder.SetDriverNetwork(presp.Network).Build() 123 124 sresp, err := driver.Start(tctx.ExecCtx, task) 125 if err != nil { 126 driver.Cleanup(tctx.ExecCtx, presp.CreatedResources) 127 tctx.AllocDir.Destroy() 128 t.Fatalf("Failed to start driver: %s\nStack\n%s", err, debug.Stack()) 129 } 130 131 if sresp.Handle == nil { 132 driver.Cleanup(tctx.ExecCtx, presp.CreatedResources) 133 tctx.AllocDir.Destroy() 134 t.Fatalf("handle is nil\nStack\n%s", debug.Stack()) 135 } 136 137 // At runtime this is handled by TaskRunner 138 tctx.ExecCtx.TaskEnv = tctx.EnvBuilder.SetDriverNetwork(sresp.Network).Build() 139 140 cleanup := func() { 141 driver.Cleanup(tctx.ExecCtx, presp.CreatedResources) 142 sresp.Handle.Kill() 143 tctx.AllocDir.Destroy() 144 } 145 146 return client, sresp.Handle.(*DockerHandle), cleanup 147 } 148 149 func newTestDockerClient(t *testing.T) *docker.Client { 150 t.Helper() 151 if !testutil.DockerIsConnected(t) { 152 t.Skip("Docker not connected") 153 } 154 155 client, err := docker.NewClientFromEnv() 156 if err != nil { 157 t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack()) 158 } 159 return client 160 } 161 162 // This test should always pass, even if docker daemon is not available 163 func TestDockerDriver_Fingerprint(t *testing.T) { 164 if !tu.IsTravis() { 165 t.Parallel() 166 } 167 ctx := testDockerDriverContexts(t, &structs.Task{Name: "foo", Driver: "docker", Resources: basicResources}) 168 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 169 defer ctx.AllocDir.Destroy() 170 d := NewDockerDriver(ctx.DriverCtx) 171 node := &structs.Node{ 172 Attributes: make(map[string]string), 173 } 174 175 request := &cstructs.FingerprintRequest{Config: &config.Config{}, Node: node} 176 var response cstructs.FingerprintResponse 177 err := d.Fingerprint(request, &response) 178 if err != nil { 179 t.Fatalf("err: %v", err) 180 } 181 182 attributes := response.Attributes 183 if testutil.DockerIsConnected(t) && attributes["driver.docker"] == "" { 184 t.Fatalf("Fingerprinter should detect when docker is available") 185 } 186 187 if attributes["driver.docker"] != "1" { 188 t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.") 189 } else { 190 191 // if docker is available, make sure that the response is tagged as 192 // applicable 193 if !response.Detected { 194 t.Fatalf("expected response to be applicable") 195 } 196 } 197 198 t.Logf("Found docker version %s", attributes["driver.docker.version"]) 199 } 200 201 // TestDockerDriver_Fingerprint_Bridge asserts that if Docker is running we set 202 // the bridge network's IP as a node attribute. See #2785 203 func TestDockerDriver_Fingerprint_Bridge(t *testing.T) { 204 if !tu.IsTravis() { 205 t.Parallel() 206 } 207 if !testutil.DockerIsConnected(t) { 208 t.Skip("requires Docker") 209 } 210 if runtime.GOOS != "linux" { 211 t.Skip("expect only on linux") 212 } 213 214 // This seems fragile, so we might need to reconsider this test if it 215 // proves flaky 216 expectedAddr, err := sockaddr.GetInterfaceIP("docker0") 217 if err != nil { 218 t.Fatalf("unable to get ip for docker0: %v", err) 219 } 220 if expectedAddr == "" { 221 t.Fatalf("unable to get ip for docker bridge") 222 } 223 224 conf := testConfig(t) 225 conf.Node = mock.Node() 226 dd := NewDockerDriver(NewDriverContext("", "", conf, conf.Node, testLogger(), nil)) 227 228 request := &cstructs.FingerprintRequest{Config: conf, Node: conf.Node} 229 var response cstructs.FingerprintResponse 230 err = dd.Fingerprint(request, &response) 231 if err != nil { 232 t.Fatalf("error fingerprinting docker: %v", err) 233 } 234 235 if !response.Detected { 236 t.Fatalf("expected response to be applicable") 237 } 238 239 attributes := response.Attributes 240 if attributes == nil { 241 t.Fatalf("expected attributes to be set") 242 } 243 244 if attributes["driver.docker"] == "" { 245 t.Fatalf("expected Docker to be enabled but false was returned") 246 } 247 248 if found := attributes["driver.docker.bridge_ip"]; found != expectedAddr { 249 t.Fatalf("expected bridge ip %q but found: %q", expectedAddr, found) 250 } 251 t.Logf("docker bridge ip: %q", attributes["driver.docker.bridge_ip"]) 252 } 253 254 func TestDockerDriver_StartOpen_Wait(t *testing.T) { 255 if !tu.IsTravis() { 256 t.Parallel() 257 } 258 if !testutil.DockerIsConnected(t) { 259 t.Skip("Docker not connected") 260 } 261 262 task := &structs.Task{ 263 Name: "nc-demo", 264 Driver: "docker", 265 Config: map[string]interface{}{ 266 "load": "busybox.tar", 267 "image": "busybox", 268 "command": "/bin/nc", 269 "args": []string{"-l", "127.0.0.1", "-p", "0"}, 270 }, 271 LogConfig: &structs.LogConfig{ 272 MaxFiles: 10, 273 MaxFileSizeMB: 10, 274 }, 275 Resources: basicResources, 276 } 277 278 ctx := testDockerDriverContexts(t, task) 279 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 280 defer ctx.AllocDir.Destroy() 281 d := NewDockerDriver(ctx.DriverCtx) 282 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 283 284 _, err := d.Prestart(ctx.ExecCtx, task) 285 if err != nil { 286 t.Fatalf("error in prestart: %v", err) 287 } 288 289 resp, err := d.Start(ctx.ExecCtx, task) 290 if err != nil { 291 t.Fatalf("err: %v", err) 292 } 293 if resp.Handle == nil { 294 t.Fatalf("missing handle") 295 } 296 defer resp.Handle.Kill() 297 298 // Attempt to open 299 resp2, err := d.Open(ctx.ExecCtx, resp.Handle.ID()) 300 if err != nil { 301 t.Fatalf("err: %v", err) 302 } 303 if resp2 == nil { 304 t.Fatalf("missing handle") 305 } 306 } 307 308 func TestDockerDriver_Start_Wait(t *testing.T) { 309 if !tu.IsTravis() { 310 t.Parallel() 311 } 312 if !testutil.DockerIsConnected(t) { 313 t.Skip("Docker not connected") 314 } 315 task := &structs.Task{ 316 Name: "nc-demo", 317 Driver: "docker", 318 Config: map[string]interface{}{ 319 "load": "busybox.tar", 320 "image": "busybox", 321 "command": "/bin/echo", 322 "args": []string{"hello"}, 323 }, 324 Resources: &structs.Resources{ 325 MemoryMB: 256, 326 CPU: 512, 327 }, 328 LogConfig: &structs.LogConfig{ 329 MaxFiles: 10, 330 MaxFileSizeMB: 10, 331 }, 332 } 333 334 _, handle, cleanup := dockerSetup(t, task) 335 defer cleanup() 336 337 // Update should be a no-op 338 err := handle.Update(task) 339 if err != nil { 340 t.Fatalf("err: %v", err) 341 } 342 343 select { 344 case res := <-handle.WaitCh(): 345 if !res.Successful() { 346 t.Fatalf("err: %v", res) 347 } 348 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 349 t.Fatalf("timeout") 350 } 351 } 352 353 // TestDockerDriver_Start_StoppedContainer asserts that Nomad will detect a 354 // stopped task container, remove it, and start a new container. 355 // 356 // See https://github.com/hashicorp/nomad/issues/3419 357 func TestDockerDriver_Start_StoppedContainer(t *testing.T) { 358 if !tu.IsTravis() { 359 t.Parallel() 360 } 361 if !testutil.DockerIsConnected(t) { 362 t.Skip("Docker not connected") 363 } 364 task := &structs.Task{ 365 Name: "nc-demo", 366 Driver: "docker", 367 Config: map[string]interface{}{ 368 "load": "busybox.tar", 369 "image": "busybox", 370 "command": "sleep", 371 "args": []string{"9000"}, 372 }, 373 Resources: &structs.Resources{ 374 MemoryMB: 100, 375 CPU: 100, 376 }, 377 LogConfig: &structs.LogConfig{ 378 MaxFiles: 1, 379 MaxFileSizeMB: 10, 380 }, 381 } 382 383 tctx := testDockerDriverContexts(t, task) 384 defer tctx.AllocDir.Destroy() 385 386 copyImage(t, tctx.ExecCtx.TaskDir, "busybox.tar") 387 client := newTestDockerClient(t) 388 driver := NewDockerDriver(tctx.DriverCtx).(*DockerDriver) 389 driverConfig := &DockerDriverConfig{ImageName: "busybox", LoadImage: "busybox.tar"} 390 if _, err := driver.loadImage(driverConfig, client, tctx.ExecCtx.TaskDir); err != nil { 391 t.Fatalf("error loading image: %v", err) 392 } 393 394 // Create a container of the same name but don't start it. This mimics 395 // the case of dockerd getting restarted and stopping containers while 396 // Nomad is watching them. 397 opts := docker.CreateContainerOptions{ 398 Name: fmt.Sprintf("%s-%s", task.Name, tctx.DriverCtx.allocID), 399 Config: &docker.Config{ 400 Image: "busybox", 401 Cmd: []string{"sleep", "9000"}, 402 }, 403 } 404 if _, err := client.CreateContainer(opts); err != nil { 405 t.Fatalf("error creating initial container: %v", err) 406 } 407 408 // Now assert that the driver can still start normally 409 presp, err := driver.Prestart(tctx.ExecCtx, task) 410 if err != nil { 411 driver.Cleanup(tctx.ExecCtx, presp.CreatedResources) 412 t.Fatalf("error in prestart: %v", err) 413 } 414 defer driver.Cleanup(tctx.ExecCtx, presp.CreatedResources) 415 416 sresp, err := driver.Start(tctx.ExecCtx, task) 417 if err != nil { 418 t.Fatalf("failed to start driver: %s", err) 419 } 420 handle := sresp.Handle.(*DockerHandle) 421 waitForExist(t, client, handle) 422 handle.Kill() 423 } 424 425 func TestDockerDriver_Start_LoadImage(t *testing.T) { 426 if !tu.IsTravis() { 427 t.Parallel() 428 } 429 if !testutil.DockerIsConnected(t) { 430 t.Skip("Docker not connected") 431 } 432 task := &structs.Task{ 433 Name: "busybox-demo", 434 Driver: "docker", 435 Config: map[string]interface{}{ 436 "image": "busybox", 437 "load": "busybox.tar", 438 "command": "/bin/sh", 439 "args": []string{ 440 "-c", 441 "echo hello > $NOMAD_TASK_DIR/output", 442 }, 443 }, 444 LogConfig: &structs.LogConfig{ 445 MaxFiles: 10, 446 MaxFileSizeMB: 10, 447 }, 448 Resources: &structs.Resources{ 449 MemoryMB: 256, 450 CPU: 512, 451 }, 452 } 453 454 ctx := testDockerDriverContexts(t, task) 455 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 456 defer ctx.AllocDir.Destroy() 457 d := NewDockerDriver(ctx.DriverCtx) 458 459 // Copy the image into the task's directory 460 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 461 462 _, err := d.Prestart(ctx.ExecCtx, task) 463 if err != nil { 464 t.Fatalf("error in prestart: %v", err) 465 } 466 resp, err := d.Start(ctx.ExecCtx, task) 467 if err != nil { 468 t.Fatalf("err: %v", err) 469 } 470 defer resp.Handle.Kill() 471 472 select { 473 case res := <-resp.Handle.WaitCh(): 474 if !res.Successful() { 475 t.Fatalf("err: %v", res) 476 } 477 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 478 t.Fatalf("timeout") 479 } 480 481 // Check that data was written to the shared alloc directory. 482 outputFile := filepath.Join(ctx.ExecCtx.TaskDir.LocalDir, "output") 483 act, err := ioutil.ReadFile(outputFile) 484 if err != nil { 485 t.Fatalf("Couldn't read expected output: %v", err) 486 } 487 488 exp := "hello" 489 if strings.TrimSpace(string(act)) != exp { 490 t.Fatalf("Command outputted %v; want %v", act, exp) 491 } 492 493 } 494 495 func TestDockerDriver_Start_BadPull_Recoverable(t *testing.T) { 496 if !tu.IsTravis() { 497 t.Parallel() 498 } 499 if !testutil.DockerIsConnected(t) { 500 t.Skip("Docker not connected") 501 } 502 task := &structs.Task{ 503 Name: "busybox-demo", 504 Driver: "docker", 505 Config: map[string]interface{}{ 506 "image": "127.0.1.1:32121/foo", // bad path 507 "command": "/bin/echo", 508 "args": []string{ 509 "hello", 510 }, 511 }, 512 LogConfig: &structs.LogConfig{ 513 MaxFiles: 10, 514 MaxFileSizeMB: 10, 515 }, 516 Resources: &structs.Resources{ 517 MemoryMB: 256, 518 CPU: 512, 519 }, 520 } 521 522 ctx := testDockerDriverContexts(t, task) 523 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 524 defer ctx.AllocDir.Destroy() 525 d := NewDockerDriver(ctx.DriverCtx) 526 527 _, err := d.Prestart(ctx.ExecCtx, task) 528 if err == nil { 529 t.Fatalf("want error in prestart: %v", err) 530 } 531 532 if rerr, ok := err.(*structs.RecoverableError); !ok { 533 t.Fatalf("want recoverable error: %+v", err) 534 } else if !rerr.IsRecoverable() { 535 t.Fatalf("error not recoverable: %+v", err) 536 } 537 } 538 539 func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) { 540 if !tu.IsTravis() { 541 t.Parallel() 542 } 543 // This test requires that the alloc dir be mounted into docker as a volume. 544 // Because this cannot happen when docker is run remotely, e.g. when running 545 // docker in a VM, we skip this when we detect Docker is being run remotely. 546 if !testutil.DockerIsConnected(t) || dockerIsRemote(t) { 547 t.Skip("Docker not connected") 548 } 549 550 exp := []byte{'w', 'i', 'n'} 551 file := "output.txt" 552 task := &structs.Task{ 553 Name: "nc-demo", 554 Driver: "docker", 555 Config: map[string]interface{}{ 556 "image": "busybox", 557 "load": "busybox.tar", 558 "command": "/bin/sh", 559 "args": []string{ 560 "-c", 561 fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`, 562 string(exp), env.AllocDir, file), 563 }, 564 }, 565 LogConfig: &structs.LogConfig{ 566 MaxFiles: 10, 567 MaxFileSizeMB: 10, 568 }, 569 Resources: &structs.Resources{ 570 MemoryMB: 256, 571 CPU: 512, 572 }, 573 } 574 575 ctx := testDockerDriverContexts(t, task) 576 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 577 defer ctx.AllocDir.Destroy() 578 d := NewDockerDriver(ctx.DriverCtx) 579 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 580 581 _, err := d.Prestart(ctx.ExecCtx, task) 582 if err != nil { 583 t.Fatalf("error in prestart: %v", err) 584 } 585 resp, err := d.Start(ctx.ExecCtx, task) 586 if err != nil { 587 t.Fatalf("err: %v", err) 588 } 589 defer resp.Handle.Kill() 590 591 select { 592 case res := <-resp.Handle.WaitCh(): 593 if !res.Successful() { 594 t.Fatalf("err: %v", res) 595 } 596 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 597 t.Fatalf("timeout") 598 } 599 600 // Check that data was written to the shared alloc directory. 601 outputFile := filepath.Join(ctx.AllocDir.SharedDir, file) 602 act, err := ioutil.ReadFile(outputFile) 603 if err != nil { 604 t.Fatalf("Couldn't read expected output: %v", err) 605 } 606 607 if !reflect.DeepEqual(act, exp) { 608 t.Fatalf("Command outputted %v; want %v", act, exp) 609 } 610 } 611 612 func TestDockerDriver_Start_Kill_Wait(t *testing.T) { 613 if !tu.IsTravis() { 614 t.Parallel() 615 } 616 if !testutil.DockerIsConnected(t) { 617 t.Skip("Docker not connected") 618 } 619 task := &structs.Task{ 620 Name: "nc-demo", 621 Driver: "docker", 622 Config: map[string]interface{}{ 623 "image": "busybox", 624 "load": "busybox.tar", 625 "command": "/bin/sleep", 626 "args": []string{"10"}, 627 }, 628 LogConfig: &structs.LogConfig{ 629 MaxFiles: 10, 630 MaxFileSizeMB: 10, 631 }, 632 Resources: basicResources, 633 } 634 635 _, handle, cleanup := dockerSetup(t, task) 636 defer cleanup() 637 638 go func() { 639 time.Sleep(100 * time.Millisecond) 640 err := handle.Kill() 641 if err != nil { 642 t.Fatalf("err: %v", err) 643 } 644 }() 645 646 select { 647 case res := <-handle.WaitCh(): 648 if res.Successful() { 649 t.Fatalf("should err: %v", res) 650 } 651 case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second): 652 t.Fatalf("timeout") 653 } 654 } 655 656 func TestDockerDriver_StartN(t *testing.T) { 657 if !tu.IsTravis() { 658 t.Parallel() 659 } 660 if !testutil.DockerIsConnected(t) { 661 t.Skip("Docker not connected") 662 } 663 664 task1, _, _ := dockerTask(t) 665 task2, _, _ := dockerTask(t) 666 task3, _, _ := dockerTask(t) 667 taskList := []*structs.Task{task1, task2, task3} 668 669 handles := make([]DriverHandle, len(taskList)) 670 671 t.Logf("Starting %d tasks", len(taskList)) 672 673 // Let's spin up a bunch of things 674 for idx, task := range taskList { 675 ctx := testDockerDriverContexts(t, task) 676 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 677 defer ctx.AllocDir.Destroy() 678 d := NewDockerDriver(ctx.DriverCtx) 679 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 680 681 _, err := d.Prestart(ctx.ExecCtx, task) 682 if err != nil { 683 t.Fatalf("error in prestart #%d: %v", idx+1, err) 684 } 685 resp, err := d.Start(ctx.ExecCtx, task) 686 if err != nil { 687 t.Errorf("Failed starting task #%d: %s", idx+1, err) 688 continue 689 } 690 handles[idx] = resp.Handle 691 } 692 693 t.Log("All tasks are started. Terminating...") 694 695 for idx, handle := range handles { 696 if handle == nil { 697 t.Errorf("Bad handle for task #%d", idx+1) 698 continue 699 } 700 701 err := handle.Kill() 702 if err != nil { 703 t.Errorf("Failed stopping task #%d: %s", idx+1, err) 704 } 705 } 706 707 t.Log("Test complete!") 708 } 709 710 func TestDockerDriver_StartNVersions(t *testing.T) { 711 if !tu.IsTravis() { 712 t.Parallel() 713 } 714 if !testutil.DockerIsConnected(t) { 715 t.Skip("Docker not connected") 716 } 717 718 task1, _, _ := dockerTask(t) 719 task1.Config["image"] = "busybox" 720 task1.Config["load"] = "busybox.tar" 721 722 task2, _, _ := dockerTask(t) 723 task2.Config["image"] = "busybox:musl" 724 task2.Config["load"] = "busybox_musl.tar" 725 726 task3, _, _ := dockerTask(t) 727 task3.Config["image"] = "busybox:glibc" 728 task3.Config["load"] = "busybox_glibc.tar" 729 730 taskList := []*structs.Task{task1, task2, task3} 731 732 handles := make([]DriverHandle, len(taskList)) 733 734 t.Logf("Starting %d tasks", len(taskList)) 735 736 // Let's spin up a bunch of things 737 for idx, task := range taskList { 738 ctx := testDockerDriverContexts(t, task) 739 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 740 defer ctx.AllocDir.Destroy() 741 d := NewDockerDriver(ctx.DriverCtx) 742 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 743 copyImage(t, ctx.ExecCtx.TaskDir, "busybox_musl.tar") 744 copyImage(t, ctx.ExecCtx.TaskDir, "busybox_glibc.tar") 745 746 _, err := d.Prestart(ctx.ExecCtx, task) 747 if err != nil { 748 t.Fatalf("error in prestart #%d: %v", idx+1, err) 749 } 750 resp, err := d.Start(ctx.ExecCtx, task) 751 if err != nil { 752 t.Errorf("Failed starting task #%d: %s", idx+1, err) 753 continue 754 } 755 handles[idx] = resp.Handle 756 } 757 758 t.Log("All tasks are started. Terminating...") 759 760 for idx, handle := range handles { 761 if handle == nil { 762 t.Errorf("Bad handle for task #%d", idx+1) 763 continue 764 } 765 766 err := handle.Kill() 767 if err != nil { 768 t.Errorf("Failed stopping task #%d: %s", idx+1, err) 769 } 770 } 771 772 t.Log("Test complete!") 773 } 774 775 func waitForExist(t *testing.T, client *docker.Client, handle *DockerHandle) { 776 handle.logger.Printf("[DEBUG] docker.test: waiting for container %s to exist...", handle.ContainerID()) 777 tu.WaitForResult(func() (bool, error) { 778 container, err := client.InspectContainer(handle.ContainerID()) 779 if err != nil { 780 if _, ok := err.(*docker.NoSuchContainer); !ok { 781 return false, err 782 } 783 } 784 785 return container != nil, nil 786 }, func(err error) { 787 t.Fatalf("err: %v", err) 788 }) 789 handle.logger.Printf("[DEBUG] docker.test: ...container %s exists!", handle.ContainerID()) 790 } 791 792 func TestDockerDriver_NetworkMode_Host(t *testing.T) { 793 if !tu.IsTravis() { 794 t.Parallel() 795 } 796 if !testutil.DockerIsConnected(t) { 797 t.Skip("Docker not connected") 798 } 799 expected := "host" 800 801 task := &structs.Task{ 802 Name: "nc-demo", 803 Driver: "docker", 804 Config: map[string]interface{}{ 805 "image": "busybox", 806 "load": "busybox.tar", 807 "command": "/bin/nc", 808 "args": []string{"-l", "127.0.0.1", "-p", "0"}, 809 "network_mode": expected, 810 }, 811 Resources: &structs.Resources{ 812 MemoryMB: 256, 813 CPU: 512, 814 }, 815 LogConfig: &structs.LogConfig{ 816 MaxFiles: 10, 817 MaxFileSizeMB: 10, 818 }, 819 } 820 821 client, handle, cleanup := dockerSetup(t, task) 822 defer cleanup() 823 824 waitForExist(t, client, handle) 825 826 container, err := client.InspectContainer(handle.ContainerID()) 827 if err != nil { 828 t.Fatalf("err: %v", err) 829 } 830 831 actual := container.HostConfig.NetworkMode 832 if actual != expected { 833 t.Fatalf("Got network mode %q; want %q", expected, actual) 834 } 835 } 836 837 func TestDockerDriver_NetworkAliases_Bridge(t *testing.T) { 838 if !tu.IsTravis() { 839 t.Parallel() 840 } 841 if !testutil.DockerIsConnected(t) { 842 t.Skip("Docker not connected") 843 } 844 845 // Because go-dockerclient doesn't provide api for query network aliases, just check that 846 // a container can be created with a 'network_aliases' property 847 848 // Create network, network-scoped alias is supported only for containers in user defined networks 849 client := newTestDockerClient(t) 850 networkOpts := docker.CreateNetworkOptions{Name: "foobar", Driver: "bridge"} 851 network, err := client.CreateNetwork(networkOpts) 852 if err != nil { 853 t.Fatalf("err: %v", err) 854 } 855 defer client.RemoveNetwork(network.ID) 856 857 expected := []string{"foobar"} 858 task := &structs.Task{ 859 Name: "nc-demo", 860 Driver: "docker", 861 Config: map[string]interface{}{ 862 "image": "busybox", 863 "load": "busybox.tar", 864 "command": "/bin/nc", 865 "args": []string{"-l", "127.0.0.1", "-p", "0"}, 866 "network_mode": network.Name, 867 "network_aliases": expected, 868 }, 869 Resources: &structs.Resources{ 870 MemoryMB: 256, 871 CPU: 512, 872 }, 873 LogConfig: &structs.LogConfig{ 874 MaxFiles: 10, 875 MaxFileSizeMB: 10, 876 }, 877 } 878 879 client, handle, cleanup := dockerSetupWithClient(t, task, client) 880 defer cleanup() 881 882 waitForExist(t, client, handle) 883 884 _, err = client.InspectContainer(handle.ContainerID()) 885 if err != nil { 886 t.Fatalf("err: %v", err) 887 } 888 } 889 890 func TestDockerDriver_Sysctl_Ulimit(t *testing.T) { 891 task, _, _ := dockerTask(t) 892 expectedUlimits := map[string]string{ 893 "nproc": "4242", 894 "nofile": "2048:4096", 895 } 896 task.Config["sysctl"] = []map[string]string{ 897 { 898 "net.core.somaxconn": "16384", 899 }, 900 } 901 task.Config["ulimit"] = []map[string]string{ 902 expectedUlimits, 903 } 904 905 client, handle, cleanup := dockerSetup(t, task) 906 defer cleanup() 907 908 waitForExist(t, client, handle) 909 910 container, err := client.InspectContainer(handle.ContainerID()) 911 assert.Nil(t, err, "unexpected error: %v", err) 912 913 want := "16384" 914 got := container.HostConfig.Sysctls["net.core.somaxconn"] 915 assert.Equal(t, want, got, "Wrong net.core.somaxconn config for docker job. Expect: %s, got: %s", want, got) 916 917 expectedUlimitLen := 2 918 actualUlimitLen := len(container.HostConfig.Ulimits) 919 assert.Equal(t, want, got, "Wrong number of ulimit configs for docker job. Expect: %d, got: %d", expectedUlimitLen, actualUlimitLen) 920 921 for _, got := range container.HostConfig.Ulimits { 922 if expectedStr, ok := expectedUlimits[got.Name]; !ok { 923 t.Errorf("%s config unexpected for docker job.", got.Name) 924 } else { 925 if !strings.Contains(expectedStr, ":") { 926 expectedStr = expectedStr + ":" + expectedStr 927 } 928 929 splitted := strings.SplitN(expectedStr, ":", 2) 930 soft, _ := strconv.Atoi(splitted[0]) 931 hard, _ := strconv.Atoi(splitted[1]) 932 assert.Equal(t, int64(soft), got.Soft, "Wrong soft %s ulimit for docker job. Expect: %d, got: %d", got.Name, soft, got.Soft) 933 assert.Equal(t, int64(hard), got.Hard, "Wrong hard %s ulimit for docker job. Expect: %d, got: %d", got.Name, hard, got.Hard) 934 935 } 936 } 937 } 938 939 func TestDockerDriver_Sysctl_Ulimit_Errors(t *testing.T) { 940 brokenConfigs := []interface{}{ 941 map[string]interface{}{ 942 "nofile": "", 943 }, 944 map[string]interface{}{ 945 "nofile": "abc:1234", 946 }, 947 map[string]interface{}{ 948 "nofile": "1234:abc", 949 }, 950 } 951 952 test_cases := []struct { 953 ulimitConfig interface{} 954 err error 955 }{ 956 {[]interface{}{brokenConfigs[0]}, fmt.Errorf("Malformed ulimit specification nofile: \"\", cannot be empty")}, 957 {[]interface{}{brokenConfigs[1]}, fmt.Errorf("Malformed soft ulimit nofile: abc:1234")}, 958 {[]interface{}{brokenConfigs[2]}, fmt.Errorf("Malformed hard ulimit nofile: 1234:abc")}, 959 } 960 961 for _, tc := range test_cases { 962 task, _, _ := dockerTask(t) 963 task.Config["ulimit"] = tc.ulimitConfig 964 965 ctx := testDockerDriverContexts(t, task) 966 driver := NewDockerDriver(ctx.DriverCtx) 967 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 968 defer ctx.AllocDir.Destroy() 969 970 _, err := driver.Prestart(ctx.ExecCtx, task) 971 assert.NotNil(t, err, "Expected non nil error") 972 assert.Equal(t, err.Error(), tc.err.Error(), "unexpected error in prestart, got %v, expected %v", err, tc.err) 973 } 974 } 975 976 func TestDockerDriver_Labels(t *testing.T) { 977 if !tu.IsTravis() { 978 t.Parallel() 979 } 980 if !testutil.DockerIsConnected(t) { 981 t.Skip("Docker not connected") 982 } 983 984 task, _, _ := dockerTask(t) 985 task.Config["labels"] = []map[string]string{ 986 { 987 "label1": "value1", 988 "label2": "value2", 989 }, 990 } 991 992 client, handle, cleanup := dockerSetup(t, task) 993 defer cleanup() 994 995 waitForExist(t, client, handle) 996 997 container, err := client.InspectContainer(handle.ContainerID()) 998 if err != nil { 999 t.Fatalf("err: %v", err) 1000 } 1001 1002 if want, got := 2, len(container.Config.Labels); want != got { 1003 t.Errorf("Wrong labels count for docker job. Expect: %d, got: %d", want, got) 1004 } 1005 1006 if want, got := "value1", container.Config.Labels["label1"]; want != got { 1007 t.Errorf("Wrong label value docker job. Expect: %s, got: %s", want, got) 1008 } 1009 } 1010 1011 func TestDockerDriver_ForcePull_IsInvalidConfig(t *testing.T) { 1012 if !tu.IsTravis() { 1013 t.Parallel() 1014 } 1015 if !testutil.DockerIsConnected(t) { 1016 t.Skip("Docker not connected") 1017 } 1018 1019 task, _, _ := dockerTask(t) 1020 task.Config["force_pull"] = "nothing" 1021 1022 ctx := testDockerDriverContexts(t, task) 1023 defer ctx.AllocDir.Destroy() 1024 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 1025 driver := NewDockerDriver(ctx.DriverCtx) 1026 1027 if _, err := driver.Prestart(ctx.ExecCtx, task); err == nil { 1028 t.Fatalf("error expected in prestart") 1029 } 1030 } 1031 1032 func TestDockerDriver_ForcePull(t *testing.T) { 1033 if !tu.IsTravis() { 1034 t.Parallel() 1035 } 1036 if !testutil.DockerIsConnected(t) { 1037 t.Skip("Docker not connected") 1038 } 1039 1040 task, _, _ := dockerTask(t) 1041 task.Config["force_pull"] = "true" 1042 1043 client, handle, cleanup := dockerSetup(t, task) 1044 defer cleanup() 1045 1046 waitForExist(t, client, handle) 1047 1048 _, err := client.InspectContainer(handle.ContainerID()) 1049 if err != nil { 1050 t.Fatalf("err: %v", err) 1051 } 1052 } 1053 1054 func TestDockerDriver_SecurityOpt(t *testing.T) { 1055 if !tu.IsTravis() { 1056 t.Parallel() 1057 } 1058 if !testutil.DockerIsConnected(t) { 1059 t.Skip("Docker not connected") 1060 } 1061 1062 task, _, _ := dockerTask(t) 1063 task.Config["security_opt"] = []string{"seccomp=unconfined"} 1064 1065 client, handle, cleanup := dockerSetup(t, task) 1066 defer cleanup() 1067 1068 waitForExist(t, client, handle) 1069 1070 container, err := client.InspectContainer(handle.ContainerID()) 1071 if err != nil { 1072 t.Fatalf("err: %v", err) 1073 } 1074 1075 if !reflect.DeepEqual(task.Config["security_opt"], container.HostConfig.SecurityOpt) { 1076 t.Errorf("Security Opts don't match.\nExpected:\n%s\nGot:\n%s\n", task.Config["security_opt"], container.HostConfig.SecurityOpt) 1077 } 1078 } 1079 1080 func TestDockerDriver_Capabilities(t *testing.T) { 1081 if !tu.IsTravis() { 1082 t.Parallel() 1083 } 1084 if !testutil.DockerIsConnected(t) { 1085 t.Skip("Docker not connected") 1086 } 1087 if runtime.GOOS == "windows" { 1088 t.Skip("Capabilities not supported on windows") 1089 } 1090 1091 testCases := []struct { 1092 Name string 1093 CapAdd []string 1094 CapDrop []string 1095 Whitelist string 1096 StartError string 1097 }{ 1098 { 1099 Name: "default-whitelist-add-allowed", 1100 CapAdd: []string{"fowner", "mknod"}, 1101 CapDrop: []string{"all"}, 1102 }, 1103 { 1104 Name: "default-whitelist-add-forbidden", 1105 CapAdd: []string{"net_admin"}, 1106 StartError: "net_admin", 1107 }, 1108 { 1109 Name: "default-whitelist-drop-existing", 1110 CapDrop: []string{"fowner", "mknod"}, 1111 }, 1112 { 1113 Name: "restrictive-whitelist-drop-all", 1114 CapDrop: []string{"all"}, 1115 Whitelist: "fowner,mknod", 1116 }, 1117 { 1118 Name: "restrictive-whitelist-add-allowed", 1119 CapAdd: []string{"fowner", "mknod"}, 1120 CapDrop: []string{"all"}, 1121 Whitelist: "fowner,mknod", 1122 }, 1123 { 1124 Name: "restrictive-whitelist-add-forbidden", 1125 CapAdd: []string{"net_admin", "mknod"}, 1126 CapDrop: []string{"all"}, 1127 Whitelist: "fowner,mknod", 1128 StartError: "net_admin", 1129 }, 1130 { 1131 Name: "permissive-whitelist", 1132 CapAdd: []string{"net_admin", "mknod"}, 1133 Whitelist: "all", 1134 }, 1135 { 1136 Name: "permissive-whitelist-add-all", 1137 CapAdd: []string{"all"}, 1138 Whitelist: "all", 1139 }, 1140 } 1141 1142 for _, tc := range testCases { 1143 t.Run(tc.Name, func(t *testing.T) { 1144 client := newTestDockerClient(t) 1145 task, _, _ := dockerTask(t) 1146 if len(tc.CapAdd) > 0 { 1147 task.Config["cap_add"] = tc.CapAdd 1148 } 1149 if len(tc.CapDrop) > 0 { 1150 task.Config["cap_drop"] = tc.CapDrop 1151 } 1152 1153 tctx := testDockerDriverContexts(t, task) 1154 if tc.Whitelist != "" { 1155 tctx.DriverCtx.config.Options[dockerCapsWhitelistConfigOption] = tc.Whitelist 1156 } 1157 1158 driver := NewDockerDriver(tctx.DriverCtx) 1159 copyImage(t, tctx.ExecCtx.TaskDir, "busybox.tar") 1160 defer tctx.AllocDir.Destroy() 1161 1162 presp, err := driver.Prestart(tctx.ExecCtx, task) 1163 defer driver.Cleanup(tctx.ExecCtx, presp.CreatedResources) 1164 if err != nil { 1165 t.Fatalf("Error in prestart: %v", err) 1166 } 1167 1168 sresp, err := driver.Start(tctx.ExecCtx, task) 1169 if err == nil && tc.StartError != "" { 1170 t.Fatalf("Expected error in start: %v", tc.StartError) 1171 } else if err != nil { 1172 if tc.StartError == "" { 1173 t.Fatalf("Failed to start driver: %s\nStack\n%s", err, debug.Stack()) 1174 } else if !strings.Contains(err.Error(), tc.StartError) { 1175 t.Fatalf("Expect error containing \"%s\", got %v", tc.StartError, err) 1176 } 1177 return 1178 } 1179 1180 if sresp.Handle == nil { 1181 t.Fatalf("handle is nil\nStack\n%s", debug.Stack()) 1182 } 1183 defer sresp.Handle.Kill() 1184 handle := sresp.Handle.(*DockerHandle) 1185 1186 waitForExist(t, client, handle) 1187 1188 container, err := client.InspectContainer(handle.ContainerID()) 1189 if err != nil { 1190 t.Fatalf("Error inspecting container: %v", err) 1191 } 1192 1193 if !reflect.DeepEqual(tc.CapAdd, container.HostConfig.CapAdd) { 1194 t.Errorf("CapAdd doesn't match.\nExpected:\n%s\nGot:\n%s\n", tc.CapAdd, container.HostConfig.CapAdd) 1195 } 1196 1197 if !reflect.DeepEqual(tc.CapDrop, container.HostConfig.CapDrop) { 1198 t.Errorf("CapDrop doesn't match.\nExpected:\n%s\nGot:\n%s\n", tc.CapDrop, container.HostConfig.CapDrop) 1199 } 1200 }) 1201 } 1202 } 1203 1204 func TestDockerDriver_DNS(t *testing.T) { 1205 if !tu.IsTravis() { 1206 t.Parallel() 1207 } 1208 if !testutil.DockerIsConnected(t) { 1209 t.Skip("Docker not connected") 1210 } 1211 1212 task, _, _ := dockerTask(t) 1213 task.Config["dns_servers"] = []string{"8.8.8.8", "8.8.4.4"} 1214 task.Config["dns_search_domains"] = []string{"example.com", "example.org", "example.net"} 1215 task.Config["dns_options"] = []string{"ndots:1"} 1216 1217 client, handle, cleanup := dockerSetup(t, task) 1218 defer cleanup() 1219 1220 waitForExist(t, client, handle) 1221 1222 container, err := client.InspectContainer(handle.ContainerID()) 1223 if err != nil { 1224 t.Fatalf("err: %v", err) 1225 } 1226 1227 if !reflect.DeepEqual(task.Config["dns_servers"], container.HostConfig.DNS) { 1228 t.Errorf("DNS Servers don't match.\nExpected:\n%s\nGot:\n%s\n", task.Config["dns_servers"], container.HostConfig.DNS) 1229 } 1230 1231 if !reflect.DeepEqual(task.Config["dns_search_domains"], container.HostConfig.DNSSearch) { 1232 t.Errorf("DNS Search Domains don't match.\nExpected:\n%s\nGot:\n%s\n", task.Config["dns_search_domains"], container.HostConfig.DNSSearch) 1233 } 1234 1235 if !reflect.DeepEqual(task.Config["dns_options"], container.HostConfig.DNSOptions) { 1236 t.Errorf("DNS Options don't match.\nExpected:\n%s\nGot:\n%s\n", task.Config["dns_options"], container.HostConfig.DNSOptions) 1237 } 1238 } 1239 1240 func TestDockerDriver_MACAddress(t *testing.T) { 1241 if !tu.IsTravis() { 1242 t.Parallel() 1243 } 1244 if !testutil.DockerIsConnected(t) { 1245 t.Skip("Docker not connected") 1246 } 1247 1248 task, _, _ := dockerTask(t) 1249 task.Config["mac_address"] = "00:16:3e:00:00:00" 1250 1251 client, handle, cleanup := dockerSetup(t, task) 1252 defer cleanup() 1253 1254 waitForExist(t, client, handle) 1255 1256 container, err := client.InspectContainer(handle.ContainerID()) 1257 if err != nil { 1258 t.Fatalf("err: %v", err) 1259 } 1260 1261 if container.NetworkSettings.MacAddress != task.Config["mac_address"] { 1262 t.Errorf("expected mac_address=%q but found %q", task.Config["mac_address"], container.NetworkSettings.MacAddress) 1263 } 1264 } 1265 1266 func TestDockerWorkDir(t *testing.T) { 1267 if !tu.IsTravis() { 1268 t.Parallel() 1269 } 1270 if !testutil.DockerIsConnected(t) { 1271 t.Skip("Docker not connected") 1272 } 1273 1274 task, _, _ := dockerTask(t) 1275 task.Config["work_dir"] = "/some/path" 1276 1277 client, handle, cleanup := dockerSetup(t, task) 1278 defer cleanup() 1279 1280 container, err := client.InspectContainer(handle.ContainerID()) 1281 if err != nil { 1282 t.Fatalf("err: %v", err) 1283 } 1284 1285 if want, got := "/some/path", container.Config.WorkingDir; want != got { 1286 t.Errorf("Wrong working directory for docker job. Expect: %s, got: %s", want, got) 1287 } 1288 } 1289 1290 func inSlice(needle string, haystack []string) bool { 1291 for _, h := range haystack { 1292 if h == needle { 1293 return true 1294 } 1295 } 1296 return false 1297 } 1298 1299 func TestDockerDriver_PortsNoMap(t *testing.T) { 1300 if !tu.IsTravis() { 1301 t.Parallel() 1302 } 1303 if !testutil.DockerIsConnected(t) { 1304 t.Skip("Docker not connected") 1305 } 1306 1307 task, res, dyn := dockerTask(t) 1308 1309 client, handle, cleanup := dockerSetup(t, task) 1310 defer cleanup() 1311 1312 waitForExist(t, client, handle) 1313 1314 container, err := client.InspectContainer(handle.ContainerID()) 1315 if err != nil { 1316 t.Fatalf("err: %v", err) 1317 } 1318 1319 // Verify that the correct ports are EXPOSED 1320 expectedExposedPorts := map[docker.Port]struct{}{ 1321 docker.Port(fmt.Sprintf("%d/tcp", res)): {}, 1322 docker.Port(fmt.Sprintf("%d/udp", res)): {}, 1323 docker.Port(fmt.Sprintf("%d/tcp", dyn)): {}, 1324 docker.Port(fmt.Sprintf("%d/udp", dyn)): {}, 1325 } 1326 1327 if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) { 1328 t.Errorf("Exposed ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedExposedPorts, container.Config.ExposedPorts) 1329 } 1330 1331 // Verify that the correct ports are FORWARDED 1332 expectedPortBindings := map[docker.Port][]docker.PortBinding{ 1333 docker.Port(fmt.Sprintf("%d/tcp", res)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 1334 docker.Port(fmt.Sprintf("%d/udp", res)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 1335 docker.Port(fmt.Sprintf("%d/tcp", dyn)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 1336 docker.Port(fmt.Sprintf("%d/udp", dyn)): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 1337 } 1338 1339 if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) { 1340 t.Errorf("Forwarded ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedPortBindings, container.HostConfig.PortBindings) 1341 } 1342 1343 expectedEnvironment := map[string]string{ 1344 "NOMAD_ADDR_main": fmt.Sprintf("127.0.0.1:%d", res), 1345 "NOMAD_ADDR_REDIS": fmt.Sprintf("127.0.0.1:%d", dyn), 1346 } 1347 1348 for key, val := range expectedEnvironment { 1349 search := fmt.Sprintf("%s=%s", key, val) 1350 if !inSlice(search, container.Config.Env) { 1351 t.Errorf("Expected to find %s in container environment: %+v", search, container.Config.Env) 1352 } 1353 } 1354 } 1355 1356 func TestDockerDriver_PortsMapping(t *testing.T) { 1357 if !tu.IsTravis() { 1358 t.Parallel() 1359 } 1360 if !testutil.DockerIsConnected(t) { 1361 t.Skip("Docker not connected") 1362 } 1363 1364 task, res, dyn := dockerTask(t) 1365 task.Config["port_map"] = []map[string]string{ 1366 { 1367 "main": "8080", 1368 "REDIS": "6379", 1369 }, 1370 } 1371 1372 client, handle, cleanup := dockerSetup(t, task) 1373 defer cleanup() 1374 1375 waitForExist(t, client, handle) 1376 1377 container, err := client.InspectContainer(handle.ContainerID()) 1378 if err != nil { 1379 t.Fatalf("err: %v", err) 1380 } 1381 1382 // Verify that the correct ports are EXPOSED 1383 expectedExposedPorts := map[docker.Port]struct{}{ 1384 docker.Port("8080/tcp"): {}, 1385 docker.Port("8080/udp"): {}, 1386 docker.Port("6379/tcp"): {}, 1387 docker.Port("6379/udp"): {}, 1388 } 1389 1390 if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) { 1391 t.Errorf("Exposed ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedExposedPorts, container.Config.ExposedPorts) 1392 } 1393 1394 // Verify that the correct ports are FORWARDED 1395 expectedPortBindings := map[docker.Port][]docker.PortBinding{ 1396 docker.Port("8080/tcp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 1397 docker.Port("8080/udp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 1398 docker.Port("6379/tcp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 1399 docker.Port("6379/udp"): {{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 1400 } 1401 1402 if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) { 1403 t.Errorf("Forwarded ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedPortBindings, container.HostConfig.PortBindings) 1404 } 1405 1406 expectedEnvironment := map[string]string{ 1407 "NOMAD_PORT_main": "8080", 1408 "NOMAD_PORT_REDIS": "6379", 1409 "NOMAD_HOST_PORT_main": strconv.Itoa(res), 1410 } 1411 1412 sort.Strings(container.Config.Env) 1413 for key, val := range expectedEnvironment { 1414 search := fmt.Sprintf("%s=%s", key, val) 1415 if !inSlice(search, container.Config.Env) { 1416 t.Errorf("Expected to find %s in container environment:\n%s\n\n", search, strings.Join(container.Config.Env, "\n")) 1417 } 1418 } 1419 } 1420 1421 func TestDockerDriver_User(t *testing.T) { 1422 if !tu.IsTravis() { 1423 t.Parallel() 1424 } 1425 if !testutil.DockerIsConnected(t) { 1426 t.Skip("Docker not connected") 1427 } 1428 1429 task := &structs.Task{ 1430 Name: "redis-demo", 1431 User: "alice", 1432 Driver: "docker", 1433 Config: map[string]interface{}{ 1434 "image": "busybox", 1435 "load": "busybox.tar", 1436 "command": "/bin/sleep", 1437 "args": []string{"10000"}, 1438 }, 1439 Resources: &structs.Resources{ 1440 MemoryMB: 256, 1441 CPU: 512, 1442 }, 1443 LogConfig: &structs.LogConfig{ 1444 MaxFiles: 10, 1445 MaxFileSizeMB: 10, 1446 }, 1447 } 1448 1449 ctx := testDockerDriverContexts(t, task) 1450 //ctx.DriverCtx.config.Options = map[string]string{"docker.cleanup.image": "false"} 1451 driver := NewDockerDriver(ctx.DriverCtx) 1452 defer ctx.AllocDir.Destroy() 1453 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 1454 1455 _, err := driver.Prestart(ctx.ExecCtx, task) 1456 if err != nil { 1457 t.Fatalf("error in prestart: %v", err) 1458 } 1459 1460 // It should fail because the user "alice" does not exist on the given 1461 // image. 1462 resp, err := driver.Start(ctx.ExecCtx, task) 1463 if err == nil { 1464 resp.Handle.Kill() 1465 t.Fatalf("Should've failed") 1466 } 1467 1468 if !strings.Contains(err.Error(), "alice") { 1469 t.Fatalf("Expected failure string not found, found %q instead", err.Error()) 1470 } 1471 } 1472 1473 func TestDockerDriver_CleanupContainer(t *testing.T) { 1474 if !tu.IsTravis() { 1475 t.Parallel() 1476 } 1477 if !testutil.DockerIsConnected(t) { 1478 t.Skip("Docker not connected") 1479 } 1480 1481 task := &structs.Task{ 1482 Name: "redis-demo", 1483 Driver: "docker", 1484 Config: map[string]interface{}{ 1485 "image": "busybox", 1486 "load": "busybox.tar", 1487 "command": "/bin/echo", 1488 "args": []string{"hello"}, 1489 }, 1490 Resources: &structs.Resources{ 1491 MemoryMB: 256, 1492 CPU: 512, 1493 }, 1494 LogConfig: &structs.LogConfig{ 1495 MaxFiles: 10, 1496 MaxFileSizeMB: 10, 1497 }, 1498 } 1499 1500 _, handle, cleanup := dockerSetup(t, task) 1501 defer cleanup() 1502 1503 // Update should be a no-op 1504 err := handle.Update(task) 1505 if err != nil { 1506 t.Fatalf("err: %v", err) 1507 } 1508 1509 select { 1510 case res := <-handle.WaitCh(): 1511 if !res.Successful() { 1512 t.Fatalf("err: %v", res) 1513 } 1514 1515 time.Sleep(3 * time.Second) 1516 1517 // Ensure that the container isn't present 1518 _, err := client.InspectContainer(handle.containerID) 1519 if err == nil { 1520 t.Fatalf("expected to not get container") 1521 } 1522 1523 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 1524 t.Fatalf("timeout") 1525 } 1526 } 1527 1528 func TestDockerDriver_Stats(t *testing.T) { 1529 if !tu.IsTravis() { 1530 t.Parallel() 1531 } 1532 if !testutil.DockerIsConnected(t) { 1533 t.Skip("Docker not connected") 1534 } 1535 1536 task := &structs.Task{ 1537 Name: "sleep", 1538 Driver: "docker", 1539 Config: map[string]interface{}{ 1540 "image": "busybox", 1541 "load": "busybox.tar", 1542 "command": "/bin/sleep", 1543 "args": []string{"100"}, 1544 }, 1545 LogConfig: &structs.LogConfig{ 1546 MaxFiles: 10, 1547 MaxFileSizeMB: 10, 1548 }, 1549 Resources: basicResources, 1550 } 1551 1552 _, handle, cleanup := dockerSetup(t, task) 1553 defer cleanup() 1554 1555 waitForExist(t, client, handle) 1556 1557 go func() { 1558 time.Sleep(3 * time.Second) 1559 ru, err := handle.Stats() 1560 if err != nil { 1561 t.Fatalf("err: %v", err) 1562 } 1563 if ru.ResourceUsage == nil { 1564 handle.Kill() 1565 t.Fatalf("expected resource usage") 1566 } 1567 err = handle.Kill() 1568 if err != nil { 1569 t.Fatalf("err: %v", err) 1570 } 1571 }() 1572 1573 select { 1574 case res := <-handle.WaitCh(): 1575 if res.Successful() { 1576 t.Fatalf("should err: %v", res) 1577 } 1578 case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second): 1579 t.Fatalf("timeout") 1580 } 1581 } 1582 1583 func setupDockerVolumes(t *testing.T, cfg *config.Config, hostpath string) (*structs.Task, Driver, *ExecContext, string, func()) { 1584 if !testutil.DockerIsConnected(t) { 1585 t.Skip("Docker not connected") 1586 } 1587 1588 randfn := fmt.Sprintf("test-%d", rand.Int()) 1589 hostfile := filepath.Join(hostpath, randfn) 1590 containerPath := "/mnt/vol" 1591 containerFile := filepath.Join(containerPath, randfn) 1592 1593 task := &structs.Task{ 1594 Name: "ls", 1595 Env: map[string]string{"VOL_PATH": containerPath}, 1596 Driver: "docker", 1597 Config: map[string]interface{}{ 1598 "image": "busybox", 1599 "load": "busybox.tar", 1600 "command": "touch", 1601 "args": []string{containerFile}, 1602 "volumes": []string{fmt.Sprintf("%s:${VOL_PATH}", hostpath)}, 1603 }, 1604 LogConfig: &structs.LogConfig{ 1605 MaxFiles: 10, 1606 MaxFileSizeMB: 10, 1607 }, 1608 Resources: basicResources, 1609 } 1610 1611 // Build alloc and task directory structure 1612 allocDir := allocdir.NewAllocDir(testLogger(), filepath.Join(cfg.AllocDir, uuid.Generate())) 1613 if err := allocDir.Build(); err != nil { 1614 t.Fatalf("failed to build alloc dir: %v", err) 1615 } 1616 taskDir := allocDir.NewTaskDir(task.Name) 1617 if err := taskDir.Build(false, nil, cstructs.FSIsolationImage); err != nil { 1618 allocDir.Destroy() 1619 t.Fatalf("failed to build task dir: %v", err) 1620 } 1621 copyImage(t, taskDir, "busybox.tar") 1622 1623 // Setup driver 1624 alloc := mock.Alloc() 1625 logger := testLogger() 1626 emitter := func(m string, args ...interface{}) { 1627 logger.Printf("[EVENT] "+m, args...) 1628 } 1629 driverCtx := NewDriverContext(task.Name, alloc.ID, cfg, cfg.Node, testLogger(), emitter) 1630 driver := NewDockerDriver(driverCtx) 1631 1632 // Setup execCtx 1633 envBuilder := env.NewBuilder(cfg.Node, alloc, task, cfg.Region) 1634 SetEnvvars(envBuilder, driver.FSIsolation(), taskDir, cfg) 1635 execCtx := NewExecContext(taskDir, envBuilder.Build()) 1636 1637 // Setup cleanup function 1638 cleanup := func() { 1639 allocDir.Destroy() 1640 if filepath.IsAbs(hostpath) { 1641 os.RemoveAll(hostpath) 1642 } 1643 } 1644 return task, driver, execCtx, hostfile, cleanup 1645 } 1646 1647 func TestDockerDriver_VolumesDisabled(t *testing.T) { 1648 if !tu.IsTravis() { 1649 t.Parallel() 1650 } 1651 if !testutil.DockerIsConnected(t) { 1652 t.Skip("Docker not connected") 1653 } 1654 1655 cfg := testConfig(t) 1656 cfg.Options = map[string]string{ 1657 dockerVolumesConfigOption: "false", 1658 "docker.cleanup.image": "false", 1659 } 1660 1661 { 1662 tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesdisabled") 1663 if err != nil { 1664 t.Fatalf("error creating temporary dir: %v", err) 1665 } 1666 1667 task, driver, execCtx, _, cleanup := setupDockerVolumes(t, cfg, tmpvol) 1668 defer cleanup() 1669 1670 _, err = driver.Prestart(execCtx, task) 1671 if err != nil { 1672 t.Fatalf("error in prestart: %v", err) 1673 } 1674 if _, err := driver.Start(execCtx, task); err == nil { 1675 t.Fatalf("Started driver successfully when volumes should have been disabled.") 1676 } 1677 } 1678 1679 // Relative paths should still be allowed 1680 { 1681 task, driver, execCtx, fn, cleanup := setupDockerVolumes(t, cfg, ".") 1682 defer cleanup() 1683 1684 _, err := driver.Prestart(execCtx, task) 1685 if err != nil { 1686 t.Fatalf("error in prestart: %v", err) 1687 } 1688 resp, err := driver.Start(execCtx, task) 1689 if err != nil { 1690 t.Fatalf("err: %v", err) 1691 } 1692 defer resp.Handle.Kill() 1693 1694 select { 1695 case res := <-resp.Handle.WaitCh(): 1696 if !res.Successful() { 1697 t.Fatalf("unexpected err: %v", res) 1698 } 1699 case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second): 1700 t.Fatalf("timeout") 1701 } 1702 1703 if _, err := ioutil.ReadFile(filepath.Join(execCtx.TaskDir.Dir, fn)); err != nil { 1704 t.Fatalf("unexpected error reading %s: %v", fn, err) 1705 } 1706 } 1707 1708 // Volume Drivers should be rejected (error) 1709 { 1710 task, driver, execCtx, _, cleanup := setupDockerVolumes(t, cfg, "fake_flocker_vol") 1711 defer cleanup() 1712 task.Config["volume_driver"] = "flocker" 1713 1714 if _, err := driver.Prestart(execCtx, task); err != nil { 1715 t.Fatalf("error in prestart: %v", err) 1716 } 1717 if _, err := driver.Start(execCtx, task); err == nil { 1718 t.Fatalf("Started driver successfully when volume drivers should have been disabled.") 1719 } 1720 } 1721 1722 } 1723 1724 func TestDockerDriver_VolumesEnabled(t *testing.T) { 1725 if !tu.IsTravis() { 1726 t.Parallel() 1727 } 1728 if !testutil.DockerIsConnected(t) { 1729 t.Skip("Docker not connected") 1730 } 1731 1732 cfg := testConfig(t) 1733 1734 tmpvol, err := ioutil.TempDir("", "nomadtest_docker_volumesenabled") 1735 if err != nil { 1736 t.Fatalf("error creating temporary dir: %v", err) 1737 } 1738 1739 // Evaluate symlinks so it works on MacOS 1740 tmpvol, err = filepath.EvalSymlinks(tmpvol) 1741 if err != nil { 1742 t.Fatalf("error evaluating symlinks: %v", err) 1743 } 1744 1745 task, driver, execCtx, hostpath, cleanup := setupDockerVolumes(t, cfg, tmpvol) 1746 defer cleanup() 1747 1748 _, err = driver.Prestart(execCtx, task) 1749 if err != nil { 1750 t.Fatalf("error in prestart: %v", err) 1751 } 1752 resp, err := driver.Start(execCtx, task) 1753 if err != nil { 1754 t.Fatalf("Failed to start docker driver: %v", err) 1755 } 1756 defer resp.Handle.Kill() 1757 1758 select { 1759 case res := <-resp.Handle.WaitCh(): 1760 if !res.Successful() { 1761 t.Fatalf("unexpected err: %v", res) 1762 } 1763 case <-time.After(time.Duration(tu.TestMultiplier()*10) * time.Second): 1764 t.Fatalf("timeout") 1765 } 1766 1767 if _, err := ioutil.ReadFile(hostpath); err != nil { 1768 t.Fatalf("unexpected error reading %s: %v", hostpath, err) 1769 } 1770 } 1771 1772 func TestDockerDriver_Mounts(t *testing.T) { 1773 if !tu.IsTravis() { 1774 t.Parallel() 1775 } 1776 if !testutil.DockerIsConnected(t) { 1777 t.Skip("Docker not connected") 1778 } 1779 1780 goodMount := map[string]interface{}{ 1781 "target": "/nomad", 1782 "volume_options": []interface{}{ 1783 map[string]interface{}{ 1784 "labels": []interface{}{ 1785 map[string]string{"foo": "bar"}, 1786 }, 1787 "driver_config": []interface{}{ 1788 map[string]interface{}{ 1789 "name": "local", 1790 "options": []interface{}{ 1791 map[string]interface{}{ 1792 "foo": "bar", 1793 }, 1794 }, 1795 }, 1796 }, 1797 }, 1798 }, 1799 "readonly": true, 1800 "source": "test", 1801 } 1802 1803 cases := []struct { 1804 Name string 1805 Mounts []interface{} 1806 Error string 1807 }{ 1808 { 1809 Name: "good-one", 1810 Error: "", 1811 Mounts: []interface{}{goodMount}, 1812 }, 1813 { 1814 Name: "good-many", 1815 Error: "", 1816 Mounts: []interface{}{goodMount, goodMount, goodMount}, 1817 }, 1818 { 1819 Name: "multiple volume options", 1820 Error: "Only one volume_options stanza allowed", 1821 Mounts: []interface{}{ 1822 map[string]interface{}{ 1823 "target": "/nomad", 1824 "volume_options": []interface{}{ 1825 map[string]interface{}{ 1826 "driver_config": []interface{}{ 1827 map[string]interface{}{ 1828 "name": "local", 1829 }, 1830 }, 1831 }, 1832 map[string]interface{}{ 1833 "driver_config": []interface{}{ 1834 map[string]interface{}{ 1835 "name": "local", 1836 }, 1837 }, 1838 }, 1839 }, 1840 }, 1841 }, 1842 }, 1843 { 1844 Name: "multiple driver configs", 1845 Error: "volume driver config may only be specified once", 1846 Mounts: []interface{}{ 1847 map[string]interface{}{ 1848 "target": "/nomad", 1849 "volume_options": []interface{}{ 1850 map[string]interface{}{ 1851 "driver_config": []interface{}{ 1852 map[string]interface{}{ 1853 "name": "local", 1854 }, 1855 map[string]interface{}{ 1856 "name": "local", 1857 }, 1858 }, 1859 }, 1860 }, 1861 }, 1862 }, 1863 }, 1864 { 1865 Name: "multiple volume labels", 1866 Error: "labels may only be", 1867 Mounts: []interface{}{ 1868 map[string]interface{}{ 1869 "target": "/nomad", 1870 "volume_options": []interface{}{ 1871 map[string]interface{}{ 1872 "labels": []interface{}{ 1873 map[string]string{"foo": "bar"}, 1874 map[string]string{"baz": "bam"}, 1875 }, 1876 }, 1877 }, 1878 }, 1879 }, 1880 }, 1881 { 1882 Name: "multiple driver options", 1883 Error: "driver options may only", 1884 Mounts: []interface{}{ 1885 map[string]interface{}{ 1886 "target": "/nomad", 1887 "volume_options": []interface{}{ 1888 map[string]interface{}{ 1889 "driver_config": []interface{}{ 1890 map[string]interface{}{ 1891 "name": "local", 1892 "options": []interface{}{ 1893 map[string]interface{}{ 1894 "foo": "bar", 1895 }, 1896 map[string]interface{}{ 1897 "bam": "bar", 1898 }, 1899 }, 1900 }, 1901 }, 1902 }, 1903 }, 1904 }, 1905 }, 1906 }, 1907 } 1908 1909 task := &structs.Task{ 1910 Name: "redis-demo", 1911 Driver: "docker", 1912 Config: map[string]interface{}{ 1913 "image": "busybox", 1914 "load": "busybox.tar", 1915 "command": "/bin/sleep", 1916 "args": []string{"10000"}, 1917 }, 1918 Resources: &structs.Resources{ 1919 MemoryMB: 256, 1920 CPU: 512, 1921 }, 1922 LogConfig: &structs.LogConfig{ 1923 MaxFiles: 10, 1924 MaxFileSizeMB: 10, 1925 }, 1926 } 1927 1928 for _, c := range cases { 1929 t.Run(c.Name, func(t *testing.T) { 1930 // Build the task 1931 task.Config["mounts"] = c.Mounts 1932 1933 ctx := testDockerDriverContexts(t, task) 1934 driver := NewDockerDriver(ctx.DriverCtx) 1935 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 1936 defer ctx.AllocDir.Destroy() 1937 1938 _, err := driver.Prestart(ctx.ExecCtx, task) 1939 if err == nil && c.Error != "" { 1940 t.Fatalf("expected error: %v", c.Error) 1941 } else if err != nil { 1942 if c.Error == "" { 1943 t.Fatalf("unexpected error in prestart: %v", err) 1944 } else if !strings.Contains(err.Error(), c.Error) { 1945 t.Fatalf("expected error %q; got %v", c.Error, err) 1946 } 1947 } 1948 }) 1949 } 1950 } 1951 1952 // TestDockerDriver_Cleanup ensures Cleanup removes only downloaded images. 1953 func TestDockerDriver_Cleanup(t *testing.T) { 1954 if !tu.IsTravis() { 1955 t.Parallel() 1956 } 1957 if !testutil.DockerIsConnected(t) { 1958 t.Skip("Docker not connected") 1959 } 1960 1961 imageName := "hello-world:latest" 1962 task := &structs.Task{ 1963 Name: "cleanup_test", 1964 Driver: "docker", 1965 Config: map[string]interface{}{ 1966 "image": imageName, 1967 }, 1968 } 1969 tctx := testDockerDriverContexts(t, task) 1970 defer tctx.AllocDir.Destroy() 1971 1972 // Run Prestart 1973 driver := NewDockerDriver(tctx.DriverCtx).(*DockerDriver) 1974 resp, err := driver.Prestart(tctx.ExecCtx, task) 1975 if err != nil { 1976 t.Fatalf("error in prestart: %v", err) 1977 } 1978 res := resp.CreatedResources 1979 if len(res.Resources) == 0 || len(res.Resources[dockerImageResKey]) == 0 { 1980 t.Fatalf("no created resources: %#v", res) 1981 } 1982 1983 // Cleanup 1984 rescopy := res.Copy() 1985 if err := driver.Cleanup(tctx.ExecCtx, rescopy); err != nil { 1986 t.Fatalf("Cleanup failed: %v", err) 1987 } 1988 1989 // Make sure rescopy is updated 1990 if len(rescopy.Resources) > 0 { 1991 t.Errorf("Cleanup should have cleared resource map: %#v", rescopy.Resources) 1992 } 1993 1994 // Ensure image was removed 1995 tu.WaitForResult(func() (bool, error) { 1996 if _, err := client.InspectImage(driver.driverConfig.ImageName); err == nil { 1997 return false, fmt.Errorf("image exists but should have been removed. Does another %v container exist?", imageName) 1998 } 1999 2000 return true, nil 2001 }, func(err error) { 2002 t.Fatalf("err: %v", err) 2003 }) 2004 2005 // The image doesn't exist which shouldn't be an error when calling 2006 // Cleanup, so call it again to make sure. 2007 if err := driver.Cleanup(tctx.ExecCtx, res.Copy()); err != nil { 2008 t.Fatalf("Cleanup failed: %v", err) 2009 } 2010 } 2011 2012 func copyImage(t *testing.T, taskDir *allocdir.TaskDir, image string) { 2013 dst := filepath.Join(taskDir.LocalDir, image) 2014 copyFile(filepath.Join("./test-resources/docker", image), dst, t) 2015 } 2016 2017 func TestDockerDriver_AuthConfiguration(t *testing.T) { 2018 if !tu.IsTravis() { 2019 t.Parallel() 2020 } 2021 if !testutil.DockerIsConnected(t) { 2022 t.Skip("Docker not connected") 2023 } 2024 2025 path := "./test-resources/docker/auth.json" 2026 cases := []struct { 2027 Repo string 2028 AuthConfig *docker.AuthConfiguration 2029 }{ 2030 { 2031 Repo: "lolwhat.com/what:1337", 2032 AuthConfig: nil, 2033 }, 2034 { 2035 Repo: "redis:3.2", 2036 AuthConfig: &docker.AuthConfiguration{ 2037 Username: "test", 2038 Password: "1234", 2039 Email: "", 2040 ServerAddress: "https://index.docker.io/v1/", 2041 }, 2042 }, 2043 { 2044 Repo: "quay.io/redis:3.2", 2045 AuthConfig: &docker.AuthConfiguration{ 2046 Username: "test", 2047 Password: "5678", 2048 Email: "", 2049 ServerAddress: "quay.io", 2050 }, 2051 }, 2052 { 2053 Repo: "other.io/redis:3.2", 2054 AuthConfig: &docker.AuthConfiguration{ 2055 Username: "test", 2056 Password: "abcd", 2057 Email: "", 2058 ServerAddress: "https://other.io/v1/", 2059 }, 2060 }, 2061 } 2062 2063 for i, c := range cases { 2064 act, err := authFromDockerConfig(path)(c.Repo) 2065 if err != nil { 2066 t.Fatalf("Test %d failed: %v", i+1, err) 2067 } 2068 2069 if !reflect.DeepEqual(act, c.AuthConfig) { 2070 t.Fatalf("Test %d failed: Unexpected auth config: got %+v; want %+v", i+1, act, c.AuthConfig) 2071 } 2072 } 2073 } 2074 2075 func TestDockerDriver_OOMKilled(t *testing.T) { 2076 if !tu.IsTravis() { 2077 t.Parallel() 2078 } 2079 if !testutil.DockerIsConnected(t) { 2080 t.Skip("Docker not connected") 2081 } 2082 2083 task := &structs.Task{ 2084 Name: "oom-killed", 2085 Driver: "docker", 2086 Config: map[string]interface{}{ 2087 "image": "busybox", 2088 "load": "busybox.tar", 2089 "command": "sh", 2090 // Incrementally creates a bigger and bigger variable. 2091 "args": []string{"-c", "x=a; while true; do eval x='$x$x'; done"}, 2092 }, 2093 LogConfig: &structs.LogConfig{ 2094 MaxFiles: 10, 2095 MaxFileSizeMB: 10, 2096 }, 2097 Resources: &structs.Resources{ 2098 CPU: 250, 2099 MemoryMB: 10, 2100 DiskMB: 20, 2101 Networks: []*structs.NetworkResource{}, 2102 }, 2103 } 2104 2105 _, handle, cleanup := dockerSetup(t, task) 2106 defer cleanup() 2107 2108 select { 2109 case res := <-handle.WaitCh(): 2110 if res.Successful() { 2111 t.Fatalf("expected error, but container exited successful") 2112 } 2113 2114 if res.Err.Error() != "OOM Killed" { 2115 t.Fatalf("not killed by OOM killer: %s", res.Err) 2116 } 2117 2118 t.Logf("Successfully killed by OOM killer") 2119 2120 case <-time.After(time.Duration(tu.TestMultiplier()*5) * time.Second): 2121 t.Fatalf("timeout") 2122 } 2123 } 2124 2125 func TestDockerDriver_Devices_IsInvalidConfig(t *testing.T) { 2126 if !tu.IsTravis() { 2127 t.Parallel() 2128 } 2129 if !testutil.DockerIsConnected(t) { 2130 t.Skip("Docker not connected") 2131 } 2132 2133 brokenConfigs := []interface{}{ 2134 map[string]interface{}{ 2135 "host_path": "", 2136 }, 2137 map[string]interface{}{ 2138 "host_path": "/dev/sda1", 2139 "cgroup_permissions": "rxb", 2140 }, 2141 } 2142 2143 test_cases := []struct { 2144 deviceConfig interface{} 2145 err error 2146 }{ 2147 {[]interface{}{brokenConfigs[0]}, fmt.Errorf("host path must be set in configuration for devices")}, 2148 {[]interface{}{brokenConfigs[1]}, fmt.Errorf("invalid cgroup permission string: \"rxb\"")}, 2149 } 2150 2151 for _, tc := range test_cases { 2152 task, _, _ := dockerTask(t) 2153 task.Config["devices"] = tc.deviceConfig 2154 2155 ctx := testDockerDriverContexts(t, task) 2156 driver := NewDockerDriver(ctx.DriverCtx) 2157 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 2158 defer ctx.AllocDir.Destroy() 2159 2160 if _, err := driver.Prestart(ctx.ExecCtx, task); err == nil || err.Error() != tc.err.Error() { 2161 t.Fatalf("error expected in prestart, got %v, expected %v", err, tc.err) 2162 } 2163 } 2164 } 2165 2166 func TestDockerDriver_Device_Success(t *testing.T) { 2167 if !tu.IsTravis() { 2168 t.Parallel() 2169 } 2170 if !testutil.DockerIsConnected(t) { 2171 t.Skip("Docker not connected") 2172 } 2173 2174 if runtime.GOOS != "linux" { 2175 t.Skip("test device mounts only on linux") 2176 } 2177 2178 hostPath := "/dev/random" 2179 containerPath := "/dev/myrandom" 2180 perms := "rwm" 2181 2182 expectedDevice := docker.Device{ 2183 PathOnHost: hostPath, 2184 PathInContainer: containerPath, 2185 CgroupPermissions: perms, 2186 } 2187 config := map[string]interface{}{ 2188 "host_path": hostPath, 2189 "container_path": containerPath, 2190 } 2191 2192 task, _, _ := dockerTask(t) 2193 task.Config["devices"] = []interface{}{config} 2194 2195 client, handle, cleanup := dockerSetup(t, task) 2196 defer cleanup() 2197 2198 waitForExist(t, client, handle) 2199 2200 container, err := client.InspectContainer(handle.ContainerID()) 2201 if err != nil { 2202 t.Fatalf("err: %v", err) 2203 } 2204 2205 assert.NotEmpty(t, container.HostConfig.Devices, "Expected one device") 2206 assert.Equal(t, expectedDevice, container.HostConfig.Devices[0], "Incorrect device ") 2207 } 2208 2209 func TestDockerDriver_Entrypoint(t *testing.T) { 2210 if !tu.IsTravis() { 2211 t.Parallel() 2212 } 2213 if !testutil.DockerIsConnected(t) { 2214 t.Skip("Docker not connected") 2215 } 2216 2217 entrypoint := []string{"/bin/sh", "-c"} 2218 task, _, _ := dockerTask(t) 2219 task.Config["entrypoint"] = entrypoint 2220 2221 client, handle, cleanup := dockerSetup(t, task) 2222 defer cleanup() 2223 2224 waitForExist(t, client, handle) 2225 2226 container, err := client.InspectContainer(handle.ContainerID()) 2227 if err != nil { 2228 t.Fatalf("err: %v", err) 2229 } 2230 2231 require.Len(t, container.Config.Entrypoint, 2, "Expected one entrypoint") 2232 require.Equal(t, entrypoint, container.Config.Entrypoint, "Incorrect entrypoint ") 2233 } 2234 2235 func TestDockerDriver_Kill(t *testing.T) { 2236 assert := assert.New(t) 2237 if !tu.IsTravis() { 2238 t.Parallel() 2239 } 2240 if !testutil.DockerIsConnected(t) { 2241 t.Skip("Docker not connected") 2242 } 2243 2244 // Tasks started with a signal that is not supported should not error 2245 task := &structs.Task{ 2246 Name: "nc-demo", 2247 Driver: "docker", 2248 KillSignal: "SIGKILL", 2249 Config: map[string]interface{}{ 2250 "load": "busybox.tar", 2251 "image": "busybox", 2252 "command": "/bin/nc", 2253 "args": []string{"-l", "127.0.0.1", "-p", "0"}, 2254 }, 2255 LogConfig: &structs.LogConfig{ 2256 MaxFiles: 10, 2257 MaxFileSizeMB: 10, 2258 }, 2259 Resources: basicResources, 2260 } 2261 2262 ctx := testDockerDriverContexts(t, task) 2263 defer ctx.AllocDir.Destroy() 2264 d := NewDockerDriver(ctx.DriverCtx) 2265 copyImage(t, ctx.ExecCtx.TaskDir, "busybox.tar") 2266 2267 _, err := d.Prestart(ctx.ExecCtx, task) 2268 if err != nil { 2269 t.Fatalf("error in prestart: %v", err) 2270 } 2271 2272 resp, err := d.Start(ctx.ExecCtx, task) 2273 assert.Nil(err) 2274 assert.NotNil(resp.Handle) 2275 2276 handle := resp.Handle.(*DockerHandle) 2277 waitForExist(t, client, handle) 2278 err = handle.Kill() 2279 assert.Nil(err) 2280 } 2281 2282 func TestDockerDriver_ReadonlyRootfs(t *testing.T) { 2283 if !tu.IsTravis() { 2284 t.Parallel() 2285 } 2286 if !testutil.DockerIsConnected(t) { 2287 t.Skip("Docker not connected") 2288 } 2289 2290 task, _, _ := dockerTask(t) 2291 task.Config["readonly_rootfs"] = true 2292 2293 client, handle, cleanup := dockerSetup(t, task) 2294 defer cleanup() 2295 2296 waitForExist(t, client, handle) 2297 2298 container, err := client.InspectContainer(handle.ContainerID()) 2299 assert.Nil(t, err, "Error inspecting container: %v", err) 2300 2301 assert.True(t, container.HostConfig.ReadonlyRootfs, "ReadonlyRootfs option not set") 2302 } 2303 2304 func TestDockerDriver_AdvertiseIPv6Address(t *testing.T) { 2305 if !tu.IsTravis() { 2306 t.Parallel() 2307 } 2308 if !testutil.DockerIsConnected(t) { 2309 t.Skip("Docker not connected") 2310 } 2311 2312 expectedPrefix := "2001:db8:1::242:ac11" 2313 expectedAdvertise := true 2314 task := &structs.Task{ 2315 Name: "nc-demo", 2316 Driver: "docker", 2317 Config: map[string]interface{}{ 2318 "image": "busybox", 2319 "load": "busybox.tar", 2320 "command": "/bin/nc", 2321 "args": []string{"-l", "127.0.0.1", "-p", "0"}, 2322 "advertise_ipv6_address": expectedAdvertise, 2323 }, 2324 Resources: &structs.Resources{ 2325 MemoryMB: 256, 2326 CPU: 512, 2327 }, 2328 LogConfig: &structs.LogConfig{ 2329 MaxFiles: 10, 2330 MaxFileSizeMB: 10, 2331 }, 2332 } 2333 2334 client := newTestDockerClient(t) 2335 2336 // Make sure IPv6 is enabled 2337 net, err := client.NetworkInfo("bridge") 2338 if err != nil { 2339 t.Skip("error retrieving bridge network information, skipping") 2340 } 2341 if net == nil || !net.EnableIPv6 { 2342 t.Skip("IPv6 not enabled on bridge network, skipping") 2343 } 2344 2345 tctx := testDockerDriverContexts(t, task) 2346 driver := NewDockerDriver(tctx.DriverCtx) 2347 copyImage(t, tctx.ExecCtx.TaskDir, "busybox.tar") 2348 defer tctx.AllocDir.Destroy() 2349 2350 presp, err := driver.Prestart(tctx.ExecCtx, task) 2351 defer driver.Cleanup(tctx.ExecCtx, presp.CreatedResources) 2352 if err != nil { 2353 t.Fatalf("Error in prestart: %v", err) 2354 } 2355 2356 sresp, err := driver.Start(tctx.ExecCtx, task) 2357 if err != nil { 2358 t.Fatalf("Error in start: %v", err) 2359 } 2360 2361 if sresp.Handle == nil { 2362 t.Fatalf("handle is nil\nStack\n%s", debug.Stack()) 2363 } 2364 2365 assert.Equal(t, expectedAdvertise, sresp.Network.AutoAdvertise, "Wrong autoadvertise. Expect: %s, got: %s", expectedAdvertise, sresp.Network.AutoAdvertise) 2366 2367 if !strings.HasPrefix(sresp.Network.IP, expectedPrefix) { 2368 t.Fatalf("Got IP address %q want ip address with prefix %q", sresp.Network.IP, expectedPrefix) 2369 } 2370 2371 defer sresp.Handle.Kill() 2372 handle := sresp.Handle.(*DockerHandle) 2373 2374 waitForExist(t, client, handle) 2375 2376 container, err := client.InspectContainer(handle.ContainerID()) 2377 if err != nil { 2378 t.Fatalf("Error inspecting container: %v", err) 2379 } 2380 2381 if !strings.HasPrefix(container.NetworkSettings.GlobalIPv6Address, expectedPrefix) { 2382 t.Fatalf("Got GlobalIPv6address %s want GlobalIPv6address with prefix %s", expectedPrefix, container.NetworkSettings.GlobalIPv6Address) 2383 } 2384 }