github.com/ryanslade/nomad@v0.2.4-0.20160128061903-fc95782f2089/client/driver/docker_test.go (about) 1 package driver 2 3 import ( 4 "fmt" 5 "io/ioutil" 6 "math/rand" 7 "path/filepath" 8 "reflect" 9 "runtime/debug" 10 "testing" 11 "time" 12 13 docker "github.com/fsouza/go-dockerclient" 14 "github.com/hashicorp/nomad/client/config" 15 "github.com/hashicorp/nomad/client/driver/env" 16 cstructs "github.com/hashicorp/nomad/client/driver/structs" 17 "github.com/hashicorp/nomad/nomad/structs" 18 "github.com/hashicorp/nomad/testutil" 19 ) 20 21 // dockerIsConnected checks to see if a docker daemon is available (local or remote) 22 func dockerIsConnected(t *testing.T) bool { 23 client, err := docker.NewClientFromEnv() 24 if err != nil { 25 return false 26 } 27 28 // Creating a client doesn't actually connect, so make sure we do something 29 // like call Version() on it. 30 env, err := client.Version() 31 if err != nil { 32 t.Logf("Failed to connect to docker daemon: %s", err) 33 return false 34 } 35 36 t.Logf("Successfully connected to docker daemon running version %s", env.Get("Version")) 37 return true 38 } 39 40 func dockerIsRemote(t *testing.T) bool { 41 client, err := docker.NewClientFromEnv() 42 if err != nil { 43 return false 44 } 45 46 // Technically this could be a local tcp socket but for testing purposes 47 // we'll just assume that tcp is only used for remote connections. 48 if client.Endpoint()[0:3] == "tcp" { 49 return true 50 } 51 return false 52 } 53 54 // Ports used by tests 55 var ( 56 docker_reserved = 32768 + int(rand.Int31n(25000)) 57 docker_dynamic = 32768 + int(rand.Int31n(25000)) 58 ) 59 60 // Returns a task with a reserved and dynamic port. The ports are returned 61 // respectively. 62 func dockerTask() (*structs.Task, int, int) { 63 docker_reserved += 1 64 docker_dynamic += 1 65 return &structs.Task{ 66 Name: "redis-demo", 67 Config: map[string]interface{}{ 68 "image": "redis", 69 }, 70 Resources: &structs.Resources{ 71 MemoryMB: 256, 72 CPU: 512, 73 Networks: []*structs.NetworkResource{ 74 &structs.NetworkResource{ 75 IP: "127.0.0.1", 76 ReservedPorts: []structs.Port{{"main", docker_reserved}}, 77 DynamicPorts: []structs.Port{{"REDIS", docker_dynamic}}, 78 }, 79 }, 80 }, 81 }, docker_reserved, docker_dynamic 82 } 83 84 // dockerSetup does all of the basic setup you need to get a running docker 85 // process up and running for testing. Use like: 86 // 87 // task := taskTemplate() 88 // // do custom task configuration 89 // client, handle, cleanup := dockerSetup(t, task) 90 // defer cleanup() 91 // // do test stuff 92 // 93 // If there is a problem during setup this function will abort or skip the test 94 // and indicate the reason. 95 func dockerSetup(t *testing.T, task *structs.Task) (*docker.Client, DriverHandle, func()) { 96 if !dockerIsConnected(t) { 97 t.SkipNow() 98 } 99 100 client, err := docker.NewClientFromEnv() 101 if err != nil { 102 t.Fatalf("Failed to initialize client: %s\nStack\n%s", err, debug.Stack()) 103 } 104 105 driverCtx, execCtx := testDriverContexts(task) 106 driver := NewDockerDriver(driverCtx) 107 108 handle, err := driver.Start(execCtx, task) 109 if err != nil { 110 execCtx.AllocDir.Destroy() 111 t.Fatalf("Failed to start driver: %s\nStack\n%s", err, debug.Stack()) 112 } 113 if handle == nil { 114 execCtx.AllocDir.Destroy() 115 t.Fatalf("handle is nil\nStack\n%s", debug.Stack()) 116 } 117 118 cleanup := func() { 119 handle.Kill() 120 execCtx.AllocDir.Destroy() 121 } 122 123 return client, handle, cleanup 124 } 125 126 func TestDockerDriver_Handle(t *testing.T) { 127 t.Parallel() 128 h := &DockerHandle{ 129 imageID: "imageid", 130 containerID: "containerid", 131 killTimeout: 5 * time.Nanosecond, 132 doneCh: make(chan struct{}), 133 waitCh: make(chan *cstructs.WaitResult, 1), 134 } 135 136 actual := h.ID() 137 expected := `DOCKER:{"ImageID":"imageid","ContainerID":"containerid","KillTimeout":5}` 138 if actual != expected { 139 t.Errorf("Expected `%s`, found `%s`", expected, actual) 140 } 141 } 142 143 // This test should always pass, even if docker daemon is not available 144 func TestDockerDriver_Fingerprint(t *testing.T) { 145 t.Parallel() 146 driverCtx, _ := testDriverContexts(&structs.Task{Name: "foo"}) 147 d := NewDockerDriver(driverCtx) 148 node := &structs.Node{ 149 Attributes: make(map[string]string), 150 } 151 apply, err := d.Fingerprint(&config.Config{}, node) 152 if err != nil { 153 t.Fatalf("err: %v", err) 154 } 155 if apply != dockerIsConnected(t) { 156 t.Fatalf("Fingerprinter should detect when docker is available") 157 } 158 if node.Attributes["driver.docker"] != "1" { 159 t.Log("Docker daemon not available. The remainder of the docker tests will be skipped.") 160 } 161 t.Logf("Found docker version %s", node.Attributes["driver.docker.version"]) 162 } 163 164 func TestDockerDriver_StartOpen_Wait(t *testing.T) { 165 t.Parallel() 166 if !dockerIsConnected(t) { 167 t.SkipNow() 168 } 169 170 task := &structs.Task{ 171 Name: "redis-demo", 172 Config: map[string]interface{}{ 173 "image": "redis", 174 }, 175 Resources: basicResources, 176 } 177 178 driverCtx, execCtx := testDriverContexts(task) 179 defer execCtx.AllocDir.Destroy() 180 d := NewDockerDriver(driverCtx) 181 182 handle, err := d.Start(execCtx, task) 183 if err != nil { 184 t.Fatalf("err: %v", err) 185 } 186 if handle == nil { 187 t.Fatalf("missing handle") 188 } 189 defer handle.Kill() 190 191 // Attempt to open 192 handle2, err := d.Open(execCtx, handle.ID()) 193 if err != nil { 194 t.Fatalf("err: %v", err) 195 } 196 if handle2 == nil { 197 t.Fatalf("missing handle") 198 } 199 } 200 201 func TestDockerDriver_Start_Wait(t *testing.T) { 202 t.Parallel() 203 task := &structs.Task{ 204 Name: "redis-demo", 205 Config: map[string]interface{}{ 206 "image": "redis", 207 "command": "redis-server", 208 "args": []string{"-v"}, 209 }, 210 Resources: &structs.Resources{ 211 MemoryMB: 256, 212 CPU: 512, 213 }, 214 } 215 216 _, handle, cleanup := dockerSetup(t, task) 217 defer cleanup() 218 219 // Update should be a no-op 220 err := handle.Update(task) 221 if err != nil { 222 t.Fatalf("err: %v", err) 223 } 224 225 select { 226 case res := <-handle.WaitCh(): 227 if !res.Successful() { 228 t.Fatalf("err: %v", res) 229 } 230 case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second): 231 t.Fatalf("timeout") 232 } 233 } 234 235 func TestDockerDriver_Start_Wait_AllocDir(t *testing.T) { 236 t.Parallel() 237 // This test requires that the alloc dir be mounted into docker as a volume. 238 // Because this cannot happen when docker is run remotely, e.g. when running 239 // docker in a VM, we skip this when we detect Docker is being run remotely. 240 if !dockerIsConnected(t) || dockerIsRemote(t) { 241 t.SkipNow() 242 } 243 244 exp := []byte{'w', 'i', 'n'} 245 file := "output.txt" 246 task := &structs.Task{ 247 Name: "redis-demo", 248 Config: map[string]interface{}{ 249 "image": "redis", 250 "command": "/bin/bash", 251 "args": []string{ 252 "-c", 253 fmt.Sprintf(`sleep 1; echo -n %s > $%s/%s`, 254 string(exp), env.AllocDir, file), 255 }, 256 }, 257 Resources: &structs.Resources{ 258 MemoryMB: 256, 259 CPU: 512, 260 }, 261 } 262 263 driverCtx, execCtx := testDriverContexts(task) 264 defer execCtx.AllocDir.Destroy() 265 d := NewDockerDriver(driverCtx) 266 267 handle, err := d.Start(execCtx, task) 268 if err != nil { 269 t.Fatalf("err: %v", err) 270 } 271 if handle == nil { 272 t.Fatalf("missing handle") 273 } 274 defer handle.Kill() 275 276 select { 277 case res := <-handle.WaitCh(): 278 if !res.Successful() { 279 t.Fatalf("err: %v", res) 280 } 281 case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second): 282 t.Fatalf("timeout") 283 } 284 285 // Check that data was written to the shared alloc directory. 286 outputFile := filepath.Join(execCtx.AllocDir.SharedDir, file) 287 act, err := ioutil.ReadFile(outputFile) 288 if err != nil { 289 t.Fatalf("Couldn't read expected output: %v", err) 290 } 291 292 if !reflect.DeepEqual(act, exp) { 293 t.Fatalf("Command outputted %v; want %v", act, exp) 294 } 295 } 296 297 func TestDockerDriver_Start_Kill_Wait(t *testing.T) { 298 t.Parallel() 299 task := &structs.Task{ 300 Name: "redis-demo", 301 Config: map[string]interface{}{ 302 "image": "redis", 303 "command": "/bin/sleep", 304 "args": []string{"10"}, 305 }, 306 Resources: basicResources, 307 } 308 309 _, handle, cleanup := dockerSetup(t, task) 310 defer cleanup() 311 312 go func() { 313 time.Sleep(100 * time.Millisecond) 314 err := handle.Kill() 315 if err != nil { 316 t.Fatalf("err: %v", err) 317 } 318 }() 319 320 select { 321 case res := <-handle.WaitCh(): 322 if res.Successful() { 323 t.Fatalf("should err: %v", res) 324 } 325 case <-time.After(time.Duration(testutil.TestMultiplier()*10) * time.Second): 326 t.Fatalf("timeout") 327 } 328 } 329 330 func TestDocker_StartN(t *testing.T) { 331 t.Parallel() 332 if !dockerIsConnected(t) { 333 t.SkipNow() 334 } 335 336 task1, _, _ := dockerTask() 337 task2, _, _ := dockerTask() 338 task3, _, _ := dockerTask() 339 taskList := []*structs.Task{task1, task2, task3} 340 341 handles := make([]DriverHandle, len(taskList)) 342 343 t.Logf("==> Starting %d tasks", len(taskList)) 344 345 // Let's spin up a bunch of things 346 var err error 347 for idx, task := range taskList { 348 driverCtx, execCtx := testDriverContexts(task) 349 defer execCtx.AllocDir.Destroy() 350 d := NewDockerDriver(driverCtx) 351 352 handles[idx], err = d.Start(execCtx, task) 353 if err != nil { 354 t.Errorf("Failed starting task #%d: %s", idx+1, err) 355 } 356 } 357 358 t.Log("==> All tasks are started. Terminating...") 359 360 for idx, handle := range handles { 361 if handle == nil { 362 t.Errorf("Bad handle for task #%d", idx+1) 363 continue 364 } 365 366 err := handle.Kill() 367 if err != nil { 368 t.Errorf("Failed stopping task #%d: %s", idx+1, err) 369 } 370 } 371 372 t.Log("==> Test complete!") 373 } 374 375 func TestDocker_StartNVersions(t *testing.T) { 376 t.Parallel() 377 if !dockerIsConnected(t) { 378 t.SkipNow() 379 } 380 381 task1, _, _ := dockerTask() 382 task1.Config["image"] = "redis" 383 384 task2, _, _ := dockerTask() 385 task2.Config["image"] = "redis:latest" 386 387 task3, _, _ := dockerTask() 388 task3.Config["image"] = "redis:3.0" 389 390 taskList := []*structs.Task{task1, task2, task3} 391 392 handles := make([]DriverHandle, len(taskList)) 393 394 t.Logf("==> Starting %d tasks", len(taskList)) 395 396 // Let's spin up a bunch of things 397 var err error 398 for idx, task := range taskList { 399 driverCtx, execCtx := testDriverContexts(task) 400 defer execCtx.AllocDir.Destroy() 401 d := NewDockerDriver(driverCtx) 402 403 handles[idx], err = d.Start(execCtx, task) 404 if err != nil { 405 t.Errorf("Failed starting task #%d: %s", idx+1, err) 406 } 407 } 408 409 t.Log("==> All tasks are started. Terminating...") 410 411 for idx, handle := range handles { 412 if handle == nil { 413 t.Errorf("Bad handle for task #%d", idx+1) 414 continue 415 } 416 417 err := handle.Kill() 418 if err != nil { 419 t.Errorf("Failed stopping task #%d: %s", idx+1, err) 420 } 421 } 422 423 t.Log("==> Test complete!") 424 } 425 426 func TestDockerHostNet(t *testing.T) { 427 t.Parallel() 428 expected := "host" 429 430 task := &structs.Task{ 431 Name: "redis-demo", 432 Config: map[string]interface{}{ 433 "image": "redis", 434 "network_mode": expected, 435 }, 436 Resources: &structs.Resources{ 437 MemoryMB: 256, 438 CPU: 512, 439 }, 440 } 441 442 client, handle, cleanup := dockerSetup(t, task) 443 defer cleanup() 444 445 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 446 if err != nil { 447 t.Fatalf("err: %v", err) 448 } 449 450 actual := container.HostConfig.NetworkMode 451 if actual != expected { 452 t.Errorf("DNS Network mode doesn't match.\nExpected:\n%s\nGot:\n%s\n", expected, actual) 453 } 454 } 455 456 func TestDockerLabels(t *testing.T) { 457 t.Parallel() 458 task, _, _ := dockerTask() 459 task.Config["labels"] = []map[string]string{ 460 map[string]string{ 461 "label1": "value1", 462 "label2": "value2", 463 }, 464 } 465 466 client, handle, cleanup := dockerSetup(t, task) 467 defer cleanup() 468 469 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 470 if err != nil { 471 t.Fatalf("err: %v", err) 472 } 473 474 if want, got := 2, len(container.Config.Labels); want != got { 475 t.Errorf("Wrong labels count for docker job. Expect: %d, got: %d", want, got) 476 } 477 478 if want, got := "value1", container.Config.Labels["label1"]; want != got { 479 t.Errorf("Wrong label value docker job. Expect: %s, got: %s", want, got) 480 } 481 } 482 483 func TestDockerDNS(t *testing.T) { 484 t.Parallel() 485 task, _, _ := dockerTask() 486 task.Config["dns_servers"] = []string{"8.8.8.8", "8.8.4.4"} 487 task.Config["dns_search_domains"] = []string{"example.com", "example.org", "example.net"} 488 489 client, handle, cleanup := dockerSetup(t, task) 490 defer cleanup() 491 492 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 493 if err != nil { 494 t.Fatalf("err: %v", err) 495 } 496 497 if !reflect.DeepEqual(task.Config["dns_servers"], container.HostConfig.DNS) { 498 t.Errorf("DNS Servers don't match.\nExpected:\n%s\nGot:\n%s\n", task.Config["dns_servers"], container.HostConfig.DNS) 499 } 500 501 if !reflect.DeepEqual(task.Config["dns_search_domains"], container.HostConfig.DNSSearch) { 502 t.Errorf("DNS Servers don't match.\nExpected:\n%s\nGot:\n%s\n", task.Config["dns_search_domains"], container.HostConfig.DNSSearch) 503 } 504 } 505 506 func inSlice(needle string, haystack []string) bool { 507 for _, h := range haystack { 508 if h == needle { 509 return true 510 } 511 } 512 return false 513 } 514 515 func TestDockerPortsNoMap(t *testing.T) { 516 t.Parallel() 517 task, res, dyn := dockerTask() 518 519 client, handle, cleanup := dockerSetup(t, task) 520 defer cleanup() 521 522 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 523 if err != nil { 524 t.Fatalf("err: %v", err) 525 } 526 527 // Verify that the correct ports are EXPOSED 528 expectedExposedPorts := map[docker.Port]struct{}{ 529 docker.Port(fmt.Sprintf("%d/tcp", res)): struct{}{}, 530 docker.Port(fmt.Sprintf("%d/udp", res)): struct{}{}, 531 docker.Port(fmt.Sprintf("%d/tcp", dyn)): struct{}{}, 532 docker.Port(fmt.Sprintf("%d/udp", dyn)): struct{}{}, 533 // This one comes from the redis container 534 docker.Port("6379/tcp"): struct{}{}, 535 } 536 537 if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) { 538 t.Errorf("Exposed ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedExposedPorts, container.Config.ExposedPorts) 539 } 540 541 // Verify that the correct ports are FORWARDED 542 expectedPortBindings := map[docker.Port][]docker.PortBinding{ 543 docker.Port(fmt.Sprintf("%d/tcp", res)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 544 docker.Port(fmt.Sprintf("%d/udp", res)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 545 docker.Port(fmt.Sprintf("%d/tcp", dyn)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 546 docker.Port(fmt.Sprintf("%d/udp", dyn)): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 547 } 548 549 if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) { 550 t.Errorf("Forwarded ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedPortBindings, container.HostConfig.PortBindings) 551 } 552 553 expectedEnvironment := map[string]string{ 554 "NOMAD_PORT_main": fmt.Sprintf("%d", res), 555 "NOMAD_PORT_REDIS": fmt.Sprintf("%d", dyn), 556 } 557 558 for key, val := range expectedEnvironment { 559 search := fmt.Sprintf("%s=%s", key, val) 560 if !inSlice(search, container.Config.Env) { 561 t.Errorf("Expected to find %s in container environment: %+v", search, container.Config.Env) 562 } 563 } 564 } 565 566 func TestDockerPortsMapping(t *testing.T) { 567 t.Parallel() 568 task, res, dyn := dockerTask() 569 task.Config["port_map"] = []map[string]string{ 570 map[string]string{ 571 "main": "8080", 572 "REDIS": "6379", 573 }, 574 } 575 576 client, handle, cleanup := dockerSetup(t, task) 577 defer cleanup() 578 579 container, err := client.InspectContainer(handle.(*DockerHandle).ContainerID()) 580 if err != nil { 581 t.Fatalf("err: %v", err) 582 } 583 584 // Verify that the correct ports are EXPOSED 585 expectedExposedPorts := map[docker.Port]struct{}{ 586 docker.Port("8080/tcp"): struct{}{}, 587 docker.Port("8080/udp"): struct{}{}, 588 docker.Port("6379/tcp"): struct{}{}, 589 docker.Port("6379/udp"): struct{}{}, 590 } 591 592 if !reflect.DeepEqual(container.Config.ExposedPorts, expectedExposedPorts) { 593 t.Errorf("Exposed ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedExposedPorts, container.Config.ExposedPorts) 594 } 595 596 // Verify that the correct ports are FORWARDED 597 expectedPortBindings := map[docker.Port][]docker.PortBinding{ 598 docker.Port("8080/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 599 docker.Port("8080/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", res)}}, 600 docker.Port("6379/tcp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 601 docker.Port("6379/udp"): []docker.PortBinding{docker.PortBinding{HostIP: "127.0.0.1", HostPort: fmt.Sprintf("%d", dyn)}}, 602 } 603 604 if !reflect.DeepEqual(container.HostConfig.PortBindings, expectedPortBindings) { 605 t.Errorf("Forwarded ports don't match.\nExpected:\n%s\nGot:\n%s\n", expectedPortBindings, container.HostConfig.PortBindings) 606 } 607 608 expectedEnvironment := map[string]string{ 609 "NOMAD_PORT_main": "8080", 610 "NOMAD_PORT_REDIS": "6379", 611 } 612 613 for key, val := range expectedEnvironment { 614 search := fmt.Sprintf("%s=%s", key, val) 615 if !inSlice(search, container.Config.Env) { 616 t.Errorf("Expected to find %s in container environment: %+v", search, container.Config.Env) 617 } 618 } 619 }