github.com/erriapo/docker@v1.6.0-rc2/integration/runtime_test.go (about) 1 package docker 2 3 import ( 4 "bytes" 5 "fmt" 6 "io" 7 std_log "log" 8 "net" 9 "net/url" 10 "os" 11 "path/filepath" 12 "runtime" 13 "strconv" 14 "strings" 15 "syscall" 16 "testing" 17 "time" 18 19 log "github.com/Sirupsen/logrus" 20 "github.com/docker/docker/daemon" 21 "github.com/docker/docker/daemon/execdriver" 22 "github.com/docker/docker/engine" 23 "github.com/docker/docker/image" 24 "github.com/docker/docker/nat" 25 "github.com/docker/docker/pkg/common" 26 "github.com/docker/docker/pkg/ioutils" 27 "github.com/docker/docker/pkg/reexec" 28 "github.com/docker/docker/runconfig" 29 "github.com/docker/docker/utils" 30 ) 31 32 const ( 33 unitTestImageName = "docker-test-image" 34 unitTestImageID = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0 35 unitTestImageIDShort = "83599e29c455" 36 unitTestNetworkBridge = "testdockbr0" 37 unitTestStoreBase = "/var/lib/docker/unit-tests" 38 unitTestDockerTmpdir = "/var/lib/docker/tmp" 39 testDaemonAddr = "127.0.0.1:4270" 40 testDaemonProto = "tcp" 41 testDaemonHttpsProto = "tcp" 42 testDaemonHttpsAddr = "localhost:4271" 43 testDaemonRogueHttpsAddr = "localhost:4272" 44 ) 45 46 var ( 47 // FIXME: globalDaemon is deprecated by globalEngine. All tests should be converted. 48 globalDaemon *daemon.Daemon 49 globalEngine *engine.Engine 50 globalHttpsEngine *engine.Engine 51 globalRogueHttpsEngine *engine.Engine 52 startFds int 53 startGoroutines int 54 ) 55 56 // FIXME: nuke() is deprecated by Daemon.Nuke() 57 func nuke(daemon *daemon.Daemon) error { 58 return daemon.Nuke() 59 } 60 61 // FIXME: cleanup and nuke are redundant. 62 func cleanup(eng *engine.Engine, t *testing.T) error { 63 daemon := mkDaemonFromEngine(eng, t) 64 for _, container := range daemon.List() { 65 container.Kill() 66 daemon.Rm(container) 67 } 68 job := eng.Job("images") 69 images, err := job.Stdout.AddTable() 70 if err != nil { 71 t.Fatal(err) 72 } 73 if err := job.Run(); err != nil { 74 t.Fatal(err) 75 } 76 for _, image := range images.Data { 77 if image.Get("Id") != unitTestImageID { 78 eng.Job("image_delete", image.Get("Id")).Run() 79 } 80 } 81 return nil 82 } 83 84 func init() { 85 // Always use the same driver (vfs) for all integration tests. 86 // To test other drivers, we need a dedicated driver validation suite. 87 os.Setenv("DOCKER_DRIVER", "vfs") 88 os.Setenv("TEST", "1") 89 os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir) 90 91 // Hack to run sys init during unit testing 92 if reexec.Init() { 93 return 94 } 95 96 if uid := syscall.Geteuid(); uid != 0 { 97 log.Fatalf("docker tests need to be run as root") 98 } 99 100 // Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary) 101 if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" { 102 src, err := os.Open(dockerinit) 103 if err != nil { 104 log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err) 105 } 106 defer src.Close() 107 dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555) 108 if err != nil { 109 log.Fatalf("Unable to create dockerinit in test directory: %s", err) 110 } 111 defer dst.Close() 112 if _, err := io.Copy(dst, src); err != nil { 113 log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err) 114 } 115 dst.Close() 116 src.Close() 117 } 118 119 // Setup the base daemon, which will be duplicated for each test. 120 // (no tests are run directly in the base) 121 setupBaseImage() 122 123 // Create the "global daemon" with a long-running daemons for integration tests 124 spawnGlobalDaemon() 125 spawnLegitHttpsDaemon() 126 spawnRogueHttpsDaemon() 127 startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine() 128 } 129 130 func setupBaseImage() { 131 eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase) 132 job := eng.Job("image_inspect", unitTestImageName) 133 img, _ := job.Stdout.AddEnv() 134 // If the unit test is not found, try to download it. 135 if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID { 136 // Retrieve the Image 137 job = eng.Job("pull", unitTestImageName) 138 job.Stdout.Add(ioutils.NopWriteCloser(os.Stdout)) 139 if err := job.Run(); err != nil { 140 log.Fatalf("Unable to pull the test image: %s", err) 141 } 142 } 143 } 144 145 func spawnGlobalDaemon() { 146 if globalDaemon != nil { 147 log.Debugf("Global daemon already exists. Skipping.") 148 return 149 } 150 t := std_log.New(os.Stderr, "", 0) 151 eng := NewTestEngine(t) 152 globalEngine = eng 153 globalDaemon = mkDaemonFromEngine(eng, t) 154 155 // Spawn a Daemon 156 go func() { 157 log.Debugf("Spawning global daemon for integration tests") 158 listenURL := &url.URL{ 159 Scheme: testDaemonProto, 160 Host: testDaemonAddr, 161 } 162 job := eng.Job("serveapi", listenURL.String()) 163 job.SetenvBool("Logging", true) 164 if err := job.Run(); err != nil { 165 log.Fatalf("Unable to spawn the test daemon: %s", err) 166 } 167 }() 168 169 // Give some time to ListenAndServer to actually start 170 // FIXME: use inmem transports instead of tcp 171 time.Sleep(time.Second) 172 173 if err := eng.Job("acceptconnections").Run(); err != nil { 174 log.Fatalf("Unable to accept connections for test api: %s", err) 175 } 176 } 177 178 func spawnLegitHttpsDaemon() { 179 if globalHttpsEngine != nil { 180 return 181 } 182 globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, "fixtures/https/ca.pem", 183 "fixtures/https/server-cert.pem", "fixtures/https/server-key.pem") 184 } 185 186 func spawnRogueHttpsDaemon() { 187 if globalRogueHttpsEngine != nil { 188 return 189 } 190 globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, "fixtures/https/ca.pem", 191 "fixtures/https/server-rogue-cert.pem", "fixtures/https/server-rogue-key.pem") 192 } 193 194 func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine { 195 t := std_log.New(os.Stderr, "", 0) 196 root, err := newTestDirectory(unitTestStoreBase) 197 if err != nil { 198 t.Fatal(err) 199 } 200 // FIXME: here we don't use NewTestEngine because it configures the daemon with Autorestart=false, 201 // and we want to set it to true. 202 203 eng := newTestEngine(t, true, root) 204 205 // Spawn a Daemon 206 go func() { 207 log.Debugf("Spawning https daemon for integration tests") 208 listenURL := &url.URL{ 209 Scheme: testDaemonHttpsProto, 210 Host: addr, 211 } 212 job := eng.Job("serveapi", listenURL.String()) 213 job.SetenvBool("Logging", true) 214 job.SetenvBool("Tls", true) 215 job.SetenvBool("TlsVerify", true) 216 job.Setenv("TlsCa", cacert) 217 job.Setenv("TlsCert", cert) 218 job.Setenv("TlsKey", key) 219 if err := job.Run(); err != nil { 220 log.Fatalf("Unable to spawn the test daemon: %s", err) 221 } 222 }() 223 224 // Give some time to ListenAndServer to actually start 225 time.Sleep(time.Second) 226 227 if err := eng.Job("acceptconnections").Run(); err != nil { 228 log.Fatalf("Unable to accept connections for test api: %s", err) 229 } 230 return eng 231 } 232 233 // FIXME: test that ImagePull(json=true) send correct json output 234 235 func GetTestImage(daemon *daemon.Daemon) *image.Image { 236 imgs, err := daemon.Graph().Map() 237 if err != nil { 238 log.Fatalf("Unable to get the test image: %s", err) 239 } 240 for _, image := range imgs { 241 if image.ID == unitTestImageID { 242 return image 243 } 244 } 245 log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs) 246 return nil 247 } 248 249 func TestDaemonCreate(t *testing.T) { 250 daemon := mkDaemon(t) 251 defer nuke(daemon) 252 253 // Make sure we start we 0 containers 254 if len(daemon.List()) != 0 { 255 t.Errorf("Expected 0 containers, %v found", len(daemon.List())) 256 } 257 258 container, _, err := daemon.Create(&runconfig.Config{ 259 Image: GetTestImage(daemon).ID, 260 Cmd: []string{"ls", "-al"}, 261 }, 262 &runconfig.HostConfig{}, 263 "", 264 ) 265 if err != nil { 266 t.Fatal(err) 267 } 268 269 defer func() { 270 if err := daemon.Rm(container); err != nil { 271 t.Error(err) 272 } 273 }() 274 275 // Make sure we can find the newly created container with List() 276 if len(daemon.List()) != 1 { 277 t.Errorf("Expected 1 container, %v found", len(daemon.List())) 278 } 279 280 // Make sure the container List() returns is the right one 281 if daemon.List()[0].ID != container.ID { 282 t.Errorf("Unexpected container %v returned by List", daemon.List()[0]) 283 } 284 285 // Make sure we can get the container with Get() 286 if _, err := daemon.Get(container.ID); err != nil { 287 t.Errorf("Unable to get newly created container") 288 } 289 290 // Make sure it is the right container 291 if c, _ := daemon.Get(container.ID); c != container { 292 t.Errorf("Get() returned the wrong container") 293 } 294 295 // Make sure Exists returns it as existing 296 if !daemon.Exists(container.ID) { 297 t.Errorf("Exists() returned false for a newly created container") 298 } 299 300 // Test that conflict error displays correct details 301 testContainer, _, _ := daemon.Create( 302 &runconfig.Config{ 303 Image: GetTestImage(daemon).ID, 304 Cmd: []string{"ls", "-al"}, 305 }, 306 &runconfig.HostConfig{}, 307 "conflictname", 308 ) 309 if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), common.TruncateID(testContainer.ID)) { 310 t.Fatalf("Name conflict error doesn't include the correct short id. Message was: %v", err) 311 } 312 313 // Make sure create with bad parameters returns an error 314 if _, _, err = daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID}, &runconfig.HostConfig{}, ""); err == nil { 315 t.Fatal("Builder.Create should throw an error when Cmd is missing") 316 } 317 318 if _, _, err := daemon.Create( 319 &runconfig.Config{ 320 Image: GetTestImage(daemon).ID, 321 Cmd: []string{}, 322 }, 323 &runconfig.HostConfig{}, 324 "", 325 ); err == nil { 326 t.Fatal("Builder.Create should throw an error when Cmd is empty") 327 } 328 329 config := &runconfig.Config{ 330 Image: GetTestImage(daemon).ID, 331 Cmd: []string{"/bin/ls"}, 332 PortSpecs: []string{"80"}, 333 } 334 container, _, err = daemon.Create(config, &runconfig.HostConfig{}, "") 335 336 _, err = daemon.Commit(container, "testrepo", "testtag", "", "", true, config) 337 if err != nil { 338 t.Error(err) 339 } 340 341 // test expose 80:8000 342 container, warnings, err := daemon.Create(&runconfig.Config{ 343 Image: GetTestImage(daemon).ID, 344 Cmd: []string{"ls", "-al"}, 345 PortSpecs: []string{"80:8000"}, 346 }, 347 &runconfig.HostConfig{}, 348 "", 349 ) 350 if err != nil { 351 t.Fatal(err) 352 } 353 if warnings == nil || len(warnings) != 1 { 354 t.Error("Expected a warning, got none") 355 } 356 } 357 358 func TestDestroy(t *testing.T) { 359 daemon := mkDaemon(t) 360 defer nuke(daemon) 361 362 container, _, err := daemon.Create(&runconfig.Config{ 363 Image: GetTestImage(daemon).ID, 364 Cmd: []string{"ls", "-al"}, 365 }, 366 &runconfig.HostConfig{}, 367 "") 368 if err != nil { 369 t.Fatal(err) 370 } 371 // Destroy 372 if err := daemon.Rm(container); err != nil { 373 t.Error(err) 374 } 375 376 // Make sure daemon.Exists() behaves correctly 377 if daemon.Exists("test_destroy") { 378 t.Errorf("Exists() returned true") 379 } 380 381 // Make sure daemon.List() doesn't list the destroyed container 382 if len(daemon.List()) != 0 { 383 t.Errorf("Expected 0 container, %v found", len(daemon.List())) 384 } 385 386 // Make sure daemon.Get() refuses to return the unexisting container 387 if c, _ := daemon.Get(container.ID); c != nil { 388 t.Errorf("Got a container that should not exist") 389 } 390 391 // Test double destroy 392 if err := daemon.Rm(container); err == nil { 393 // It should have failed 394 t.Errorf("Double destroy did not fail") 395 } 396 } 397 398 func TestGet(t *testing.T) { 399 daemon := mkDaemon(t) 400 defer nuke(daemon) 401 402 container1, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) 403 defer daemon.Rm(container1) 404 405 container2, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) 406 defer daemon.Rm(container2) 407 408 container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) 409 defer daemon.Rm(container3) 410 411 if c, _ := daemon.Get(container1.ID); c != container1 { 412 t.Errorf("Get(test1) returned %v while expecting %v", c, container1) 413 } 414 415 if c, _ := daemon.Get(container2.ID); c != container2 { 416 t.Errorf("Get(test2) returned %v while expecting %v", c, container2) 417 } 418 419 if c, _ := daemon.Get(container3.ID); c != container3 { 420 t.Errorf("Get(test3) returned %v while expecting %v", c, container3) 421 } 422 423 } 424 425 func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) { 426 var ( 427 err error 428 id string 429 outputBuffer = bytes.NewBuffer(nil) 430 strPort string 431 eng = NewTestEngine(t) 432 daemon = mkDaemonFromEngine(eng, t) 433 port = 5554 434 p nat.Port 435 ) 436 defer func() { 437 if err != nil { 438 daemon.Nuke() 439 } 440 }() 441 442 for { 443 port += 1 444 strPort = strconv.Itoa(port) 445 var cmd string 446 if proto == "tcp" { 447 cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat" 448 } else if proto == "udp" { 449 cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat" 450 } else { 451 t.Fatal(fmt.Errorf("Unknown protocol %v", proto)) 452 } 453 ep := make(map[nat.Port]struct{}, 1) 454 p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto)) 455 ep[p] = struct{}{} 456 457 jobCreate := eng.Job("create") 458 jobCreate.Setenv("Image", unitTestImageID) 459 jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd}) 460 jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) 461 jobCreate.SetenvJson("ExposedPorts", ep) 462 jobCreate.Stdout.Add(outputBuffer) 463 if err := jobCreate.Run(); err != nil { 464 t.Fatal(err) 465 } 466 id = engine.Tail(outputBuffer, 1) 467 // FIXME: this relies on the undocumented behavior of daemon.Create 468 // which will return a nil error AND container if the exposed ports 469 // are invalid. That behavior should be fixed! 470 if id != "" { 471 break 472 } 473 t.Logf("Port %v already in use, trying another one", strPort) 474 475 } 476 477 jobStart := eng.Job("start", id) 478 portBindings := make(map[nat.Port][]nat.PortBinding) 479 portBindings[p] = []nat.PortBinding{ 480 {}, 481 } 482 if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil { 483 t.Fatal(err) 484 } 485 if err := jobStart.Run(); err != nil { 486 t.Fatal(err) 487 } 488 489 container, err := daemon.Get(id) 490 if err != nil { 491 t.Fatal(err) 492 } 493 494 setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { 495 for !container.IsRunning() { 496 time.Sleep(10 * time.Millisecond) 497 } 498 }) 499 500 // Even if the state is running, lets give some time to lxc to spawn the process 501 container.WaitStop(500 * time.Millisecond) 502 503 strPort = container.NetworkSettings.Ports[p][0].HostPort 504 return daemon, container, strPort 505 } 506 507 // Run a container with a TCP port allocated, and test that it can receive connections on localhost 508 func TestAllocateTCPPortLocalhost(t *testing.T) { 509 daemon, container, port := startEchoServerContainer(t, "tcp") 510 defer nuke(daemon) 511 defer container.Kill() 512 513 for i := 0; i != 10; i++ { 514 conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port)) 515 if err != nil { 516 t.Fatal(err) 517 } 518 defer conn.Close() 519 520 input := bytes.NewBufferString("well hello there\n") 521 _, err = conn.Write(input.Bytes()) 522 if err != nil { 523 t.Fatal(err) 524 } 525 buf := make([]byte, 16) 526 read := 0 527 conn.SetReadDeadline(time.Now().Add(3 * time.Second)) 528 read, err = conn.Read(buf) 529 if err != nil { 530 if err, ok := err.(*net.OpError); ok { 531 if err.Err == syscall.ECONNRESET { 532 t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec") 533 conn.Close() 534 time.Sleep(time.Second) 535 continue 536 } 537 if err.Timeout() { 538 t.Log("Timeout, trying again") 539 conn.Close() 540 continue 541 } 542 } 543 t.Fatal(err) 544 } 545 output := string(buf[:read]) 546 if !strings.Contains(output, "well hello there") { 547 t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output)) 548 } else { 549 return 550 } 551 } 552 553 t.Fatal("No reply from the container") 554 } 555 556 // Run a container with an UDP port allocated, and test that it can receive connections on localhost 557 func TestAllocateUDPPortLocalhost(t *testing.T) { 558 daemon, container, port := startEchoServerContainer(t, "udp") 559 defer nuke(daemon) 560 defer container.Kill() 561 562 conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port)) 563 if err != nil { 564 t.Fatal(err) 565 } 566 defer conn.Close() 567 568 input := bytes.NewBufferString("well hello there\n") 569 buf := make([]byte, 16) 570 // Try for a minute, for some reason the select in socat may take ages 571 // to return even though everything on the path seems fine (i.e: the 572 // UDPProxy forwards the traffic correctly and you can see the packets 573 // on the interface from within the container). 574 for i := 0; i != 120; i++ { 575 _, err := conn.Write(input.Bytes()) 576 if err != nil { 577 t.Fatal(err) 578 } 579 conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond)) 580 read, err := conn.Read(buf) 581 if err == nil { 582 output := string(buf[:read]) 583 if strings.Contains(output, "well hello there") { 584 return 585 } 586 } 587 } 588 589 t.Fatal("No reply from the container") 590 } 591 592 func TestRestore(t *testing.T) { 593 eng := NewTestEngine(t) 594 daemon1 := mkDaemonFromEngine(eng, t) 595 defer daemon1.Nuke() 596 // Create a container with one instance of docker 597 container1, _, _ := mkContainer(daemon1, []string{"_", "ls", "-al"}, t) 598 defer daemon1.Rm(container1) 599 600 // Create a second container meant to be killed 601 container2, _, _ := mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t) 602 defer daemon1.Rm(container2) 603 604 // Start the container non blocking 605 if err := container2.Start(); err != nil { 606 t.Fatal(err) 607 } 608 609 if !container2.IsRunning() { 610 t.Fatalf("Container %v should appear as running but isn't", container2.ID) 611 } 612 613 // Simulate a crash/manual quit of dockerd: process dies, states stays 'Running' 614 cStdin := container2.StdinPipe() 615 cStdin.Close() 616 if _, err := container2.WaitStop(2 * time.Second); err != nil { 617 t.Fatal(err) 618 } 619 container2.SetRunning(42) 620 container2.ToDisk() 621 622 if len(daemon1.List()) != 2 { 623 t.Errorf("Expected 2 container, %v found", len(daemon1.List())) 624 } 625 if err := container1.Run(); err != nil { 626 t.Fatal(err) 627 } 628 629 if !container2.IsRunning() { 630 t.Fatalf("Container %v should appear as running but isn't", container2.ID) 631 } 632 633 // Here are are simulating a docker restart - that is, reloading all containers 634 // from scratch 635 eng = newTestEngine(t, false, daemon1.Config().Root) 636 daemon2 := mkDaemonFromEngine(eng, t) 637 if len(daemon2.List()) != 2 { 638 t.Errorf("Expected 2 container, %v found", len(daemon2.List())) 639 } 640 runningCount := 0 641 for _, c := range daemon2.List() { 642 if c.IsRunning() { 643 t.Errorf("Running container found: %v (%v)", c.ID, c.Path) 644 runningCount++ 645 } 646 } 647 if runningCount != 0 { 648 t.Fatalf("Expected 0 container alive, %d found", runningCount) 649 } 650 container3, err := daemon2.Get(container1.ID) 651 if err != nil { 652 t.Fatal("Unable to Get container") 653 } 654 if err := container3.Run(); err != nil { 655 t.Fatal(err) 656 } 657 container2.SetStopped(&execdriver.ExitStatus{ExitCode: 0}) 658 } 659 660 func TestDefaultContainerName(t *testing.T) { 661 eng := NewTestEngine(t) 662 daemon := mkDaemonFromEngine(eng, t) 663 defer nuke(daemon) 664 665 config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) 666 if err != nil { 667 t.Fatal(err) 668 } 669 670 container, err := daemon.Get(createNamedTestContainer(eng, config, t, "some_name")) 671 if err != nil { 672 t.Fatal(err) 673 } 674 containerID := container.ID 675 676 if container.Name != "/some_name" { 677 t.Fatalf("Expect /some_name got %s", container.Name) 678 } 679 680 c, err := daemon.Get("/some_name") 681 if err != nil { 682 t.Fatalf("Couldn't retrieve test container as /some_name") 683 } 684 if c.ID != containerID { 685 t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID) 686 } 687 } 688 689 func TestRandomContainerName(t *testing.T) { 690 eng := NewTestEngine(t) 691 daemon := mkDaemonFromEngine(eng, t) 692 defer nuke(daemon) 693 694 config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"}) 695 if err != nil { 696 t.Fatal(err) 697 } 698 699 container, err := daemon.Get(createTestContainer(eng, config, t)) 700 if err != nil { 701 t.Fatal(err) 702 } 703 containerID := container.ID 704 705 if container.Name == "" { 706 t.Fatalf("Expected not empty container name") 707 } 708 709 if c, err := daemon.Get(container.Name); err != nil { 710 log.Fatalf("Could not lookup container %s by its name", container.Name) 711 } else if c.ID != containerID { 712 log.Fatalf("Looking up container name %s returned id %s instead of %s", container.Name, c.ID, containerID) 713 } 714 } 715 716 func TestContainerNameValidation(t *testing.T) { 717 eng := NewTestEngine(t) 718 daemon := mkDaemonFromEngine(eng, t) 719 defer nuke(daemon) 720 721 for _, test := range []struct { 722 Name string 723 Valid bool 724 }{ 725 {"abc-123_AAA.1", true}, 726 {"\000asdf", false}, 727 } { 728 config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) 729 if err != nil { 730 if !test.Valid { 731 continue 732 } 733 t.Fatal(err) 734 } 735 736 var outputBuffer = bytes.NewBuffer(nil) 737 job := eng.Job("create", test.Name) 738 if err := job.ImportEnv(config); err != nil { 739 t.Fatal(err) 740 } 741 job.Stdout.Add(outputBuffer) 742 if err := job.Run(); err != nil { 743 if !test.Valid { 744 continue 745 } 746 t.Fatal(err) 747 } 748 749 container, err := daemon.Get(engine.Tail(outputBuffer, 1)) 750 if err != nil { 751 t.Fatal(err) 752 } 753 754 if container.Name != "/"+test.Name { 755 t.Fatalf("Expect /%s got %s", test.Name, container.Name) 756 } 757 758 if c, err := daemon.Get("/" + test.Name); err != nil { 759 t.Fatalf("Couldn't retrieve test container as /%s", test.Name) 760 } else if c.ID != container.ID { 761 t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID) 762 } 763 } 764 765 } 766 767 func TestLinkChildContainer(t *testing.T) { 768 eng := NewTestEngine(t) 769 daemon := mkDaemonFromEngine(eng, t) 770 defer nuke(daemon) 771 772 config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) 773 if err != nil { 774 t.Fatal(err) 775 } 776 777 container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) 778 if err != nil { 779 t.Fatal(err) 780 } 781 782 webapp, err := daemon.GetByName("/webapp") 783 if err != nil { 784 t.Fatal(err) 785 } 786 787 if webapp.ID != container.ID { 788 t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) 789 } 790 791 config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"}) 792 if err != nil { 793 t.Fatal(err) 794 } 795 796 childContainer, err := daemon.Get(createTestContainer(eng, config, t)) 797 if err != nil { 798 t.Fatal(err) 799 } 800 801 if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { 802 t.Fatal(err) 803 } 804 805 // Get the child by it's new name 806 db, err := daemon.GetByName("/webapp/db") 807 if err != nil { 808 t.Fatal(err) 809 } 810 if db.ID != childContainer.ID { 811 t.Fatalf("Expect db id to match container id: %s != %s", db.ID, childContainer.ID) 812 } 813 } 814 815 func TestGetAllChildren(t *testing.T) { 816 eng := NewTestEngine(t) 817 daemon := mkDaemonFromEngine(eng, t) 818 defer nuke(daemon) 819 820 config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) 821 if err != nil { 822 t.Fatal(err) 823 } 824 825 container, err := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) 826 if err != nil { 827 t.Fatal(err) 828 } 829 830 webapp, err := daemon.GetByName("/webapp") 831 if err != nil { 832 t.Fatal(err) 833 } 834 835 if webapp.ID != container.ID { 836 t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) 837 } 838 839 config, _, _, err = parseRun([]string{unitTestImageID, "echo test"}) 840 if err != nil { 841 t.Fatal(err) 842 } 843 844 childContainer, err := daemon.Get(createTestContainer(eng, config, t)) 845 if err != nil { 846 t.Fatal(err) 847 } 848 849 if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { 850 t.Fatal(err) 851 } 852 853 children, err := daemon.Children("/webapp") 854 if err != nil { 855 t.Fatal(err) 856 } 857 858 if children == nil { 859 t.Fatal("Children should not be nil") 860 } 861 if len(children) == 0 { 862 t.Fatal("Children should not be empty") 863 } 864 865 for key, value := range children { 866 if key != "/webapp/db" { 867 t.Fatalf("Expected /webapp/db got %s", key) 868 } 869 if value.ID != childContainer.ID { 870 t.Fatalf("Expected id %s got %s", childContainer.ID, value.ID) 871 } 872 } 873 } 874 875 func TestDestroyWithInitLayer(t *testing.T) { 876 daemon := mkDaemon(t) 877 defer nuke(daemon) 878 879 container, _, err := daemon.Create(&runconfig.Config{ 880 Image: GetTestImage(daemon).ID, 881 Cmd: []string{"ls", "-al"}, 882 }, 883 &runconfig.HostConfig{}, 884 "") 885 886 if err != nil { 887 t.Fatal(err) 888 } 889 // Destroy 890 if err := daemon.Rm(container); err != nil { 891 t.Fatal(err) 892 } 893 894 // Make sure daemon.Exists() behaves correctly 895 if daemon.Exists("test_destroy") { 896 t.Fatalf("Exists() returned true") 897 } 898 899 // Make sure daemon.List() doesn't list the destroyed container 900 if len(daemon.List()) != 0 { 901 t.Fatalf("Expected 0 container, %v found", len(daemon.List())) 902 } 903 904 driver := daemon.Graph().Driver() 905 906 // Make sure that the container does not exist in the driver 907 if _, err := driver.Get(container.ID, ""); err == nil { 908 t.Fatal("Conttainer should not exist in the driver") 909 } 910 911 // Make sure that the init layer is removed from the driver 912 if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID), ""); err == nil { 913 t.Fatal("Container's init layer should not exist in the driver") 914 } 915 }