package docker

import (
	"bytes"
	"fmt"
	"io"
	std_log "log"
	"net"
	"net/url"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"strings"
	"syscall"
	"testing"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/docker/engine"
	"github.com/docker/docker/image"
	"github.com/docker/docker/nat"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/reexec"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/utils"
)

// Fixed identities and addresses shared by every integration test in this
// package. The image ID pins the exact test image ("docker-test-image" 1.0)
// so tests fail loudly if a different image is pulled.
const (
	unitTestImageName        = "docker-test-image"
	unitTestImageID          = "83599e29c455eb719f77d799bc7c51521b9551972f5a850d7ad265bc1b5292f6" // 1.0
	unitTestImageIDShort     = "83599e29c455"
	unitTestNetworkBridge    = "testdockbr0"
	unitTestStoreBase        = "/var/lib/docker/unit-tests"
	unitTestDockerTmpdir     = "/var/lib/docker/tmp"
	testDaemonAddr           = "127.0.0.1:4270"
	testDaemonProto          = "tcp"
	testDaemonHttpsProto     = "tcp"
	testDaemonHttpsAddr      = "localhost:4271"
	testDaemonRogueHttpsAddr = "localhost:4272"
)

// Long-running daemons/engines spawned once in init() and shared across
// tests, plus the fd/goroutine baselines captured right after startup
// (used elsewhere to detect leaks).
var (
	// FIXME: globalDaemon is deprecated by globalEngine. All tests should be converted.
	globalDaemon           *daemon.Daemon
	globalEngine           *engine.Engine
	globalHttpsEngine      *engine.Engine
	globalRogueHttpsEngine *engine.Engine
	startFds               int
	startGoroutines        int
)

// nuke destroys all state held by the given daemon.
// FIXME: nuke() is deprecated by Daemon.Nuke()
func nuke(daemon *daemon.Daemon) error {
	return daemon.Nuke()
}

// FIXME: cleanup and nuke are redundant.
61 func cleanup(eng *engine.Engine, t *testing.T) error { 62 daemon := mkDaemonFromEngine(eng, t) 63 for _, container := range daemon.List() { 64 container.Kill() 65 daemon.Destroy(container) 66 } 67 job := eng.Job("images") 68 images, err := job.Stdout.AddTable() 69 if err != nil { 70 t.Fatal(err) 71 } 72 if err := job.Run(); err != nil { 73 t.Fatal(err) 74 } 75 for _, image := range images.Data { 76 if image.Get("Id") != unitTestImageID { 77 eng.Job("image_delete", image.Get("Id")).Run() 78 } 79 } 80 return nil 81 } 82 83 func init() { 84 // Always use the same driver (vfs) for all integration tests. 85 // To test other drivers, we need a dedicated driver validation suite. 86 os.Setenv("DOCKER_DRIVER", "vfs") 87 os.Setenv("TEST", "1") 88 os.Setenv("DOCKER_TMPDIR", unitTestDockerTmpdir) 89 90 // Hack to run sys init during unit testing 91 if reexec.Init() { 92 return 93 } 94 95 if uid := syscall.Geteuid(); uid != 0 { 96 log.Fatalf("docker tests need to be run as root") 97 } 98 99 // Copy dockerinit into our current testing directory, if provided (so we can test a separate dockerinit binary) 100 if dockerinit := os.Getenv("TEST_DOCKERINIT_PATH"); dockerinit != "" { 101 src, err := os.Open(dockerinit) 102 if err != nil { 103 log.Fatalf("Unable to open TEST_DOCKERINIT_PATH: %s", err) 104 } 105 defer src.Close() 106 dst, err := os.OpenFile(filepath.Join(filepath.Dir(utils.SelfPath()), "dockerinit"), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0555) 107 if err != nil { 108 log.Fatalf("Unable to create dockerinit in test directory: %s", err) 109 } 110 defer dst.Close() 111 if _, err := io.Copy(dst, src); err != nil { 112 log.Fatalf("Unable to copy dockerinit to TEST_DOCKERINIT_PATH: %s", err) 113 } 114 dst.Close() 115 src.Close() 116 } 117 118 // Setup the base daemon, which will be duplicated for each test. 
119 // (no tests are run directly in the base) 120 setupBaseImage() 121 122 // Create the "global daemon" with a long-running daemons for integration tests 123 spawnGlobalDaemon() 124 spawnLegitHttpsDaemon() 125 spawnRogueHttpsDaemon() 126 startFds, startGoroutines = utils.GetTotalUsedFds(), runtime.NumGoroutine() 127 } 128 129 func setupBaseImage() { 130 eng := newTestEngine(std_log.New(os.Stderr, "", 0), false, unitTestStoreBase) 131 job := eng.Job("image_inspect", unitTestImageName) 132 img, _ := job.Stdout.AddEnv() 133 // If the unit test is not found, try to download it. 134 if err := job.Run(); err != nil || img.Get("Id") != unitTestImageID { 135 // Retrieve the Image 136 job = eng.Job("pull", unitTestImageName) 137 job.Stdout.Add(ioutils.NopWriteCloser(os.Stdout)) 138 if err := job.Run(); err != nil { 139 log.Fatalf("Unable to pull the test image: %s", err) 140 } 141 } 142 } 143 144 func spawnGlobalDaemon() { 145 if globalDaemon != nil { 146 log.Debugf("Global daemon already exists. 
Skipping.") 147 return 148 } 149 t := std_log.New(os.Stderr, "", 0) 150 eng := NewTestEngine(t) 151 globalEngine = eng 152 globalDaemon = mkDaemonFromEngine(eng, t) 153 154 // Spawn a Daemon 155 go func() { 156 log.Debugf("Spawning global daemon for integration tests") 157 listenURL := &url.URL{ 158 Scheme: testDaemonProto, 159 Host: testDaemonAddr, 160 } 161 job := eng.Job("serveapi", listenURL.String()) 162 job.SetenvBool("Logging", true) 163 if err := job.Run(); err != nil { 164 log.Fatalf("Unable to spawn the test daemon: %s", err) 165 } 166 }() 167 168 // Give some time to ListenAndServer to actually start 169 // FIXME: use inmem transports instead of tcp 170 time.Sleep(time.Second) 171 172 if err := eng.Job("acceptconnections").Run(); err != nil { 173 log.Fatalf("Unable to accept connections for test api: %s", err) 174 } 175 } 176 177 func spawnLegitHttpsDaemon() { 178 if globalHttpsEngine != nil { 179 return 180 } 181 globalHttpsEngine = spawnHttpsDaemon(testDaemonHttpsAddr, "fixtures/https/ca.pem", 182 "fixtures/https/server-cert.pem", "fixtures/https/server-key.pem") 183 } 184 185 func spawnRogueHttpsDaemon() { 186 if globalRogueHttpsEngine != nil { 187 return 188 } 189 globalRogueHttpsEngine = spawnHttpsDaemon(testDaemonRogueHttpsAddr, "fixtures/https/ca.pem", 190 "fixtures/https/server-rogue-cert.pem", "fixtures/https/server-rogue-key.pem") 191 } 192 193 func spawnHttpsDaemon(addr, cacert, cert, key string) *engine.Engine { 194 t := std_log.New(os.Stderr, "", 0) 195 root, err := newTestDirectory(unitTestStoreBase) 196 if err != nil { 197 t.Fatal(err) 198 } 199 // FIXME: here we don't use NewTestEngine because it configures the daemon with Autorestart=false, 200 // and we want to set it to true. 
201 202 eng := newTestEngine(t, true, root) 203 204 // Spawn a Daemon 205 go func() { 206 log.Debugf("Spawning https daemon for integration tests") 207 listenURL := &url.URL{ 208 Scheme: testDaemonHttpsProto, 209 Host: addr, 210 } 211 job := eng.Job("serveapi", listenURL.String()) 212 job.SetenvBool("Logging", true) 213 job.SetenvBool("Tls", true) 214 job.SetenvBool("TlsVerify", true) 215 job.Setenv("TlsCa", cacert) 216 job.Setenv("TlsCert", cert) 217 job.Setenv("TlsKey", key) 218 if err := job.Run(); err != nil { 219 log.Fatalf("Unable to spawn the test daemon: %s", err) 220 } 221 }() 222 223 // Give some time to ListenAndServer to actually start 224 time.Sleep(time.Second) 225 226 if err := eng.Job("acceptconnections").Run(); err != nil { 227 log.Fatalf("Unable to accept connections for test api: %s", err) 228 } 229 return eng 230 } 231 232 // FIXME: test that ImagePull(json=true) send correct json output 233 234 func GetTestImage(daemon *daemon.Daemon) *image.Image { 235 imgs, err := daemon.Graph().Map() 236 if err != nil { 237 log.Fatalf("Unable to get the test image: %s", err) 238 } 239 for _, image := range imgs { 240 if image.ID == unitTestImageID { 241 return image 242 } 243 } 244 log.Fatalf("Test image %v not found in %s: %s", unitTestImageID, daemon.Graph().Root, imgs) 245 return nil 246 } 247 248 func TestDaemonCreate(t *testing.T) { 249 daemon := mkDaemon(t) 250 defer nuke(daemon) 251 252 // Make sure we start we 0 containers 253 if len(daemon.List()) != 0 { 254 t.Errorf("Expected 0 containers, %v found", len(daemon.List())) 255 } 256 257 container, _, err := daemon.Create(&runconfig.Config{ 258 Image: GetTestImage(daemon).ID, 259 Cmd: []string{"ls", "-al"}, 260 }, 261 &runconfig.HostConfig{}, 262 "", 263 ) 264 if err != nil { 265 t.Fatal(err) 266 } 267 268 defer func() { 269 if err := daemon.Destroy(container); err != nil { 270 t.Error(err) 271 } 272 }() 273 274 // Make sure we can find the newly created container with List() 275 if len(daemon.List()) 
!= 1 { 276 t.Errorf("Expected 1 container, %v found", len(daemon.List())) 277 } 278 279 // Make sure the container List() returns is the right one 280 if daemon.List()[0].ID != container.ID { 281 t.Errorf("Unexpected container %v returned by List", daemon.List()[0]) 282 } 283 284 // Make sure we can get the container with Get() 285 if daemon.Get(container.ID) == nil { 286 t.Errorf("Unable to get newly created container") 287 } 288 289 // Make sure it is the right container 290 if daemon.Get(container.ID) != container { 291 t.Errorf("Get() returned the wrong container") 292 } 293 294 // Make sure Exists returns it as existing 295 if !daemon.Exists(container.ID) { 296 t.Errorf("Exists() returned false for a newly created container") 297 } 298 299 // Test that conflict error displays correct details 300 testContainer, _, _ := daemon.Create( 301 &runconfig.Config{ 302 Image: GetTestImage(daemon).ID, 303 Cmd: []string{"ls", "-al"}, 304 }, 305 &runconfig.HostConfig{}, 306 "conflictname", 307 ) 308 if _, _, err := daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID, Cmd: []string{"ls", "-al"}}, &runconfig.HostConfig{}, testContainer.Name); err == nil || !strings.Contains(err.Error(), utils.TruncateID(testContainer.ID)) { 309 t.Fatalf("Name conflict error doesn't include the correct short id. 
Message was: %s", err.Error()) 310 } 311 312 // Make sure create with bad parameters returns an error 313 if _, _, err = daemon.Create(&runconfig.Config{Image: GetTestImage(daemon).ID}, &runconfig.HostConfig{}, ""); err == nil { 314 t.Fatal("Builder.Create should throw an error when Cmd is missing") 315 } 316 317 if _, _, err := daemon.Create( 318 &runconfig.Config{ 319 Image: GetTestImage(daemon).ID, 320 Cmd: []string{}, 321 }, 322 &runconfig.HostConfig{}, 323 "", 324 ); err == nil { 325 t.Fatal("Builder.Create should throw an error when Cmd is empty") 326 } 327 328 config := &runconfig.Config{ 329 Image: GetTestImage(daemon).ID, 330 Cmd: []string{"/bin/ls"}, 331 PortSpecs: []string{"80"}, 332 } 333 container, _, err = daemon.Create(config, &runconfig.HostConfig{}, "") 334 335 _, err = daemon.Commit(container, "testrepo", "testtag", "", "", true, config) 336 if err != nil { 337 t.Error(err) 338 } 339 340 // test expose 80:8000 341 container, warnings, err := daemon.Create(&runconfig.Config{ 342 Image: GetTestImage(daemon).ID, 343 Cmd: []string{"ls", "-al"}, 344 PortSpecs: []string{"80:8000"}, 345 }, 346 &runconfig.HostConfig{}, 347 "", 348 ) 349 if err != nil { 350 t.Fatal(err) 351 } 352 if warnings == nil || len(warnings) != 1 { 353 t.Error("Expected a warning, got none") 354 } 355 } 356 357 func TestDestroy(t *testing.T) { 358 daemon := mkDaemon(t) 359 defer nuke(daemon) 360 361 container, _, err := daemon.Create(&runconfig.Config{ 362 Image: GetTestImage(daemon).ID, 363 Cmd: []string{"ls", "-al"}, 364 }, 365 &runconfig.HostConfig{}, 366 "") 367 if err != nil { 368 t.Fatal(err) 369 } 370 // Destroy 371 if err := daemon.Destroy(container); err != nil { 372 t.Error(err) 373 } 374 375 // Make sure daemon.Exists() behaves correctly 376 if daemon.Exists("test_destroy") { 377 t.Errorf("Exists() returned true") 378 } 379 380 // Make sure daemon.List() doesn't list the destroyed container 381 if len(daemon.List()) != 0 { 382 t.Errorf("Expected 0 container, %v found", 
len(daemon.List())) 383 } 384 385 // Make sure daemon.Get() refuses to return the unexisting container 386 if daemon.Get(container.ID) != nil { 387 t.Errorf("Unable to get newly created container") 388 } 389 390 // Test double destroy 391 if err := daemon.Destroy(container); err == nil { 392 // It should have failed 393 t.Errorf("Double destroy did not fail") 394 } 395 } 396 397 func TestGet(t *testing.T) { 398 daemon := mkDaemon(t) 399 defer nuke(daemon) 400 401 container1, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) 402 defer daemon.Destroy(container1) 403 404 container2, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) 405 defer daemon.Destroy(container2) 406 407 container3, _, _ := mkContainer(daemon, []string{"_", "ls", "-al"}, t) 408 defer daemon.Destroy(container3) 409 410 if daemon.Get(container1.ID) != container1 { 411 t.Errorf("Get(test1) returned %v while expecting %v", daemon.Get(container1.ID), container1) 412 } 413 414 if daemon.Get(container2.ID) != container2 { 415 t.Errorf("Get(test2) returned %v while expecting %v", daemon.Get(container2.ID), container2) 416 } 417 418 if daemon.Get(container3.ID) != container3 { 419 t.Errorf("Get(test3) returned %v while expecting %v", daemon.Get(container3.ID), container3) 420 } 421 422 } 423 424 func startEchoServerContainer(t *testing.T, proto string) (*daemon.Daemon, *daemon.Container, string) { 425 var ( 426 err error 427 id string 428 outputBuffer = bytes.NewBuffer(nil) 429 strPort string 430 eng = NewTestEngine(t) 431 daemon = mkDaemonFromEngine(eng, t) 432 port = 5554 433 p nat.Port 434 ) 435 defer func() { 436 if err != nil { 437 daemon.Nuke() 438 } 439 }() 440 441 for { 442 port += 1 443 strPort = strconv.Itoa(port) 444 var cmd string 445 if proto == "tcp" { 446 cmd = "socat TCP-LISTEN:" + strPort + ",reuseaddr,fork EXEC:/bin/cat" 447 } else if proto == "udp" { 448 cmd = "socat UDP-RECVFROM:" + strPort + ",fork EXEC:/bin/cat" 449 } else { 450 t.Fatal(fmt.Errorf("Unknown protocol 
%v", proto)) 451 } 452 ep := make(map[nat.Port]struct{}, 1) 453 p = nat.Port(fmt.Sprintf("%s/%s", strPort, proto)) 454 ep[p] = struct{}{} 455 456 jobCreate := eng.Job("create") 457 jobCreate.Setenv("Image", unitTestImageID) 458 jobCreate.SetenvList("Cmd", []string{"sh", "-c", cmd}) 459 jobCreate.SetenvList("PortSpecs", []string{fmt.Sprintf("%s/%s", strPort, proto)}) 460 jobCreate.SetenvJson("ExposedPorts", ep) 461 jobCreate.Stdout.Add(outputBuffer) 462 if err := jobCreate.Run(); err != nil { 463 t.Fatal(err) 464 } 465 id = engine.Tail(outputBuffer, 1) 466 // FIXME: this relies on the undocumented behavior of daemon.Create 467 // which will return a nil error AND container if the exposed ports 468 // are invalid. That behavior should be fixed! 469 if id != "" { 470 break 471 } 472 t.Logf("Port %v already in use, trying another one", strPort) 473 474 } 475 476 jobStart := eng.Job("start", id) 477 portBindings := make(map[nat.Port][]nat.PortBinding) 478 portBindings[p] = []nat.PortBinding{ 479 {}, 480 } 481 if err := jobStart.SetenvJson("PortsBindings", portBindings); err != nil { 482 t.Fatal(err) 483 } 484 if err := jobStart.Run(); err != nil { 485 t.Fatal(err) 486 } 487 488 container := daemon.Get(id) 489 if container == nil { 490 t.Fatalf("Couldn't fetch test container %s", id) 491 } 492 493 setTimeout(t, "Waiting for the container to be started timed out", 2*time.Second, func() { 494 for !container.IsRunning() { 495 time.Sleep(10 * time.Millisecond) 496 } 497 }) 498 499 // Even if the state is running, lets give some time to lxc to spawn the process 500 container.WaitStop(500 * time.Millisecond) 501 502 strPort = container.NetworkSettings.Ports[p][0].HostPort 503 return daemon, container, strPort 504 } 505 506 // Run a container with a TCP port allocated, and test that it can receive connections on localhost 507 func TestAllocateTCPPortLocalhost(t *testing.T) { 508 daemon, container, port := startEchoServerContainer(t, "tcp") 509 defer nuke(daemon) 510 defer 
container.Kill() 511 512 for i := 0; i != 10; i++ { 513 conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", port)) 514 if err != nil { 515 t.Fatal(err) 516 } 517 defer conn.Close() 518 519 input := bytes.NewBufferString("well hello there\n") 520 _, err = conn.Write(input.Bytes()) 521 if err != nil { 522 t.Fatal(err) 523 } 524 buf := make([]byte, 16) 525 read := 0 526 conn.SetReadDeadline(time.Now().Add(3 * time.Second)) 527 read, err = conn.Read(buf) 528 if err != nil { 529 if err, ok := err.(*net.OpError); ok { 530 if err.Err == syscall.ECONNRESET { 531 t.Logf("Connection reset by the proxy, socat is probably not listening yet, trying again in a sec") 532 conn.Close() 533 time.Sleep(time.Second) 534 continue 535 } 536 if err.Timeout() { 537 t.Log("Timeout, trying again") 538 conn.Close() 539 continue 540 } 541 } 542 t.Fatal(err) 543 } 544 output := string(buf[:read]) 545 if !strings.Contains(output, "well hello there") { 546 t.Fatal(fmt.Errorf("[%v] doesn't contain [well hello there]", output)) 547 } else { 548 return 549 } 550 } 551 552 t.Fatal("No reply from the container") 553 } 554 555 // Run a container with an UDP port allocated, and test that it can receive connections on localhost 556 func TestAllocateUDPPortLocalhost(t *testing.T) { 557 daemon, container, port := startEchoServerContainer(t, "udp") 558 defer nuke(daemon) 559 defer container.Kill() 560 561 conn, err := net.Dial("udp", fmt.Sprintf("localhost:%v", port)) 562 if err != nil { 563 t.Fatal(err) 564 } 565 defer conn.Close() 566 567 input := bytes.NewBufferString("well hello there\n") 568 buf := make([]byte, 16) 569 // Try for a minute, for some reason the select in socat may take ages 570 // to return even though everything on the path seems fine (i.e: the 571 // UDPProxy forwards the traffic correctly and you can see the packets 572 // on the interface from within the container). 
573 for i := 0; i != 120; i++ { 574 _, err := conn.Write(input.Bytes()) 575 if err != nil { 576 t.Fatal(err) 577 } 578 conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond)) 579 read, err := conn.Read(buf) 580 if err == nil { 581 output := string(buf[:read]) 582 if strings.Contains(output, "well hello there") { 583 return 584 } 585 } 586 } 587 588 t.Fatal("No reply from the container") 589 } 590 591 func TestRestore(t *testing.T) { 592 eng := NewTestEngine(t) 593 daemon1 := mkDaemonFromEngine(eng, t) 594 defer daemon1.Nuke() 595 // Create a container with one instance of docker 596 container1, _, _ := mkContainer(daemon1, []string{"_", "ls", "-al"}, t) 597 defer daemon1.Destroy(container1) 598 599 // Create a second container meant to be killed 600 container2, _, _ := mkContainer(daemon1, []string{"-i", "_", "/bin/cat"}, t) 601 defer daemon1.Destroy(container2) 602 603 // Start the container non blocking 604 if err := container2.Start(); err != nil { 605 t.Fatal(err) 606 } 607 608 if !container2.IsRunning() { 609 t.Fatalf("Container %v should appear as running but isn't", container2.ID) 610 } 611 612 // Simulate a crash/manual quit of dockerd: process dies, states stays 'Running' 613 cStdin := container2.StdinPipe() 614 cStdin.Close() 615 if _, err := container2.WaitStop(2 * time.Second); err != nil { 616 t.Fatal(err) 617 } 618 container2.SetRunning(42) 619 container2.ToDisk() 620 621 if len(daemon1.List()) != 2 { 622 t.Errorf("Expected 2 container, %v found", len(daemon1.List())) 623 } 624 if err := container1.Run(); err != nil { 625 t.Fatal(err) 626 } 627 628 if !container2.IsRunning() { 629 t.Fatalf("Container %v should appear as running but isn't", container2.ID) 630 } 631 632 // Here are are simulating a docker restart - that is, reloading all containers 633 // from scratch 634 eng = newTestEngine(t, false, daemon1.Config().Root) 635 daemon2 := mkDaemonFromEngine(eng, t) 636 if len(daemon2.List()) != 2 { 637 t.Errorf("Expected 2 container, %v found", 
len(daemon2.List())) 638 } 639 runningCount := 0 640 for _, c := range daemon2.List() { 641 if c.IsRunning() { 642 t.Errorf("Running container found: %v (%v)", c.ID, c.Path) 643 runningCount++ 644 } 645 } 646 if runningCount != 0 { 647 t.Fatalf("Expected 0 container alive, %d found", runningCount) 648 } 649 container3 := daemon2.Get(container1.ID) 650 if container3 == nil { 651 t.Fatal("Unable to Get container") 652 } 653 if err := container3.Run(); err != nil { 654 t.Fatal(err) 655 } 656 container2.SetStopped(&execdriver.ExitStatus{ExitCode: 0}) 657 } 658 659 func TestDefaultContainerName(t *testing.T) { 660 eng := NewTestEngine(t) 661 daemon := mkDaemonFromEngine(eng, t) 662 defer nuke(daemon) 663 664 config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) 665 if err != nil { 666 t.Fatal(err) 667 } 668 669 container := daemon.Get(createNamedTestContainer(eng, config, t, "some_name")) 670 containerID := container.ID 671 672 if container.Name != "/some_name" { 673 t.Fatalf("Expect /some_name got %s", container.Name) 674 } 675 676 if c := daemon.Get("/some_name"); c == nil { 677 t.Fatalf("Couldn't retrieve test container as /some_name") 678 } else if c.ID != containerID { 679 t.Fatalf("Container /some_name has ID %s instead of %s", c.ID, containerID) 680 } 681 } 682 683 func TestRandomContainerName(t *testing.T) { 684 eng := NewTestEngine(t) 685 daemon := mkDaemonFromEngine(eng, t) 686 defer nuke(daemon) 687 688 config, _, _, err := parseRun([]string{GetTestImage(daemon).ID, "echo test"}) 689 if err != nil { 690 t.Fatal(err) 691 } 692 693 container := daemon.Get(createTestContainer(eng, config, t)) 694 containerID := container.ID 695 696 if container.Name == "" { 697 t.Fatalf("Expected not empty container name") 698 } 699 700 if c := daemon.Get(container.Name); c == nil { 701 log.Fatalf("Could not lookup container %s by its name", container.Name) 702 } else if c.ID != containerID { 703 log.Fatalf("Looking up container name %s returned id %s instead of 
%s", container.Name, c.ID, containerID) 704 } 705 } 706 707 func TestContainerNameValidation(t *testing.T) { 708 eng := NewTestEngine(t) 709 daemon := mkDaemonFromEngine(eng, t) 710 defer nuke(daemon) 711 712 for _, test := range []struct { 713 Name string 714 Valid bool 715 }{ 716 {"abc-123_AAA.1", true}, 717 {"\000asdf", false}, 718 } { 719 config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) 720 if err != nil { 721 if !test.Valid { 722 continue 723 } 724 t.Fatal(err) 725 } 726 727 var outputBuffer = bytes.NewBuffer(nil) 728 job := eng.Job("create", test.Name) 729 if err := job.ImportEnv(config); err != nil { 730 t.Fatal(err) 731 } 732 job.Stdout.Add(outputBuffer) 733 if err := job.Run(); err != nil { 734 if !test.Valid { 735 continue 736 } 737 t.Fatal(err) 738 } 739 740 container := daemon.Get(engine.Tail(outputBuffer, 1)) 741 742 if container.Name != "/"+test.Name { 743 t.Fatalf("Expect /%s got %s", test.Name, container.Name) 744 } 745 746 if c := daemon.Get("/" + test.Name); c == nil { 747 t.Fatalf("Couldn't retrieve test container as /%s", test.Name) 748 } else if c.ID != container.ID { 749 t.Fatalf("Container /%s has ID %s instead of %s", test.Name, c.ID, container.ID) 750 } 751 } 752 753 } 754 755 func TestLinkChildContainer(t *testing.T) { 756 eng := NewTestEngine(t) 757 daemon := mkDaemonFromEngine(eng, t) 758 defer nuke(daemon) 759 760 config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) 761 if err != nil { 762 t.Fatal(err) 763 } 764 765 container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) 766 767 webapp, err := daemon.GetByName("/webapp") 768 if err != nil { 769 t.Fatal(err) 770 } 771 772 if webapp.ID != container.ID { 773 t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) 774 } 775 776 config, _, _, err = parseRun([]string{GetTestImage(daemon).ID, "echo test"}) 777 if err != nil { 778 t.Fatal(err) 779 } 780 781 childContainer := 
daemon.Get(createTestContainer(eng, config, t)) 782 783 if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { 784 t.Fatal(err) 785 } 786 787 // Get the child by it's new name 788 db, err := daemon.GetByName("/webapp/db") 789 if err != nil { 790 t.Fatal(err) 791 } 792 if db.ID != childContainer.ID { 793 t.Fatalf("Expect db id to match container id: %s != %s", db.ID, childContainer.ID) 794 } 795 } 796 797 func TestGetAllChildren(t *testing.T) { 798 eng := NewTestEngine(t) 799 daemon := mkDaemonFromEngine(eng, t) 800 defer nuke(daemon) 801 802 config, _, _, err := parseRun([]string{unitTestImageID, "echo test"}) 803 if err != nil { 804 t.Fatal(err) 805 } 806 807 container := daemon.Get(createNamedTestContainer(eng, config, t, "/webapp")) 808 809 webapp, err := daemon.GetByName("/webapp") 810 if err != nil { 811 t.Fatal(err) 812 } 813 814 if webapp.ID != container.ID { 815 t.Fatalf("Expect webapp id to match container id: %s != %s", webapp.ID, container.ID) 816 } 817 818 config, _, _, err = parseRun([]string{unitTestImageID, "echo test"}) 819 if err != nil { 820 t.Fatal(err) 821 } 822 823 childContainer := daemon.Get(createTestContainer(eng, config, t)) 824 825 if err := daemon.RegisterLink(webapp, childContainer, "db"); err != nil { 826 t.Fatal(err) 827 } 828 829 children, err := daemon.Children("/webapp") 830 if err != nil { 831 t.Fatal(err) 832 } 833 834 if children == nil { 835 t.Fatal("Children should not be nil") 836 } 837 if len(children) == 0 { 838 t.Fatal("Children should not be empty") 839 } 840 841 for key, value := range children { 842 if key != "/webapp/db" { 843 t.Fatalf("Expected /webapp/db got %s", key) 844 } 845 if value.ID != childContainer.ID { 846 t.Fatalf("Expected id %s got %s", childContainer.ID, value.ID) 847 } 848 } 849 } 850 851 func TestDestroyWithInitLayer(t *testing.T) { 852 daemon := mkDaemon(t) 853 defer nuke(daemon) 854 855 container, _, err := daemon.Create(&runconfig.Config{ 856 Image: GetTestImage(daemon).ID, 857 
Cmd: []string{"ls", "-al"}, 858 }, 859 &runconfig.HostConfig{}, 860 "") 861 862 if err != nil { 863 t.Fatal(err) 864 } 865 // Destroy 866 if err := daemon.Destroy(container); err != nil { 867 t.Fatal(err) 868 } 869 870 // Make sure daemon.Exists() behaves correctly 871 if daemon.Exists("test_destroy") { 872 t.Fatalf("Exists() returned true") 873 } 874 875 // Make sure daemon.List() doesn't list the destroyed container 876 if len(daemon.List()) != 0 { 877 t.Fatalf("Expected 0 container, %v found", len(daemon.List())) 878 } 879 880 driver := daemon.Graph().Driver() 881 882 // Make sure that the container does not exist in the driver 883 if _, err := driver.Get(container.ID, ""); err == nil { 884 t.Fatal("Conttainer should not exist in the driver") 885 } 886 887 // Make sure that the init layer is removed from the driver 888 if _, err := driver.Get(fmt.Sprintf("%s-init", container.ID), ""); err == nil { 889 t.Fatal("Container's init layer should not exist in the driver") 890 } 891 }