package daemon

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/docker/libcontainer/label"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/jsonfilelog"
	"github.com/docker/docker/daemon/network"
	"github.com/docker/docker/image"
	"github.com/docker/docker/nat"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/broadcastwriter"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/jsonlog"
	"github.com/docker/docker/pkg/mount"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/volume"
)

var (
	ErrNotATTY               = errors.New("The PTY is not a file")
	ErrNoTTY                 = errors.New("No PTY found")
	ErrContainerStart        = errors.New("The container failed to start. Unknown error")
	ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.")
)

// StreamConfig holds the stdio streams for a container (or an exec'd
// process). stdout/stderr are broadcast writers so that any number of
// consumers can attach via the *Pipe methods below.
type StreamConfig struct {
	stdout    *broadcastwriter.BroadcastWriter
	stderr    *broadcastwriter.BroadcastWriter
	stdin     io.ReadCloser
	stdinPipe io.WriteCloser
}

// CommonContainer holds the settings for a container which are applicable
// across all platforms supported by the daemon.
type CommonContainer struct {
	StreamConfig

	*State `json:"State"` // Needed for remote api version <= 1.11
	root   string         // Path to the "home" of the container, including metadata.
	basefs string         // Path to the graphdriver mountpoint

	ID                       string
	Created                  time.Time
	Path                     string
	Args                     []string
	Config                   *runconfig.Config
	ImageID                  string `json:"Image"`
	NetworkSettings          *network.Settings
	ResolvConfPath           string
	HostnamePath             string
	HostsPath                string
	LogPath                  string
	Name                     string
	Driver                   string
	ExecDriver               string
	MountLabel, ProcessLabel string
	RestartCount             int
	UpdateDns                bool

	MountPoints map[string]*mountPoint
	Volumes     map[string]string // Deprecated since 1.7, kept for backwards compatibility
	VolumesRW   map[string]bool   // Deprecated since 1.7, kept for backwards compatibility

	hostConfig *runconfig.HostConfig
	command    *execdriver.Command

	monitor      *containerMonitor
	execCommands *execStore
	daemon       *Daemon
	// logDriver for closing
	logDriver logger.Logger
	logCopier *logger.Copier
}

// FromDisk loads the container's persisted configuration (config.json) from
// its metadata directory, reserves its SELinux process label, and then loads
// the host configuration via readHostConfig.
func (container *Container) FromDisk() error {
	pth, err := container.jsonPath()
	if err != nil {
		return err
	}

	jsonSource, err := os.Open(pth)
	if err != nil {
		return err
	}
	defer jsonSource.Close()

	dec := json.NewDecoder(jsonSource)

	// Load container settings
	// udp broke compat of docker.PortMapping, but it's not used when loading a container, we can skip it
	if err := dec.Decode(container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
		return err
	}

	if err := label.ReserveLabel(container.ProcessLabel); err != nil {
		return err
	}
	return container.readHostConfig()
}

// toDisk serializes the container to config.json and then persists the host
// configuration. Callers must hold the container lock; use ToDisk otherwise.
func (container *Container) toDisk() error {
	data, err := json.Marshal(container)
	if err != nil {
		return err
	}

	pth, err := container.jsonPath()
	if err != nil {
		return err
	}

	if err := ioutil.WriteFile(pth, data, 0666); err != nil {
		return err
	}

	return container.WriteHostConfig()
}

// ToDisk is the locked variant of toDisk.
func (container *Container) ToDisk() error {
	container.Lock()
	err := container.toDisk()
	container.Unlock()
	return err
}

// readHostConfig loads hostconfig.json into container.hostConfig. A missing
// file is not an error: the freshly initialized zero-value HostConfig is kept.
func (container *Container) readHostConfig() error {
	container.hostConfig = &runconfig.HostConfig{}
	// If the hostconfig file does not exist, do not read it.
	// (We still have to initialize container.hostConfig,
	// but that's OK, since we just did that above.)
	pth, err := container.hostConfigPath()
	if err != nil {
		return err
	}

	_, err = os.Stat(pth)
	if os.IsNotExist(err) {
		return nil
	}

	f, err := os.Open(pth)
	if err != nil {
		return err
	}
	defer f.Close()

	return json.NewDecoder(f).Decode(&container.hostConfig)
}

// WriteHostConfig persists container.hostConfig to hostconfig.json.
func (container *Container) WriteHostConfig() error {
	data, err := json.Marshal(container.hostConfig)
	if err != nil {
		return err
	}

	pth, err := container.hostConfigPath()
	if err != nil {
		return err
	}

	return ioutil.WriteFile(pth, data, 0666)
}

// LogEvent emits a container lifecycle event (e.g. "start", "die", "kill")
// on the daemon's events service.
func (container *Container) LogEvent(action string) {
	d := container.daemon
	d.EventsService.Log(
		action,
		container.ID,
		container.Config.Image,
	)
}

// Evaluates `path` in the scope of the container's basefs, with proper path
// sanitisation. Symlinks are all scoped to the basefs of the container, as
// though the container's basefs was `/`.
//
// The basefs of a container is the host-facing path which is bind-mounted as
// `/` inside the container. This method is essentially used to access a
// particular path inside the container as though you were a process in that
// container.
//
// NOTE: The returned path is *only* safely scoped inside the container's basefs
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) GetResourcePath(path string) (string, error) {
	// IMPORTANT - These are paths on the OS where the daemon is running, hence
	// any filepath operations must be done in an OS agnostic way.
	cleanPath := filepath.Join(string(os.PathSeparator), path)
	r, e := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
	return r, e
}

// Evaluates `path` in the scope of the container's root, with proper path
// sanitisation. Symlinks are all scoped to the root of the container, as
// though the container's root was `/`.
//
// The root of a container is the host-facing configuration metadata directory.
// Only use this method to safely access the container's `container.json` or
// other metadata files. If in doubt, use container.GetResourcePath.
//
// NOTE: The returned path is *only* safely scoped inside the container's root
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) GetRootResourcePath(path string) (string, error) {
	// IMPORTANT - These are paths on the OS where the daemon is running, hence
	// any filepath operations must be done in an OS agnostic way.
	cleanPath := filepath.Join(string(os.PathSeparator), path)
	return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
}

// Start mounts the container's filesystem, sets up networking, links,
// working directory and mounts, and hands the container to the monitor.
// It is a no-op for an already-running container. On any failure the
// deferred handler records the error, persists state, tears down whatever
// setup had completed, and emits a "die" event.
func (container *Container) Start() (err error) {
	container.Lock()
	defer container.Unlock()

	if container.Running {
		return nil
	}

	if container.removalInProgress || container.Dead {
		return fmt.Errorf("Container is marked for removal and cannot be started.")
	}

	// if we encounter an error during start we need to ensure that any other
	// setup has been cleaned up properly
	defer func() {
		if err != nil {
			container.setError(err)
			// if no one else has set it, make sure we don't leave it at zero
			if container.ExitCode == 0 {
				container.ExitCode = 128
			}
			container.toDisk()
			container.cleanup()
			container.LogEvent("die")
		}
	}()

	if err := container.Mount(); err != nil {
		return err
	}
	if err := container.initializeNetworking(); err != nil {
		return err
	}
	linkedEnv, err := container.setupLinkedContainers()
	if err != nil {
		return err
	}
	if err := container.setupWorkingDirectory(); err != nil {
		return err
	}
	env := container.createDaemonEnvironment(linkedEnv)
	if err := populateCommand(container, env); err != nil {
		return err
	}

	mounts, err := container.setupMounts()
	if err != nil {
		return err
	}

	container.command.Mounts = mounts
	return container.waitForStart()
}

// Run starts the container and blocks until it stops.
func (container *Container) Run() error {
	if err := container.Start(); err != nil {
		return err
	}
	container.WaitStop(-1 * time.Second)
	return nil
}

// Output starts the container, waits for it to stop, and returns everything
// the container wrote to stdout.
func (container *Container) Output() (output []byte, err error) {
	pipe := container.StdoutPipe()
	defer pipe.Close()
	if err := container.Start(); err != nil {
		return nil, err
	}
	output, err = ioutil.ReadAll(pipe)
	container.WaitStop(-1 * time.Second)
	return output, err
}

// StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data
// to the standard input of the container's active process.
// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
// which can be used to retrieve the standard output (and error) generated
// by the container's active process. The output (and error) are actually
// copied and delivered to all StdoutPipe and StderrPipe consumers, using
// a kind of "broadcaster".

func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser {
	return streamConfig.stdinPipe
}

func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser {
	reader, writer := io.Pipe()
	streamConfig.stdout.AddWriter(writer, "")
	return ioutils.NewBufReader(reader)
}

func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser {
	reader, writer := io.Pipe()
	streamConfig.stderr.AddWriter(writer, "")
	return ioutils.NewBufReader(reader)
}

// StdoutLogPipe is like StdoutPipe but registers the writer under the
// "stdout" key, for consumption by the logging copier.
func (streamConfig *StreamConfig) StdoutLogPipe() io.ReadCloser {
	reader, writer := io.Pipe()
	streamConfig.stdout.AddWriter(writer, "stdout")
	return ioutils.NewBufReader(reader)
}

// StderrLogPipe is like StderrPipe but registers the writer under the
// "stderr" key, for consumption by the logging copier.
func (streamConfig *StreamConfig) StderrLogPipe() io.ReadCloser {
	reader, writer := io.Pipe()
	streamConfig.stderr.AddWriter(writer, "stderr")
	return ioutils.NewBufReader(reader)
}

// isNetworkAllocated reports whether the container currently has an IP
// address assigned.
func (container *Container) isNetworkAllocated() bool {
	return container.NetworkSettings.IPAddress != ""
}

// cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (container *Container) cleanup() {
	container.ReleaseNetwork()

	disableAllActiveLinks(container)

	if err := container.Unmount(); err != nil {
		logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
	}

	for _, eConfig := range container.execCommands.s {
		container.daemon.unregisterExecCommand(eConfig)
	}

	container.UnmountVolumes(false)
}

// KillSig sends signal sig to the container's main process via the exec
// driver. It refuses paused and non-running containers, and tells the
// monitor not to restart the container afterwards.
func (container *Container) KillSig(sig int) error {
	logrus.Debugf("Sending %d to %s", sig, container.ID)
	container.Lock()
	defer container.Unlock()

	// We could unpause the container for them rather than returning this error
	if container.Paused {
		return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID)
	}

	if !container.Running {
		return fmt.Errorf("Container %s is not running", container.ID)
	}

	// signal to the monitor that it should not restart the container
	// after we send the kill signal
	container.monitor.ExitOnNext()

	// if the container is currently restarting we do not need to send the signal
	// to the process. Telling the monitor that it should exit on it's next event
	// loop is enough
	if container.Restarting {
		return nil
	}

	if err := container.daemon.Kill(container, sig); err != nil {
		return err
	}
	container.LogEvent("kill")
	return nil
}

// Wrapper around KillSig() suppressing "no such process" error.
394 func (container *Container) killPossiblyDeadProcess(sig int) error { 395 err := container.KillSig(sig) 396 if err == syscall.ESRCH { 397 logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig) 398 return nil 399 } 400 return err 401 } 402 403 func (container *Container) Pause() error { 404 container.Lock() 405 defer container.Unlock() 406 407 // We cannot Pause the container which is not running 408 if !container.Running { 409 return fmt.Errorf("Container %s is not running, cannot pause a non-running container", container.ID) 410 } 411 412 // We cannot Pause the container which is already paused 413 if container.Paused { 414 return fmt.Errorf("Container %s is already paused", container.ID) 415 } 416 417 if err := container.daemon.execDriver.Pause(container.command); err != nil { 418 return err 419 } 420 container.Paused = true 421 container.LogEvent("pause") 422 return nil 423 } 424 425 func (container *Container) Unpause() error { 426 container.Lock() 427 defer container.Unlock() 428 429 // We cannot unpause the container which is not running 430 if !container.Running { 431 return fmt.Errorf("Container %s is not running, cannot unpause a non-running container", container.ID) 432 } 433 434 // We cannot unpause the container which is not paused 435 if !container.Paused { 436 return fmt.Errorf("Container %s is not paused", container.ID) 437 } 438 439 if err := container.daemon.execDriver.Unpause(container.command); err != nil { 440 return err 441 } 442 container.Paused = false 443 container.LogEvent("unpause") 444 return nil 445 } 446 447 func (container *Container) Kill() error { 448 if !container.IsRunning() { 449 return fmt.Errorf("Container %s is not running", container.ID) 450 } 451 452 // 1. 
Send SIGKILL 453 if err := container.killPossiblyDeadProcess(9); err != nil { 454 // While normally we might "return err" here we're not going to 455 // because if we can't stop the container by this point then 456 // its probably because its already stopped. Meaning, between 457 // the time of the IsRunning() call above and now it stopped. 458 // Also, since the err return will be exec driver specific we can't 459 // look for any particular (common) error that would indicate 460 // that the process is already dead vs something else going wrong. 461 // So, instead we'll give it up to 2 more seconds to complete and if 462 // by that time the container is still running, then the error 463 // we got is probably valid and so we return it to the caller. 464 465 if container.IsRunning() { 466 container.WaitStop(2 * time.Second) 467 if container.IsRunning() { 468 return err 469 } 470 } 471 } 472 473 // 2. Wait for the process to die, in last resort, try to kill the process directly 474 if err := killProcessDirectly(container); err != nil { 475 return err 476 } 477 478 container.WaitStop(-1 * time.Second) 479 return nil 480 } 481 482 func (container *Container) Stop(seconds int) error { 483 if !container.IsRunning() { 484 return nil 485 } 486 487 // 1. Send a SIGTERM 488 if err := container.killPossiblyDeadProcess(15); err != nil { 489 logrus.Infof("Failed to send SIGTERM to the process, force killing") 490 if err := container.killPossiblyDeadProcess(9); err != nil { 491 return err 492 } 493 } 494 495 // 2. Wait for the process to exit on its own 496 if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { 497 logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) 498 // 3. 
If it doesn't, then send SIGKILL 499 if err := container.Kill(); err != nil { 500 container.WaitStop(-1 * time.Second) 501 return err 502 } 503 } 504 505 container.LogEvent("stop") 506 return nil 507 } 508 509 func (container *Container) Restart(seconds int) error { 510 // Avoid unnecessarily unmounting and then directly mounting 511 // the container when the container stops and then starts 512 // again 513 if err := container.Mount(); err == nil { 514 defer container.Unmount() 515 } 516 517 if err := container.Stop(seconds); err != nil { 518 return err 519 } 520 521 if err := container.Start(); err != nil { 522 return err 523 } 524 525 container.LogEvent("restart") 526 return nil 527 } 528 529 func (container *Container) Resize(h, w int) error { 530 if !container.IsRunning() { 531 return fmt.Errorf("Cannot resize container %s, container is not running", container.ID) 532 } 533 if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil { 534 return err 535 } 536 container.LogEvent("resize") 537 return nil 538 } 539 540 func (container *Container) Export() (archive.Archive, error) { 541 if err := container.Mount(); err != nil { 542 return nil, err 543 } 544 545 archive, err := archive.Tar(container.basefs, archive.Uncompressed) 546 if err != nil { 547 container.Unmount() 548 return nil, err 549 } 550 arch := ioutils.NewReadCloserWrapper(archive, func() error { 551 err := archive.Close() 552 container.Unmount() 553 return err 554 }) 555 container.LogEvent("export") 556 return arch, err 557 } 558 559 func (container *Container) Mount() error { 560 return container.daemon.Mount(container) 561 } 562 563 func (container *Container) changes() ([]archive.Change, error) { 564 return container.daemon.Changes(container) 565 } 566 567 func (container *Container) Changes() ([]archive.Change, error) { 568 container.Lock() 569 defer container.Unlock() 570 return container.changes() 571 } 572 573 func (container *Container) GetImage() (*image.Image, error) { 574 
if container.daemon == nil { 575 return nil, fmt.Errorf("Can't get image of unregistered container") 576 } 577 return container.daemon.graph.Get(container.ImageID) 578 } 579 580 func (container *Container) Unmount() error { 581 return container.daemon.Unmount(container) 582 } 583 584 func (container *Container) hostConfigPath() (string, error) { 585 return container.GetRootResourcePath("hostconfig.json") 586 } 587 588 func (container *Container) jsonPath() (string, error) { 589 return container.GetRootResourcePath("config.json") 590 } 591 592 // This method must be exported to be used from the lxc template 593 // This directory is only usable when the container is running 594 func (container *Container) RootfsPath() string { 595 return container.basefs 596 } 597 598 func validateID(id string) error { 599 if id == "" { 600 return fmt.Errorf("Invalid empty id") 601 } 602 return nil 603 } 604 605 func (container *Container) Copy(resource string) (io.ReadCloser, error) { 606 container.Lock() 607 defer container.Unlock() 608 var err error 609 if err := container.Mount(); err != nil { 610 return nil, err 611 } 612 defer func() { 613 if err != nil { 614 // unmount any volumes 615 container.UnmountVolumes(true) 616 // unmount the container's rootfs 617 container.Unmount() 618 } 619 }() 620 mounts, err := container.setupMounts() 621 if err != nil { 622 return nil, err 623 } 624 for _, m := range mounts { 625 dest, err := container.GetResourcePath(m.Destination) 626 if err != nil { 627 return nil, err 628 } 629 if err := mount.Mount(m.Source, dest, "bind", "rbind,ro"); err != nil { 630 return nil, err 631 } 632 } 633 basePath, err := container.GetResourcePath(resource) 634 if err != nil { 635 return nil, err 636 } 637 stat, err := os.Stat(basePath) 638 if err != nil { 639 return nil, err 640 } 641 var filter []string 642 if !stat.IsDir() { 643 d, f := filepath.Split(basePath) 644 basePath = d 645 filter = []string{f} 646 } else { 647 filter = 
[]string{filepath.Base(basePath)} 648 basePath = filepath.Dir(basePath) 649 } 650 archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ 651 Compression: archive.Uncompressed, 652 IncludeFiles: filter, 653 }) 654 if err != nil { 655 return nil, err 656 } 657 reader := ioutils.NewReadCloserWrapper(archive, func() error { 658 err := archive.Close() 659 container.UnmountVolumes(true) 660 container.Unmount() 661 return err 662 }) 663 container.LogEvent("copy") 664 return reader, nil 665 } 666 667 // Returns true if the container exposes a certain port 668 func (container *Container) Exposes(p nat.Port) bool { 669 _, exists := container.Config.ExposedPorts[p] 670 return exists 671 } 672 673 func (container *Container) HostConfig() *runconfig.HostConfig { 674 return container.hostConfig 675 } 676 677 func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) { 678 container.hostConfig = hostConfig 679 } 680 681 func (container *Container) getLogConfig() runconfig.LogConfig { 682 cfg := container.hostConfig.LogConfig 683 if cfg.Type != "" { // container has log driver configured 684 return cfg 685 } 686 // Use daemon's default log config for containers 687 return container.daemon.defaultLogConfig 688 } 689 690 func (container *Container) getLogger() (logger.Logger, error) { 691 cfg := container.getLogConfig() 692 c, err := logger.GetLogDriver(cfg.Type) 693 if err != nil { 694 return nil, fmt.Errorf("Failed to get logging factory: %v", err) 695 } 696 ctx := logger.Context{ 697 Config: cfg.Config, 698 ContainerID: container.ID, 699 ContainerName: container.Name, 700 ContainerEntrypoint: container.Path, 701 ContainerArgs: container.Args, 702 ContainerImageID: container.ImageID, 703 ContainerImageName: container.Config.Image, 704 ContainerCreated: container.Created, 705 } 706 707 // Set logging file for "json-logger" 708 if cfg.Type == jsonfilelog.Name { 709 ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", 
container.ID)) 710 if err != nil { 711 return nil, err 712 } 713 } 714 return c(ctx) 715 } 716 717 func (container *Container) startLogging() error { 718 cfg := container.getLogConfig() 719 if cfg.Type == "none" { 720 return nil // do not start logging routines 721 } 722 723 l, err := container.getLogger() 724 if err != nil { 725 return fmt.Errorf("Failed to initialize logging driver: %v", err) 726 } 727 728 copier, err := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) 729 if err != nil { 730 return err 731 } 732 container.logCopier = copier 733 copier.Run() 734 container.logDriver = l 735 736 // set LogPath field only for json-file logdriver 737 if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { 738 container.LogPath = jl.LogPath() 739 } 740 741 return nil 742 } 743 744 func (container *Container) waitForStart() error { 745 container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy) 746 747 // block until we either receive an error from the initial start of the container's 748 // process or until the process is running in the container 749 select { 750 case <-container.monitor.startSignal: 751 case err := <-promise.Go(container.monitor.Start): 752 return err 753 } 754 755 return nil 756 } 757 758 func (container *Container) GetProcessLabel() string { 759 // even if we have a process label return "" if we are running 760 // in privileged mode 761 if container.hostConfig.Privileged { 762 return "" 763 } 764 return container.ProcessLabel 765 } 766 767 func (container *Container) GetMountLabel() string { 768 if container.hostConfig.Privileged { 769 return "" 770 } 771 return container.MountLabel 772 } 773 774 func (container *Container) Stats() (*execdriver.ResourceStats, error) { 775 return container.daemon.Stats(container) 776 } 777 778 func (c *Container) LogDriverType() string { 779 c.Lock() 780 defer c.Unlock() 781 if c.hostConfig.LogConfig.Type == "" { 782 
return c.daemon.defaultLogConfig.Type 783 } 784 return c.hostConfig.LogConfig.Type 785 } 786 787 func (container *Container) GetExecIDs() []string { 788 return container.execCommands.List() 789 } 790 791 func (container *Container) Exec(execConfig *execConfig) error { 792 container.Lock() 793 defer container.Unlock() 794 795 waitStart := make(chan struct{}) 796 797 callback := func(processConfig *execdriver.ProcessConfig, pid int) { 798 if processConfig.Tty { 799 // The callback is called after the process Start() 800 // so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave 801 // which we close here. 802 if c, ok := processConfig.Stdout.(io.Closer); ok { 803 c.Close() 804 } 805 } 806 close(waitStart) 807 } 808 809 // We use a callback here instead of a goroutine and an chan for 810 // syncronization purposes 811 cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) }) 812 813 // Exec should not return until the process is actually running 814 select { 815 case <-waitStart: 816 case err := <-cErr: 817 return err 818 } 819 820 return nil 821 } 822 823 func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error { 824 var ( 825 err error 826 exitCode int 827 ) 828 829 pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin) 830 exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback) 831 if err != nil { 832 logrus.Errorf("Error running command in existing container %s: %s", container.ID, err) 833 } 834 835 logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode) 836 if execConfig.OpenStdin { 837 if err := execConfig.StreamConfig.stdin.Close(); err != nil { 838 logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err) 839 } 840 } 841 if err := execConfig.StreamConfig.stdout.Clean(); err != nil { 842 
logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err) 843 } 844 if err := execConfig.StreamConfig.stderr.Clean(); err != nil { 845 logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err) 846 } 847 if execConfig.ProcessConfig.Terminal != nil { 848 if err := execConfig.ProcessConfig.Terminal.Close(); err != nil { 849 logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err) 850 } 851 } 852 853 return err 854 } 855 856 func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { 857 return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr) 858 } 859 860 func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error { 861 862 if logs { 863 logDriver, err := c.getLogger() 864 cLog, err := logDriver.GetReader() 865 866 if err != nil { 867 logrus.Errorf("Error reading logs: %s", err) 868 } else if c.LogDriverType() != jsonfilelog.Name { 869 logrus.Errorf("Reading logs not implemented for driver %s", c.LogDriverType()) 870 } else { 871 dec := json.NewDecoder(cLog) 872 for { 873 l := &jsonlog.JSONLog{} 874 875 if err := dec.Decode(l); err == io.EOF { 876 break 877 } else if err != nil { 878 logrus.Errorf("Error streaming logs: %s", err) 879 break 880 } 881 if l.Stream == "stdout" && stdout != nil { 882 io.WriteString(stdout, l.Log) 883 } 884 if l.Stream == "stderr" && stderr != nil { 885 io.WriteString(stderr, l.Log) 886 } 887 } 888 } 889 } 890 891 c.LogEvent("attach") 892 893 //stream 894 if stream { 895 var stdinPipe io.ReadCloser 896 if stdin != nil { 897 r, w := io.Pipe() 898 go func() { 899 defer w.Close() 900 defer logrus.Debugf("Closing buffered stdin pipe") 901 io.Copy(w, stdin) 902 }() 903 stdinPipe = r 904 } 905 <-c.Attach(stdinPipe, stdout, stderr) 906 // If we are in stdinonce mode, wait for the process to end 907 // otherwise, simply return 908 if 
c.Config.StdinOnce && !c.Config.Tty { 909 c.WaitStop(-1 * time.Second) 910 } 911 } 912 return nil 913 } 914 915 func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error { 916 var ( 917 cStdout, cStderr io.ReadCloser 918 cStdin io.WriteCloser 919 wg sync.WaitGroup 920 errors = make(chan error, 3) 921 ) 922 923 if stdin != nil && openStdin { 924 cStdin = streamConfig.StdinPipe() 925 wg.Add(1) 926 } 927 928 if stdout != nil { 929 cStdout = streamConfig.StdoutPipe() 930 wg.Add(1) 931 } 932 933 if stderr != nil { 934 cStderr = streamConfig.StderrPipe() 935 wg.Add(1) 936 } 937 938 // Connect stdin of container to the http conn. 939 go func() { 940 if stdin == nil || !openStdin { 941 return 942 } 943 logrus.Debugf("attach: stdin: begin") 944 defer func() { 945 if stdinOnce && !tty { 946 cStdin.Close() 947 } else { 948 // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr 949 if cStdout != nil { 950 cStdout.Close() 951 } 952 if cStderr != nil { 953 cStderr.Close() 954 } 955 } 956 wg.Done() 957 logrus.Debugf("attach: stdin: end") 958 }() 959 960 var err error 961 if tty { 962 _, err = copyEscapable(cStdin, stdin) 963 } else { 964 _, err = io.Copy(cStdin, stdin) 965 966 } 967 if err == io.ErrClosedPipe { 968 err = nil 969 } 970 if err != nil { 971 logrus.Errorf("attach: stdin: %s", err) 972 errors <- err 973 return 974 } 975 }() 976 977 attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { 978 if stream == nil { 979 return 980 } 981 defer func() { 982 // Make sure stdin gets closed 983 if stdin != nil { 984 stdin.Close() 985 } 986 streamPipe.Close() 987 wg.Done() 988 logrus.Debugf("attach: %s: end", name) 989 }() 990 991 logrus.Debugf("attach: %s: begin", name) 992 _, err := io.Copy(stream, streamPipe) 993 if err == io.ErrClosedPipe { 994 err = nil 995 } 996 if err != nil { 997 logrus.Errorf("attach: %s: %v", name, err) 998 errors <- err 
999 } 1000 } 1001 1002 go attachStream("stdout", stdout, cStdout) 1003 go attachStream("stderr", stderr, cStderr) 1004 1005 return promise.Go(func() error { 1006 wg.Wait() 1007 close(errors) 1008 for err := range errors { 1009 if err != nil { 1010 return err 1011 } 1012 } 1013 return nil 1014 }) 1015 } 1016 1017 // Code c/c from io.Copy() modified to handle escape sequence 1018 func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) { 1019 buf := make([]byte, 32*1024) 1020 for { 1021 nr, er := src.Read(buf) 1022 if nr > 0 { 1023 // ---- Docker addition 1024 // char 16 is C-p 1025 if nr == 1 && buf[0] == 16 { 1026 nr, er = src.Read(buf) 1027 // char 17 is C-q 1028 if nr == 1 && buf[0] == 17 { 1029 if err := src.Close(); err != nil { 1030 return 0, err 1031 } 1032 return 0, nil 1033 } 1034 } 1035 // ---- End of docker 1036 nw, ew := dst.Write(buf[0:nr]) 1037 if nw > 0 { 1038 written += int64(nw) 1039 } 1040 if ew != nil { 1041 err = ew 1042 break 1043 } 1044 if nr != nw { 1045 err = io.ErrShortWrite 1046 break 1047 } 1048 } 1049 if er == io.EOF { 1050 break 1051 } 1052 if er != nil { 1053 err = er 1054 break 1055 } 1056 } 1057 return written, err 1058 } 1059 1060 func (container *Container) networkMounts() []execdriver.Mount { 1061 var mounts []execdriver.Mount 1062 if container.ResolvConfPath != "" { 1063 label.SetFileLabel(container.ResolvConfPath, container.MountLabel) 1064 mounts = append(mounts, execdriver.Mount{ 1065 Source: container.ResolvConfPath, 1066 Destination: "/etc/resolv.conf", 1067 Writable: !container.hostConfig.ReadonlyRootfs, 1068 Private: true, 1069 }) 1070 } 1071 if container.HostnamePath != "" { 1072 label.SetFileLabel(container.HostnamePath, container.MountLabel) 1073 mounts = append(mounts, execdriver.Mount{ 1074 Source: container.HostnamePath, 1075 Destination: "/etc/hostname", 1076 Writable: !container.hostConfig.ReadonlyRootfs, 1077 Private: true, 1078 }) 1079 } 1080 if container.HostsPath != "" { 1081 
label.SetFileLabel(container.HostsPath, container.MountLabel) 1082 mounts = append(mounts, execdriver.Mount{ 1083 Source: container.HostsPath, 1084 Destination: "/etc/hosts", 1085 Writable: !container.hostConfig.ReadonlyRootfs, 1086 Private: true, 1087 }) 1088 } 1089 return mounts 1090 } 1091 1092 func (container *Container) addBindMountPoint(name, source, destination string, rw bool) { 1093 container.MountPoints[destination] = &mountPoint{ 1094 Name: name, 1095 Source: source, 1096 Destination: destination, 1097 RW: rw, 1098 } 1099 } 1100 1101 func (container *Container) addLocalMountPoint(name, destination string, rw bool) { 1102 container.MountPoints[destination] = &mountPoint{ 1103 Name: name, 1104 Driver: volume.DefaultDriverName, 1105 Destination: destination, 1106 RW: rw, 1107 } 1108 } 1109 1110 func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) { 1111 container.MountPoints[destination] = &mountPoint{ 1112 Name: vol.Name(), 1113 Driver: vol.DriverName(), 1114 Destination: destination, 1115 RW: rw, 1116 Volume: vol, 1117 } 1118 } 1119 1120 func (container *Container) isDestinationMounted(destination string) bool { 1121 return container.MountPoints[destination] != nil 1122 } 1123 1124 func (container *Container) prepareMountPoints() error { 1125 for _, config := range container.MountPoints { 1126 if len(config.Driver) > 0 { 1127 v, err := createVolume(config.Name, config.Driver) 1128 if err != nil { 1129 return err 1130 } 1131 config.Volume = v 1132 } 1133 } 1134 return nil 1135 } 1136 1137 func (container *Container) removeMountPoints() error { 1138 for _, m := range container.MountPoints { 1139 if m.Volume != nil { 1140 if err := removeVolume(m.Volume); err != nil { 1141 return err 1142 } 1143 } 1144 } 1145 return nil 1146 } 1147 1148 func (container *Container) shouldRestart() bool { 1149 return container.hostConfig.RestartPolicy.Name == "always" || 1150 (container.hostConfig.RestartPolicy.Name == 
"on-failure" && container.ExitCode != 0) 1151 } 1152 1153 func (container *Container) copyImagePathContent(v volume.Volume, destination string) error { 1154 rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs) 1155 if err != nil { 1156 return err 1157 } 1158 1159 if _, err = ioutil.ReadDir(rootfs); err != nil { 1160 if os.IsNotExist(err) { 1161 return nil 1162 } 1163 return err 1164 } 1165 1166 path, err := v.Mount() 1167 if err != nil { 1168 return err 1169 } 1170 1171 if err := copyExistingContents(rootfs, path); err != nil { 1172 return err 1173 } 1174 1175 return v.Unmount() 1176 }