github.com/torfuzx/docker@v1.8.1/daemon/container.go

package daemon

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/opencontainers/runc/libcontainer/label"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/jsonfilelog"
	"github.com/docker/docker/daemon/network"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/broadcastwriter"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/mount"
	"github.com/docker/docker/pkg/nat"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/volume"
)

var (
	ErrNotATTY                 = errors.New("The PTY is not a file")
	ErrNoTTY                   = errors.New("No PTY found")
	ErrContainerStart          = errors.New("The container failed to start. Unknown error")
	ErrContainerStartTimeout   = errors.New("The container failed to start due to timeout.")
	ErrContainerRootfsReadonly = errors.New("container rootfs is marked read-only")
)

type ErrContainerNotRunning struct {
	id string
}

func (e ErrContainerNotRunning) Error() string {
	return fmt.Sprintf("Container %s is not running", e.id)
}

type StreamConfig struct {
	stdout    *broadcastwriter.BroadcastWriter
	stderr    *broadcastwriter.BroadcastWriter
	stdin     io.ReadCloser
	stdinPipe io.WriteCloser
}

// CommonContainer holds the settings for a container which are applicable
// across all platforms supported by the daemon.
type CommonContainer struct {
	StreamConfig

	*State `json:"State"` // Needed for remote api version <= 1.11
	root   string         // Path to the "home" of the container, including metadata.
	basefs string // Path to the graphdriver mountpoint

	ID                       string
	Created                  time.Time
	Path                     string
	Args                     []string
	Config                   *runconfig.Config
	ImageID                  string `json:"Image"`
	NetworkSettings          *network.Settings
	ResolvConfPath           string
	HostnamePath             string
	HostsPath                string
	LogPath                  string
	Name                     string
	Driver                   string
	ExecDriver               string
	MountLabel, ProcessLabel string
	RestartCount             int
	UpdateDns                bool
	HasBeenStartedBefore     bool

	MountPoints map[string]*mountPoint
	Volumes     map[string]string // Deprecated since 1.7, kept for backwards compatibility
	VolumesRW   map[string]bool   // Deprecated since 1.7, kept for backwards compatibility

	hostConfig *runconfig.HostConfig
	command    *execdriver.Command

	monitor      *containerMonitor
	execCommands *execStore
	daemon       *Daemon
	// logDriver for closing
	logDriver logger.Logger
	logCopier *logger.Copier
}

func (container *Container) FromDisk() error {
	pth, err := container.jsonPath()
	if err != nil {
		return err
	}

	jsonSource, err := os.Open(pth)
	if err != nil {
		return err
	}
	defer jsonSource.Close()

	dec := json.NewDecoder(jsonSource)

	// Load container settings
	// udp broke compat of docker.PortMapping, but it's not used when loading a container, so we can skip it
	if err := dec.Decode(container); err != nil && !strings.Contains(err.Error(), "docker.PortMapping") {
		return err
	}

	if err := label.ReserveLabel(container.ProcessLabel); err != nil {
		return err
	}
	return container.readHostConfig()
}

func (container *Container) toDisk() error {
	data, err := json.Marshal(container)
	if err != nil {
		return err
	}

	pth, err := container.jsonPath()
	if err != nil {
		return err
	}

	if err := ioutil.WriteFile(pth, data, 0666); err != nil {
		return err
	}

	return container.WriteHostConfig()
}

func (container *Container) ToDisk() error {
	container.Lock()
	err := container.toDisk()
	container.Unlock()
	return err
}

func (container *Container) readHostConfig() error {
	container.hostConfig = &runconfig.HostConfig{}
	// If the hostconfig file does not exist, do not read it.
	// (We still have to initialize container.hostConfig,
	// but that's OK, since we just did that above.)
	pth, err := container.hostConfigPath()
	if err != nil {
		return err
	}

	_, err = os.Stat(pth)
	if os.IsNotExist(err) {
		return nil
	}

	f, err := os.Open(pth)
	if err != nil {
		return err
	}
	defer f.Close()

	return json.NewDecoder(f).Decode(&container.hostConfig)
}

func (container *Container) WriteHostConfig() error {
	data, err := json.Marshal(container.hostConfig)
	if err != nil {
		return err
	}

	pth, err := container.hostConfigPath()
	if err != nil {
		return err
	}

	return ioutil.WriteFile(pth, data, 0666)
}

func (container *Container) LogEvent(action string) {
	d := container.daemon
	d.EventsService.Log(
		action,
		container.ID,
		container.Config.Image,
	)
}
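// Example (illustrative sketch, assuming an already registered container c):
// the persistence helpers above serialize the container to JSON files under
// the container's root directory.
//
//	if err := c.ToDisk(); err != nil { // writes <root>/config.json and <root>/hostconfig.json
//		logrus.Errorf("persisting %s: %v", c.ID, err)
//	}
//	if err := c.FromDisk(); err != nil { // reloads the same files
//		logrus.Errorf("reloading %s: %v", c.ID, err)
//	}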
// Evaluates `path` in the scope of the container's basefs, with proper path
// sanitisation. Symlinks are all scoped to the basefs of the container, as
// though the container's basefs was `/`.
//
// The basefs of a container is the host-facing path which is bind-mounted as
// `/` inside the container. This method is essentially used to access a
// particular path inside the container as though you were a process in that
// container.
//
// NOTE: The returned path is *only* safely scoped inside the container's basefs
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) GetResourcePath(path string) (string, error) {
	// IMPORTANT - These are paths on the OS where the daemon is running, hence
	// any filepath operations must be done in an OS agnostic way.
	cleanPath := filepath.Join(string(os.PathSeparator), path)
	r, e := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
	return r, e
}

// Evaluates `path` in the scope of the container's root, with proper path
// sanitisation. Symlinks are all scoped to the root of the container, as
// though the container's root was `/`.
//
// The root of a container is the host-facing configuration metadata directory.
// Only use this method to safely access the container's `container.json` or
// other metadata files. If in doubt, use container.GetResourcePath.
//
// NOTE: The returned path is *only* safely scoped inside the container's root
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) GetRootResourcePath(path string) (string, error) {
	// IMPORTANT - These are paths on the OS where the daemon is running, hence
	// any filepath operations must be done in an OS agnostic way.
	cleanPath := filepath.Join(string(os.PathSeparator), path)
	return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
}
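// Example (illustrative sketch): both helpers clamp the evaluated path to their
// scope directory, so traversal cannot escape it. With a hypothetical basefs of
// /var/lib/docker/aufs/mnt/<id>:
//
//	p, _ := container.GetResourcePath("../../etc/passwd")
//	// p is <basefs>/etc/passwd, never the host's /etc/passwd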
func (container *Container) Start() (err error) {
	container.Lock()
	defer container.Unlock()

	if container.Running {
		return nil
	}

	if container.removalInProgress || container.Dead {
		return fmt.Errorf("Container is marked for removal and cannot be started.")
	}

	// if we encounter an error during start we need to ensure that any other
	// setup has been cleaned up properly
	defer func() {
		if err != nil {
			container.setError(err)
			// if no one else has set it, make sure we don't leave it at zero
			if container.ExitCode == 0 {
				container.ExitCode = 128
			}
			container.toDisk()
			container.cleanup()
			container.LogEvent("die")
		}
	}()

	if err := container.Mount(); err != nil {
		return err
	}

	// No-op if non-Windows. Once the container filesystem is mounted,
	// prepare the layer to boot using the Windows driver.
	if err := container.PrepareStorage(); err != nil {
		return err
	}

	if err := container.initializeNetworking(); err != nil {
		return err
	}
	linkedEnv, err := container.setupLinkedContainers()
	if err != nil {
		return err
	}
	if err := container.setupWorkingDirectory(); err != nil {
		return err
	}
	env := container.createDaemonEnvironment(linkedEnv)
	if err := populateCommand(container, env); err != nil {
		return err
	}

	mounts, err := container.setupMounts()
	if err != nil {
		return err
	}

	container.command.Mounts = mounts
	return container.waitForStart()
}

func (container *Container) Run() error {
	if err := container.Start(); err != nil {
		return err
	}
	container.HasBeenStartedBefore = true
	container.WaitStop(-1 * time.Second)
	return nil
}

func (container *Container) Output() (output []byte, err error) {
	pipe := container.StdoutPipe()
	defer pipe.Close()
	if err := container.Start(); err != nil {
		return nil, err
	}
	output, err = ioutil.ReadAll(pipe)
	container.WaitStop(-1 * time.Second)
	return output, err
}

// StreamConfig.StdinPipe returns a WriteCloser which can be used to feed data
// to the standard input of the container's active process.
// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
// which can be used to retrieve the standard output (and error) generated
// by the container's active process. The output (and error) are actually
// copied and delivered to all StdoutPipe and StderrPipe consumers, using
// a kind of "broadcaster".

func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser {
	return streamConfig.stdinPipe
}

func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser {
	reader, writer := io.Pipe()
	streamConfig.stdout.AddWriter(writer)
	return ioutils.NewBufReader(reader)
}

func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser {
	reader, writer := io.Pipe()
	streamConfig.stderr.AddWriter(writer)
	return ioutils.NewBufReader(reader)
}
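// Example (illustrative sketch): because stdout and stderr go through a
// broadcaster, every pipe returned above receives its own copy of the stream.
//
//	logPipe := container.StdoutPipe()    // e.g. consumed by the log copier
//	attachPipe := container.StdoutPipe() // e.g. consumed by an attach session
//	// both readers observe everything the process writes to stdout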
func (container *Container) isNetworkAllocated() bool {
	return container.NetworkSettings.IPAddress != ""
}

// cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (container *Container) cleanup() {
	container.ReleaseNetwork()

	disableAllActiveLinks(container)

	if err := container.CleanupStorage(); err != nil {
		logrus.Errorf("%v: Failed to cleanup storage: %v", container.ID, err)
	}

	if err := container.Unmount(); err != nil {
		logrus.Errorf("%v: Failed to umount filesystem: %v", container.ID, err)
	}

	for _, eConfig := range container.execCommands.s {
		container.daemon.unregisterExecCommand(eConfig)
	}

	container.UnmountVolumes(false)
}

func (container *Container) KillSig(sig int) error {
	logrus.Debugf("Sending %d to %s", sig, container.ID)
	container.Lock()
	defer container.Unlock()

	// We could unpause the container for them rather than returning this error
	if container.Paused {
		return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID)
	}

	if !container.Running {
		return ErrContainerNotRunning{container.ID}
	}

	// signal to the monitor that it should not restart the container
	// after we send the kill signal
	container.monitor.ExitOnNext()

	// if the container is currently restarting we do not need to send the signal
	// to the process. Telling the monitor that it should exit on its next event
	// loop is enough
	if container.Restarting {
		return nil
	}

	if err := container.daemon.Kill(container, sig); err != nil {
		return err
	}
	container.LogEvent("kill")
	return nil
}

// Wrapper around KillSig() suppressing "no such process" error.
func (container *Container) killPossiblyDeadProcess(sig int) error {
	err := container.KillSig(sig)
	if err == syscall.ESRCH {
		logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.GetPid(), sig)
		return nil
	}
	return err
}

func (container *Container) Pause() error {
	container.Lock()
	defer container.Unlock()

	// We cannot pause a container that is not running
	if !container.Running {
		return ErrContainerNotRunning{container.ID}
	}

	// We cannot pause a container that is already paused
	if container.Paused {
		return fmt.Errorf("Container %s is already paused", container.ID)
	}

	if err := container.daemon.execDriver.Pause(container.command); err != nil {
		return err
	}
	container.Paused = true
	container.LogEvent("pause")
	return nil
}

func (container *Container) Unpause() error {
	container.Lock()
	defer container.Unlock()

	// We cannot unpause a container that is not running
	if !container.Running {
		return ErrContainerNotRunning{container.ID}
	}

	// We cannot unpause a container that is not paused
	if !container.Paused {
		return fmt.Errorf("Container %s is not paused", container.ID)
	}

	if err := container.daemon.execDriver.Unpause(container.command); err != nil {
		return err
	}
	container.Paused = false
	container.LogEvent("unpause")
	return nil
}
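// Example (illustrative sketch): Pause and Unpause are strict about container
// state, so callers handle the returned errors rather than assuming success.
//
//	if err := container.Pause(); err != nil {
//		return err // ErrContainerNotRunning, or "already paused"
//	}
//	defer container.Unpause()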
func (container *Container) Kill() error {
	if !container.IsRunning() {
		return ErrContainerNotRunning{container.ID}
	}

	// 1. Send SIGKILL
	if err := container.killPossiblyDeadProcess(9); err != nil {
		// While normally we might "return err" here we're not going to
		// because if we can't stop the container by this point then
		// it's probably because it's already stopped. Meaning, between
		// the time of the IsRunning() call above and now, it stopped.
		// Also, since the err return will be exec driver specific we can't
		// look for any particular (common) error that would indicate
		// that the process is already dead vs something else going wrong.
		// So, instead we'll give it up to 2 more seconds to complete and if
		// by that time the container is still running, then the error
		// we got is probably valid and so we return it to the caller.

		if container.IsRunning() {
			container.WaitStop(2 * time.Second)
			if container.IsRunning() {
				return err
			}
		}
	}

	// 2. Wait for the process to die, in last resort, try to kill the process directly
	if err := killProcessDirectly(container); err != nil {
		return err
	}

	container.WaitStop(-1 * time.Second)
	return nil
}

func (container *Container) Stop(seconds int) error {
	if !container.IsRunning() {
		return nil
	}

	// 1. Send a SIGTERM
	if err := container.killPossiblyDeadProcess(15); err != nil {
		logrus.Infof("Failed to send SIGTERM to the process, force killing")
		if err := container.killPossiblyDeadProcess(9); err != nil {
			return err
		}
	}

	// 2. Wait for the process to exit on its own
	if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil {
		logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds)
		// 3. If it doesn't, then send SIGKILL
		if err := container.Kill(); err != nil {
			container.WaitStop(-1 * time.Second)
			return err
		}
	}

	container.LogEvent("stop")
	return nil
}

func (container *Container) Restart(seconds int) error {
	// Avoid unnecessarily unmounting and then directly mounting
	// the container when the container stops and then starts
	// again
	if err := container.Mount(); err == nil {
		defer container.Unmount()
	}

	if err := container.Stop(seconds); err != nil {
		return err
	}

	if err := container.Start(); err != nil {
		return err
	}

	container.LogEvent("restart")
	return nil
}

func (container *Container) Resize(h, w int) error {
	if !container.IsRunning() {
		return ErrContainerNotRunning{container.ID}
	}
	if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
		return err
	}
	container.LogEvent("resize")
	return nil
}

func (container *Container) Export() (archive.Archive, error) {
	if err := container.Mount(); err != nil {
		return nil, err
	}

	archive, err := archive.Tar(container.basefs, archive.Uncompressed)
	if err != nil {
		container.Unmount()
		return nil, err
	}
	arch := ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		container.Unmount()
		return err
	})
	container.LogEvent("export")
	return arch, err
}

func (container *Container) Mount() error {
	return container.daemon.Mount(container)
}

func (container *Container) changes() ([]archive.Change, error) {
	return container.daemon.Changes(container)
}

func (container *Container) Changes() ([]archive.Change, error) {
	container.Lock()
	defer container.Unlock()
	return container.changes()
}

func (container *Container) GetImage() (*image.Image, error) {
	if container.daemon == nil {
		return nil, fmt.Errorf("Can't get image of unregistered container")
	}
	return container.daemon.graph.Get(container.ImageID)
}

func (container *Container) Unmount() error {
	return container.daemon.Unmount(container)
}

func (container *Container) hostConfigPath() (string, error) {
	return container.GetRootResourcePath("hostconfig.json")
}

func (container *Container) jsonPath() (string, error) {
	return container.GetRootResourcePath("config.json")
}
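// Example (illustrative sketch): Stop escalates from SIGTERM to SIGKILL, with
// the grace period expressed in seconds.
//
//	if err := container.Stop(10); err != nil { // SIGTERM, wait up to 10s, then SIGKILL
//		logrus.Errorf("stopping %s: %v", container.ID, err)
//	}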
// This method must be exported to be used from the lxc template
// This directory is only usable when the container is running
func (container *Container) RootfsPath() string {
	return container.basefs
}

func validateID(id string) error {
	if id == "" {
		return fmt.Errorf("Invalid empty id")
	}
	return nil
}

func (container *Container) Copy(resource string) (rc io.ReadCloser, err error) {
	container.Lock()

	defer func() {
		if err != nil {
			// Wait to unlock the container until the archive is fully read
			// (see the ReadCloserWrapper func below) or if there is an error
			// before that occurs.
			container.Unlock()
		}
	}()

	if err := container.Mount(); err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			// unmount any volumes
			container.UnmountVolumes(true)
			// unmount the container's rootfs
			container.Unmount()
		}
	}()

	if err := container.mountVolumes(); err != nil {
		return nil, err
	}

	basePath, err := container.GetResourcePath(resource)
	if err != nil {
		return nil, err
	}
	stat, err := os.Stat(basePath)
	if err != nil {
		return nil, err
	}
	var filter []string
	if !stat.IsDir() {
		d, f := filepath.Split(basePath)
		basePath = d
		filter = []string{f}
	} else {
		filter = []string{filepath.Base(basePath)}
		basePath = filepath.Dir(basePath)
	}
	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
		Compression:  archive.Uncompressed,
		IncludeFiles: filter,
	})
	if err != nil {
		return nil, err
	}

	if err := container.PrepareStorage(); err != nil {
		container.Unmount()
		return nil, err
	}

	reader := ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		container.CleanupStorage()
		container.UnmountVolumes(true)
		container.Unmount()
		container.Unlock()
		return err
	})
	container.LogEvent("copy")
	return reader, nil
}

// Returns true if the container exposes a certain port
func (container *Container) Exposes(p nat.Port) bool {
	_, exists := container.Config.ExposedPorts[p]
	return exists
}

func (container *Container) HostConfig() *runconfig.HostConfig {
	return container.hostConfig
}

func (container *Container) SetHostConfig(hostConfig *runconfig.HostConfig) {
	container.hostConfig = hostConfig
}

func (container *Container) getLogConfig() runconfig.LogConfig {
	cfg := container.hostConfig.LogConfig
	if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured
		if cfg.Type == "" {
			cfg.Type = jsonfilelog.Name
		}
		return cfg
	}
	// Use daemon's default log config for containers
	return container.daemon.defaultLogConfig
}
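// Example (illustrative sketch): getLogConfig resolves the effective log driver
// in two steps - an explicit type wins, options without a type imply json-file,
// and an empty config inherits the daemon default.
//
//	// LogConfig{Type: "", Config: map[string]string{"max-size": "10m"}}
//	//   -> LogConfig{Type: jsonfilelog.Name, Config: ...}
//	// LogConfig{Type: "", Config: nil}
//	//   -> container.daemon.defaultLogConfig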
func (container *Container) getLogger() (logger.Logger, error) {
	if container.logDriver != nil && container.IsRunning() {
		return container.logDriver, nil
	}
	cfg := container.getLogConfig()
	if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
		return nil, err
	}
	c, err := logger.GetLogDriver(cfg.Type)
	if err != nil {
		return nil, fmt.Errorf("Failed to get logging factory: %v", err)
	}
	ctx := logger.Context{
		Config:              cfg.Config,
		ContainerID:         container.ID,
		ContainerName:       container.Name,
		ContainerEntrypoint: container.Path,
		ContainerArgs:       container.Args,
		ContainerImageID:    container.ImageID,
		ContainerImageName:  container.Config.Image,
		ContainerCreated:    container.Created,
	}

	// Set logging file for "json-logger"
	if cfg.Type == jsonfilelog.Name {
		ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
		if err != nil {
			return nil, err
		}
	}
	return c(ctx)
}

func (container *Container) startLogging() error {
	cfg := container.getLogConfig()
	if cfg.Type == "none" {
		return nil // do not start logging routines
	}

	l, err := container.getLogger()
	if err != nil {
		return fmt.Errorf("Failed to initialize logging driver: %v", err)
	}

	copier, err := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
	if err != nil {
		return err
	}
	container.logCopier = copier
	copier.Run()
	container.logDriver = l

	// set LogPath field only for json-file logdriver
	if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
		container.LogPath = jl.LogPath()
	}

	return nil
}

func (container *Container) waitForStart() error {
	container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)

	// block until we either receive an error from the initial start of the container's
	// process or until the process is running in the container
	select {
	case <-container.monitor.startSignal:
	case err := <-promise.Go(container.monitor.Start):
		return err
	}

	return nil
}

func (container *Container) GetProcessLabel() string {
	// even if we have a process label return "" if we are running
	// in privileged mode
	if container.hostConfig.Privileged {
		return ""
	}
	return container.ProcessLabel
}

func (container *Container) GetMountLabel() string {
	if container.hostConfig.Privileged {
		return ""
	}
	return container.MountLabel
}

func (container *Container) Stats() (*execdriver.ResourceStats, error) {
	return container.daemon.Stats(container)
}

func (c *Container) LogDriverType() string {
	c.Lock()
	defer c.Unlock()
	if c.hostConfig.LogConfig.Type == "" {
		return c.daemon.defaultLogConfig.Type
	}
	return c.hostConfig.LogConfig.Type
}

func (container *Container) GetExecIDs() []string {
	return container.execCommands.List()
}

func (container *Container) Exec(execConfig *execConfig) error {
	container.Lock()
	defer container.Unlock()

	waitStart := make(chan struct{})

	callback := func(processConfig *execdriver.ProcessConfig, pid int) {
		if processConfig.Tty {
			// The callback is called after the process Start()
			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
			// which we close here.
			if c, ok := processConfig.Stdout.(io.Closer); ok {
				c.Close()
			}
		}
		close(waitStart)
	}

	// We use a callback here instead of a goroutine and a chan for
	// synchronization purposes
	cErr := promise.Go(func() error { return container.monitorExec(execConfig, callback) })

	// Exec should not return until the process is actually running
	select {
	case <-waitStart:
	case err := <-cErr:
		return err
	}

	return nil
}

func (container *Container) monitorExec(execConfig *execConfig, callback execdriver.StartCallback) error {
	var (
		err      error
		exitCode int
	)
	pipes := execdriver.NewPipes(execConfig.StreamConfig.stdin, execConfig.StreamConfig.stdout, execConfig.StreamConfig.stderr, execConfig.OpenStdin)
	exitCode, err = container.daemon.Exec(container, execConfig, pipes, callback)
	if err != nil {
		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
	}
	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
	if execConfig.OpenStdin {
		if err := execConfig.StreamConfig.stdin.Close(); err != nil {
			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
		}
	}
	if err := execConfig.StreamConfig.stdout.Clean(); err != nil {
		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
	}
	if err := execConfig.StreamConfig.stderr.Clean(); err != nil {
		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
	}
	if execConfig.ProcessConfig.Terminal != nil {
		if err := execConfig.ProcessConfig.Terminal.Close(); err != nil {
			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
		}
	}
	// remove the exec command from the container's store only and not the
	// daemon's store so that the exec command can be inspected.
	container.execCommands.Delete(execConfig.ID)
	return err
}
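// Example (illustrative sketch): the start/monitor hand-off used by Exec above,
// shown in isolation. A callback closes a channel once the process is running,
// while promise.Go surfaces an early failure from the monitoring goroutine;
// runAndMonitor is a hypothetical stand-in for monitorExec.
//
//	started := make(chan struct{})
//	errC := promise.Go(func() error { return runAndMonitor(func() { close(started) }) })
//	select {
//	case <-started: // the process is up
//	case err := <-errC:
//		return err
//	}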
func (c *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
	return attach(&c.StreamConfig, c.Config.OpenStdin, c.Config.StdinOnce, c.Config.Tty, stdin, stdout, stderr)
}

func (c *Container) AttachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
	if logs {
		logDriver, err := c.getLogger()
		if err != nil {
			return err
		}
		cLog, ok := logDriver.(logger.LogReader)
		if !ok {
			return logger.ErrReadLogsNotSupported
		}
		logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1})

	LogLoop:
		for {
			select {
			case msg, ok := <-logs.Msg:
				if !ok {
					break LogLoop
				}
				if msg.Source == "stdout" && stdout != nil {
					stdout.Write(msg.Line)
				}
				if msg.Source == "stderr" && stderr != nil {
					stderr.Write(msg.Line)
				}
			case err := <-logs.Err:
				logrus.Errorf("Error streaming logs: %v", err)
				break LogLoop
			}
		}
	}

	c.LogEvent("attach")

	//stream
	if stream {
		var stdinPipe io.ReadCloser
		if stdin != nil {
			r, w := io.Pipe()
			go func() {
				defer w.Close()
				defer logrus.Debugf("Closing buffered stdin pipe")
				io.Copy(w, stdin)
			}()
			stdinPipe = r
		}
		<-c.Attach(stdinPipe, stdout, stderr)
		// If we are in stdinonce mode, wait for the process to end
		// otherwise, simply return
		if c.Config.StdinOnce && !c.Config.Tty {
			c.WaitStop(-1 * time.Second)
		}
	}
	return nil
}

func attach(streamConfig *StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
	var (
		cStdout, cStderr io.ReadCloser
		cStdin           io.WriteCloser
		wg               sync.WaitGroup
		errors           = make(chan error, 3)
	)

	if stdin != nil && openStdin {
		cStdin = streamConfig.StdinPipe()
		wg.Add(1)
	}

	if stdout != nil {
		cStdout = streamConfig.StdoutPipe()
		wg.Add(1)
	}

	if stderr != nil {
		cStderr = streamConfig.StderrPipe()
		wg.Add(1)
	}

	// Connect stdin of container to the http conn.
	go func() {
		if stdin == nil || !openStdin {
			return
		}
		logrus.Debugf("attach: stdin: begin")
		defer func() {
			if stdinOnce && !tty {
				cStdin.Close()
			} else {
				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
				if cStdout != nil {
					cStdout.Close()
				}
				if cStderr != nil {
					cStderr.Close()
				}
			}
			wg.Done()
			logrus.Debugf("attach: stdin: end")
		}()

		var err error
		if tty {
			_, err = copyEscapable(cStdin, stdin)
		} else {
			_, err = io.Copy(cStdin, stdin)
		}
		if err == io.ErrClosedPipe {
			err = nil
		}
		if err != nil {
			logrus.Errorf("attach: stdin: %s", err)
			errors <- err
			return
		}
	}()

	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
		if stream == nil {
			return
		}
		defer func() {
			// Make sure stdin gets closed
			if stdin != nil {
				stdin.Close()
			}
			streamPipe.Close()
			wg.Done()
			logrus.Debugf("attach: %s: end", name)
		}()

		logrus.Debugf("attach: %s: begin", name)
		_, err := io.Copy(stream, streamPipe)
		if err == io.ErrClosedPipe {
			err = nil
		}
		if err != nil {
			logrus.Errorf("attach: %s: %v", name, err)
			errors <- err
		}
	}

	go attachStream("stdout", stdout, cStdout)
	go attachStream("stderr", stderr, cStderr)

	return promise.Go(func() error {
		wg.Wait()
		close(errors)
		for err := range errors {
			if err != nil {
				return err
			}
		}
		return nil
	})
}

// Code copied from io.Copy(), modified to handle the detach escape sequence
func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
	buf := make([]byte, 32*1024)
	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			// ---- Docker addition
			// char 16 is C-p
			if nr == 1 && buf[0] == 16 {
				nr, er = src.Read(buf)
				// char 17 is C-q
				if nr == 1 && buf[0] == 17 {
					if err := src.Close(); err != nil {
						return 0, err
					}
					return 0, nil
				}
			}
			// ---- End of docker
			nw, ew := dst.Write(buf[0:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er == io.EOF {
			break
		}
		if er != nil {
			err = er
			break
		}
	}
	return written, err
}
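// Example (illustrative sketch): with a TTY attached, the two-byte sequence
// Ctrl-P (0x10) then Ctrl-Q (0x11) makes copyEscapable close the client's stdin
// instead of forwarding the bytes, detaching the session without stopping the
// container.
//
//	// input stream:  'l' 's' '\n' 0x10 0x11
//	// forwarded:     'l' 's' '\n'          (then the attach goroutines unwind)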
Destination: "/etc/hosts", 1118 Writable: !container.hostConfig.ReadonlyRootfs, 1119 Private: true, 1120 }) 1121 } 1122 return mounts 1123 } 1124 1125 func (container *Container) addBindMountPoint(name, source, destination string, rw bool) { 1126 container.MountPoints[destination] = &mountPoint{ 1127 Name: name, 1128 Source: source, 1129 Destination: destination, 1130 RW: rw, 1131 } 1132 } 1133 1134 func (container *Container) addLocalMountPoint(name, destination string, rw bool) { 1135 container.MountPoints[destination] = &mountPoint{ 1136 Name: name, 1137 Driver: volume.DefaultDriverName, 1138 Destination: destination, 1139 RW: rw, 1140 } 1141 } 1142 1143 func (container *Container) addMountPointWithVolume(destination string, vol volume.Volume, rw bool) { 1144 container.MountPoints[destination] = &mountPoint{ 1145 Name: vol.Name(), 1146 Driver: vol.DriverName(), 1147 Destination: destination, 1148 RW: rw, 1149 Volume: vol, 1150 } 1151 } 1152 1153 func (container *Container) isDestinationMounted(destination string) bool { 1154 return container.MountPoints[destination] != nil 1155 } 1156 1157 func (container *Container) prepareMountPoints() error { 1158 for _, config := range container.MountPoints { 1159 if len(config.Driver) > 0 { 1160 v, err := createVolume(config.Name, config.Driver) 1161 if err != nil { 1162 return err 1163 } 1164 config.Volume = v 1165 } 1166 } 1167 return nil 1168 } 1169 1170 func (container *Container) removeMountPoints() error { 1171 for _, m := range container.MountPoints { 1172 if m.Volume != nil { 1173 if err := removeVolume(m.Volume); err != nil { 1174 return err 1175 } 1176 } 1177 } 1178 return nil 1179 } 1180 1181 func (container *Container) shouldRestart() bool { 1182 return container.hostConfig.RestartPolicy.Name == "always" || 1183 (container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0) 1184 } 1185 1186 func (container *Container) mountVolumes() error { 1187 mounts, err := container.setupMounts() 1188 if err != nil { 1189 return err 1190 } 1191 1192 for _, m := range mounts { 1193 dest, err := container.GetResourcePath(m.Destination) 1194 if err != nil { 1195 return err 1196 } 1197 1198 var stat os.FileInfo 1199 stat, err = os.Stat(m.Source) 1200 if err != nil { 1201 return err 1202 } 1203 if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { 1204 return err 1205 } 1206 1207 opts := "rbind,ro" 1208 if m.Writable { 1209 opts = "rbind,rw" 1210 } 1211 1212 if err := mount.Mount(m.Source, dest, "bind", opts); err != nil { 1213 return err 1214 } 1215 } 1216 1217 return nil 1218 } 1219 1220 func (container *Container) copyImagePathContent(v volume.Volume, destination string) error { 1221 rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs) 1222 if err != nil { 1223 return err 1224 } 1225 1226 if _, err = ioutil.ReadDir(rootfs); err != nil { 1227 if os.IsNotExist(err) { 1228 return nil 1229 } 1230 return err 1231 } 1232 1233 path, err := v.Mount() 1234 if err != nil { 1235 return err 1236 } 1237 1238 if err := copyExistingContents(rootfs, path); err != nil { 1239 return err 1240 } 1241 1242 return v.Unmount() 1243 }