package daemon

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sync"
	"syscall"
	"time"

	"github.com/opencontainers/runc/libcontainer/label"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/docker/daemon/logger"
	"github.com/docker/docker/daemon/logger/jsonfilelog"
	"github.com/docker/docker/daemon/network"
	derr "github.com/docker/docker/errors"
	"github.com/docker/docker/image"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/broadcaster"
	"github.com/docker/docker/pkg/fileutils"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/mount"
	"github.com/docker/docker/pkg/nat"
	"github.com/docker/docker/pkg/promise"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/pkg/symlink"
	"github.com/docker/docker/runconfig"
	"github.com/docker/docker/volume"
)

var (
	// ErrRootFSReadOnly is returned when a container
	// rootfs is marked readonly.
	ErrRootFSReadOnly = errors.New("container rootfs is marked read-only")
)

// streamConfig groups the standard streams of a container's (or exec's)
// main process. stdout and stderr fan out to any number of attached
// readers via broadcasters; stdin is fed through a single write pipe.
type streamConfig struct {
	stdout    *broadcaster.Unbuffered
	stderr    *broadcaster.Unbuffered
	stdin     io.ReadCloser
	stdinPipe io.WriteCloser
}

// CommonContainer holds the fields for a container which are
// applicable across all platforms supported by the daemon.
type CommonContainer struct {
	streamConfig
	// embed for Container to support states directly.
	*State `json:"State"` // Needed for remote api version <= 1.11
	root   string         // Path to the "home" of the container, including metadata.
	basefs string         // Path to the graphdriver mountpoint
	ID                     string
	Created                time.Time
	Path                   string
	Args                   []string
	Config                 *runconfig.Config
	ImageID                string `json:"Image"`
	NetworkSettings        *network.Settings
	LogPath                string
	Name                   string
	Driver                 string
	ExecDriver             string
	// MountLabel contains the options for the 'mount' command
	MountLabel             string
	ProcessLabel           string
	RestartCount           int
	HasBeenStartedBefore   bool
	HasBeenManuallyStopped bool // used for unless-stopped restart policy
	hostConfig             *runconfig.HostConfig
	command                *execdriver.Command
	monitor                *containerMonitor
	execCommands           *execStore
	daemon                 *Daemon
	// logDriver for closing
	logDriver logger.Logger
	logCopier *logger.Copier
}

// fromDisk loads the container's persisted configuration (config.json)
// into the receiver, reserves its SELinux process label, and then loads
// the host configuration via readHostConfig.
func (container *Container) fromDisk() error {
	pth, err := container.jsonPath()
	if err != nil {
		return err
	}

	jsonSource, err := os.Open(pth)
	if err != nil {
		return err
	}
	defer jsonSource.Close()

	dec := json.NewDecoder(jsonSource)

	// Load container settings
	if err := dec.Decode(container); err != nil {
		return err
	}

	if err := label.ReserveLabel(container.ProcessLabel); err != nil {
		return err
	}
	return container.readHostConfig()
}

// toDisk persists the container's configuration to config.json and its
// host configuration to hostconfig.json. (toDiskLocking is the variant
// that takes the container lock first.)
func (container *Container) toDisk() error {
	data, err := json.Marshal(container)
	if err != nil {
		return err
	}

	pth, err := container.jsonPath()
	if err != nil {
		return err
	}

	if err := ioutil.WriteFile(pth, data, 0666); err != nil {
		return err
	}

	return container.writeHostConfig()
}

// toDiskLocking is toDisk performed under the container lock.
func (container *Container) toDiskLocking() error {
	container.Lock()
	err := container.toDisk()
	container.Unlock()
	return err
}

// readHostConfig loads hostconfig.json into container.hostConfig and
// normalizes nil DNS-related slices to empty slices.
func (container *Container) readHostConfig() error {
	container.hostConfig = &runconfig.HostConfig{}
	// If the hostconfig file does not exist, do not read it.
	// (We still have to initialize container.hostConfig,
	// but that's OK, since we just did that above.)
	pth, err := container.hostConfigPath()
	if err != nil {
		return err
	}

	_, err = os.Stat(pth)
	if os.IsNotExist(err) {
		return nil
	}

	f, err := os.Open(pth)
	if err != nil {
		return err
	}
	defer f.Close()

	if err := json.NewDecoder(f).Decode(&container.hostConfig); err != nil {
		return err
	}

	// Make sure the dns fields are never nil.
	// New containers don't ever have those fields nil,
	// but pre created containers can still have those nil values.
	// See https://github.com/docker/docker/pull/17779
	// for a more detailed explanation on why we don't want that.
	if container.hostConfig.DNS == nil {
		container.hostConfig.DNS = make([]string, 0)
	}

	if container.hostConfig.DNSSearch == nil {
		container.hostConfig.DNSSearch = make([]string, 0)
	}

	if container.hostConfig.DNSOptions == nil {
		container.hostConfig.DNSOptions = make([]string, 0)
	}

	return nil
}

// writeHostConfig persists container.hostConfig to hostconfig.json.
func (container *Container) writeHostConfig() error {
	data, err := json.Marshal(container.hostConfig)
	if err != nil {
		return err
	}

	pth, err := container.hostConfigPath()
	if err != nil {
		return err
	}

	return ioutil.WriteFile(pth, data, 0666)
}

// logEvent emits a daemon event with the given action for this container.
func (container *Container) logEvent(action string) {
	d := container.daemon
	d.EventsService.Log(
		action,
		container.ID,
		container.Config.Image,
	)
}

// GetResourcePath evaluates `path` in the scope of the container's basefs, with proper path
// sanitisation. Symlinks are all scoped to the basefs of the container, as
// though the container's basefs was `/`.
//
// The basefs of a container is the host-facing path which is bind-mounted as
// `/` inside the container. This method is essentially used to access a
// particular path inside the container as though you were a process in that
// container.
//
// NOTE: The returned path is *only* safely scoped inside the container's basefs
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) GetResourcePath(path string) (string, error) {
	// IMPORTANT - These are paths on the OS where the daemon is running, hence
	// any filepath operations must be done in an OS agnostic way.
	cleanPath := filepath.Join(string(os.PathSeparator), path)
	r, e := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, cleanPath), container.basefs)
	return r, e
}

// Evaluates `path` in the scope of the container's root, with proper path
// sanitisation. Symlinks are all scoped to the root of the container, as
// though the container's root was `/`.
//
// The root of a container is the host-facing configuration metadata directory.
// Only use this method to safely access the container's `container.json` or
// other metadata files. If in doubt, use container.GetResourcePath.
//
// NOTE: The returned path is *only* safely scoped inside the container's root
// if no component of the returned path changes (such as a component
// symlinking to a different path) between using this method and using the
// path. See symlink.FollowSymlinkInScope for more details.
func (container *Container) getRootResourcePath(path string) (string, error) {
	// IMPORTANT - These are paths on the OS where the daemon is running, hence
	// any filepath operations must be done in an OS agnostic way.
	cleanPath := filepath.Join(string(os.PathSeparator), path)
	return symlink.FollowSymlinkInScope(filepath.Join(container.root, cleanPath), container.root)
}

// exportContainerRw returns an archive of the container's read-write
// layer (the diff against its image), via the daemon's graphdriver.
func (container *Container) exportContainerRw() (archive.Archive, error) {
	if container.daemon == nil {
		return nil, derr.ErrorCodeUnregisteredContainer.WithArgs(container.ID)
	}
	archive, err := container.daemon.diff(container)
	if err != nil {
		return nil, err
	}
	return ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		return err
	}),
		nil
}

// Start prepares the container to run by setting up everything the
// container needs, such as storage and networking, as well as links
// between containers. The container is left waiting for a signal to
// begin running.
func (container *Container) Start() (err error) {
	container.Lock()
	defer container.Unlock()

	if container.Running {
		return nil
	}

	if container.removalInProgress || container.Dead {
		return derr.ErrorCodeContainerBeingRemoved
	}

	// if we encounter an error during start we need to ensure that any other
	// setup has been cleaned up properly
	defer func() {
		if err != nil {
			container.setError(err)
			// if no one else has set it, make sure we don't leave it at zero
			if container.ExitCode == 0 {
				container.ExitCode = 128
			}
			// best-effort persist; the toDisk error is intentionally ignored here
			container.toDisk()
			container.cleanup()
			container.logEvent("die")
		}
	}()

	if err := container.Mount(); err != nil {
		return err
	}

	// Make sure NetworkMode has an acceptable value. We do this to ensure
	// backwards API compatibility.
	container.hostConfig = runconfig.SetDefaultNetModeIfBlank(container.hostConfig)

	if err := container.initializeNetworking(); err != nil {
		return err
	}
	linkedEnv, err := container.setupLinkedContainers()
	if err != nil {
		return err
	}
	if err := container.setupWorkingDirectory(); err != nil {
		return err
	}
	env := container.createDaemonEnvironment(linkedEnv)
	if err := populateCommand(container, env); err != nil {
		return err
	}

	if !container.hostConfig.IpcMode.IsContainer() && !container.hostConfig.IpcMode.IsHost() {
		if err := container.setupIpcDirs(); err != nil {
			return err
		}
	}

	mounts, err := container.setupMounts()
	if err != nil {
		return err
	}
	mounts = append(mounts, container.ipcMounts()...)

	container.command.Mounts = mounts
	return container.waitForStart()
}

// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data
// to the standard input of the container's active process.
// Container.StdoutPipe and Container.StderrPipe each return a ReadCloser
// which can be used to retrieve the standard output (and error) generated
// by the container's active process. The output (and error) are actually
// copied and delivered to all StdoutPipe and StderrPipe consumers, using
// a kind of "broadcaster".
func (streamConfig *streamConfig) StdinPipe() io.WriteCloser {
	return streamConfig.stdinPipe
}

// StdoutPipe attaches a new reader to the stdout broadcaster; the reader
// receives a copy of everything written to stdout from this point on.
func (streamConfig *streamConfig) StdoutPipe() io.ReadCloser {
	reader, writer := io.Pipe()
	streamConfig.stdout.Add(writer)
	return ioutils.NewBufReader(reader)
}

// StderrPipe attaches a new reader to the stderr broadcaster; the reader
// receives a copy of everything written to stderr from this point on.
func (streamConfig *streamConfig) StderrPipe() io.ReadCloser {
	reader, writer := io.Pipe()
	streamConfig.stderr.Add(writer)
	return ioutils.NewBufReader(reader)
}

// cleanup releases any network resources allocated to the container along with any rules
// around how containers are linked together. It also unmounts the container's root filesystem.
func (container *Container) cleanup() {
	container.releaseNetwork()

	container.unmountIpcMounts(detachMounted)

	if err := container.Unmount(); err != nil {
		logrus.Errorf("%s: Failed to umount filesystem: %v", container.ID, err)
	}

	// Drop this container's exec commands from the daemon's registry;
	// the container's own execCommands store is left for inspection.
	for _, eConfig := range container.execCommands.s {
		container.daemon.unregisterExecCommand(eConfig)
	}

	container.unmountVolumes(false)
}

// killSig sends the container the given signal. This wrapper for the
// host specific kill command prepares the container before attempting
// to send the signal. An error is returned if the container is paused
// or not running, or if there is a problem returned from the
// underlying kill command.
func (container *Container) killSig(sig int) error {
	logrus.Debugf("Sending %d to %s", sig, container.ID)
	container.Lock()
	defer container.Unlock()

	// We could unpause the container for them rather than returning this error
	if container.Paused {
		return derr.ErrorCodeUnpauseContainer.WithArgs(container.ID)
	}

	if !container.Running {
		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
	}

	// signal to the monitor that it should not restart the container
	// after we send the kill signal
	container.monitor.ExitOnNext()

	// if the container is currently restarting we do not need to send the signal
	// to the process. Telling the monitor that it should exit on its next event
	// loop is enough
	if container.Restarting {
		return nil
	}

	if err := container.daemon.kill(container, sig); err != nil {
		return err
	}
	container.logEvent("kill")
	return nil
}

// Wrapper around killSig() suppressing "no such process" error.
408 func (container *Container) killPossiblyDeadProcess(sig int) error { 409 err := container.killSig(sig) 410 if err == syscall.ESRCH { 411 logrus.Debugf("Cannot kill process (pid=%d) with signal %d: no such process.", container.getPID(), sig) 412 return nil 413 } 414 return err 415 } 416 417 func (container *Container) pause() error { 418 container.Lock() 419 defer container.Unlock() 420 421 // We cannot Pause the container which is not running 422 if !container.Running { 423 return derr.ErrorCodeNotRunning.WithArgs(container.ID) 424 } 425 426 // We cannot Pause the container which is already paused 427 if container.Paused { 428 return derr.ErrorCodeAlreadyPaused.WithArgs(container.ID) 429 } 430 431 if err := container.daemon.execDriver.Pause(container.command); err != nil { 432 return err 433 } 434 container.Paused = true 435 container.logEvent("pause") 436 return nil 437 } 438 439 func (container *Container) unpause() error { 440 container.Lock() 441 defer container.Unlock() 442 443 // We cannot unpause the container which is not running 444 if !container.Running { 445 return derr.ErrorCodeNotRunning.WithArgs(container.ID) 446 } 447 448 // We cannot unpause the container which is not paused 449 if !container.Paused { 450 return derr.ErrorCodeNotPaused.WithArgs(container.ID) 451 } 452 453 if err := container.daemon.execDriver.Unpause(container.command); err != nil { 454 return err 455 } 456 container.Paused = false 457 container.logEvent("unpause") 458 return nil 459 } 460 461 // Kill forcefully terminates a container. 462 func (container *Container) Kill() error { 463 if !container.IsRunning() { 464 return derr.ErrorCodeNotRunning.WithArgs(container.ID) 465 } 466 467 // 1. Send SIGKILL 468 if err := container.killPossiblyDeadProcess(int(syscall.SIGKILL)); err != nil { 469 // While normally we might "return err" here we're not going to 470 // because if we can't stop the container by this point then 471 // its probably because its already stopped. 
Meaning, between 472 // the time of the IsRunning() call above and now it stopped. 473 // Also, since the err return will be exec driver specific we can't 474 // look for any particular (common) error that would indicate 475 // that the process is already dead vs something else going wrong. 476 // So, instead we'll give it up to 2 more seconds to complete and if 477 // by that time the container is still running, then the error 478 // we got is probably valid and so we return it to the caller. 479 480 if container.IsRunning() { 481 container.WaitStop(2 * time.Second) 482 if container.IsRunning() { 483 return err 484 } 485 } 486 } 487 488 // 2. Wait for the process to die, in last resort, try to kill the process directly 489 if err := killProcessDirectly(container); err != nil { 490 return err 491 } 492 493 container.WaitStop(-1 * time.Second) 494 return nil 495 } 496 497 // Stop halts a container by sending a stop signal, waiting for the given 498 // duration in seconds, and then calling SIGKILL and waiting for the 499 // process to exit. If a negative duration is given, Stop will wait 500 // for the initial signal forever. If the container is not running Stop returns 501 // immediately. 502 func (container *Container) Stop(seconds int) error { 503 if !container.IsRunning() { 504 return nil 505 } 506 507 // 1. Send a SIGTERM 508 if err := container.killPossiblyDeadProcess(container.stopSignal()); err != nil { 509 logrus.Infof("Failed to send SIGTERM to the process, force killing") 510 if err := container.killPossiblyDeadProcess(9); err != nil { 511 return err 512 } 513 } 514 515 // 2. Wait for the process to exit on its own 516 if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { 517 logrus.Infof("Container %v failed to exit within %d seconds of SIGTERM - using the force", container.ID, seconds) 518 // 3. 
If it doesn't, then send SIGKILL 519 if err := container.Kill(); err != nil { 520 container.WaitStop(-1 * time.Second) 521 return err 522 } 523 } 524 525 container.logEvent("stop") 526 return nil 527 } 528 529 // Restart attempts to gracefully stop and then start the 530 // container. When stopping, wait for the given duration in seconds to 531 // gracefully stop, before forcefully terminating the container. If 532 // given a negative duration, wait forever for a graceful stop. 533 func (container *Container) Restart(seconds int) error { 534 // Avoid unnecessarily unmounting and then directly mounting 535 // the container when the container stops and then starts 536 // again 537 if err := container.Mount(); err == nil { 538 defer container.Unmount() 539 } 540 541 if err := container.Stop(seconds); err != nil { 542 return err 543 } 544 545 if err := container.Start(); err != nil { 546 return err 547 } 548 549 container.logEvent("restart") 550 return nil 551 } 552 553 // Resize changes the TTY of the process running inside the container 554 // to the given height and width. The container must be running. 
func (container *Container) Resize(h, w int) error {
	if !container.IsRunning() {
		return derr.ErrorCodeNotRunning.WithArgs(container.ID)
	}
	if err := container.command.ProcessConfig.Terminal.Resize(h, w); err != nil {
		return err
	}
	container.logEvent("resize")
	return nil
}

// export returns a tar archive of the container's root filesystem. The
// container stays mounted until the returned ReadCloser is closed.
func (container *Container) export() (archive.Archive, error) {
	if err := container.Mount(); err != nil {
		return nil, err
	}

	uidMaps, gidMaps := container.daemon.GetUIDGIDMaps()
	archive, err := archive.TarWithOptions(container.basefs, &archive.TarOptions{
		Compression: archive.Uncompressed,
		UIDMaps:     uidMaps,
		GIDMaps:     gidMaps,
	})
	if err != nil {
		container.Unmount()
		return nil, err
	}
	arch := ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		container.Unmount()
		return err
	})
	container.logEvent("export")
	return arch, err
}

// Mount sets container.basefs
func (container *Container) Mount() error {
	return container.daemon.Mount(container)
}

// changes returns the filesystem changes of the container relative to its
// image, under the container lock.
func (container *Container) changes() ([]archive.Change, error) {
	container.Lock()
	defer container.Unlock()
	return container.daemon.changes(container)
}

// getImage looks up the image this container was created from.
func (container *Container) getImage() (*image.Image, error) {
	if container.daemon == nil {
		return nil, derr.ErrorCodeImageUnregContainer
	}
	return container.daemon.graph.Get(container.ImageID)
}

// Unmount asks the daemon to release the layered filesystems that are
// mounted by the container.
func (container *Container) Unmount() error {
	return container.daemon.unmount(container)
}

// hostConfigPath returns the path of hostconfig.json, scoped inside the
// container's metadata root.
func (container *Container) hostConfigPath() (string, error) {
	return container.getRootResourcePath("hostconfig.json")
}

// jsonPath returns the path of config.json, scoped inside the
// container's metadata root.
func (container *Container) jsonPath() (string, error) {
	return container.getRootResourcePath("config.json")
}

// This method must be exported to be used from the lxc template
// This directory is only usable when the container is running
// NOTE(review): the method is currently unexported despite the comment
// above — confirm whether the lxc template still needs it.
func (container *Container) rootfsPath() string {
	return container.basefs
}

// validateID ensures a container ID is non-empty.
func validateID(id string) error {
	if id == "" {
		return derr.ErrorCodeEmptyID
	}
	return nil
}

// copy returns a tar archive of the given resource inside the container's
// filesystem. The container lock, rootfs mount, and volume mounts are held
// until the returned ReadCloser is fully consumed and closed.
func (container *Container) copy(resource string) (rc io.ReadCloser, err error) {
	container.Lock()

	defer func() {
		if err != nil {
			// Wait to unlock the container until the archive is fully read
			// (see the ReadCloseWrapper func below) or if there is an error
			// before that occurs.
			container.Unlock()
		}
	}()

	if err := container.Mount(); err != nil {
		return nil, err
	}

	defer func() {
		if err != nil {
			// unmount any volumes
			container.unmountVolumes(true)
			// unmount the container's rootfs
			container.Unmount()
		}
	}()

	if err := container.mountVolumes(); err != nil {
		return nil, err
	}

	basePath, err := container.GetResourcePath(resource)
	if err != nil {
		return nil, err
	}
	stat, err := os.Stat(basePath)
	if err != nil {
		return nil, err
	}
	// Archive the resource itself: for a file, its parent dir filtered to
	// that file; for a dir, the parent filtered to the dir's base name.
	var filter []string
	if !stat.IsDir() {
		d, f := filepath.Split(basePath)
		basePath = d
		filter = []string{f}
	} else {
		filter = []string{filepath.Base(basePath)}
		basePath = filepath.Dir(basePath)
	}
	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
		Compression:  archive.Uncompressed,
		IncludeFiles: filter,
	})
	if err != nil {
		return nil, err
	}

	reader := ioutils.NewReadCloserWrapper(archive, func() error {
		err := archive.Close()
		container.unmountVolumes(true)
		container.Unmount()
		container.Unlock()
		return err
	})
	container.logEvent("copy")
	return reader, nil
}

// Returns true if the container exposes a certain port
func (container *Container) exposes(p nat.Port) bool {
	_, exists := container.Config.ExposedPorts[p]
	return exists
}

// getLogConfig returns the container's log configuration, falling back to
// the daemon's default when the container has none of its own.
func (container *Container) getLogConfig() runconfig.LogConfig {
	cfg := container.hostConfig.LogConfig
	if cfg.Type != "" || len(cfg.Config) > 0 { // container has log driver configured
		if cfg.Type == "" {
			cfg.Type = jsonfilelog.Name
		}
		return cfg
	}
	// Use daemon's default log config for containers
	return container.daemon.defaultLogConfig
}

// getLogger returns the container's active log driver, or builds a new one
// from the effective log configuration.
func (container *Container) getLogger() (logger.Logger, error) {
	if container.logDriver != nil && container.IsRunning() {
		return container.logDriver, nil
	}
	cfg := container.getLogConfig()
	if err := logger.ValidateLogOpts(cfg.Type, cfg.Config); err != nil {
		return nil, err
	}
	c, err := logger.GetLogDriver(cfg.Type)
	if err != nil {
		return nil, derr.ErrorCodeLoggingFactory.WithArgs(err)
	}
	ctx := logger.Context{
		Config:              cfg.Config,
		ContainerID:         container.ID,
		ContainerName:       container.Name,
		ContainerEntrypoint: container.Path,
		ContainerArgs:       container.Args,
		ContainerImageID:    container.ImageID,
		ContainerImageName:  container.Config.Image,
		ContainerCreated:    container.Created,
		ContainerEnv:        container.Config.Env,
		ContainerLabels:     container.Config.Labels,
	}

	// Set logging file for "json-logger"
	if cfg.Type == jsonfilelog.Name {
		ctx.LogPath, err = container.getRootResourcePath(fmt.Sprintf("%s-json.log", container.ID))
		if err != nil {
			return nil, err
		}
	}
	return c(ctx)
}

// startLogging wires the container's stdout/stderr into the configured log
// driver via a copier. A no-op when the driver type is "none".
func (container *Container) startLogging() error {
	cfg := container.getLogConfig()
	if cfg.Type == "none" {
		return nil // do not start logging routines
	}

	l, err := container.getLogger()
	if err != nil {
		return derr.ErrorCodeInitLogger.WithArgs(err)
	}

	copier := logger.NewCopier(container.ID, map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l)
	container.logCopier = copier
	copier.Run()
	container.logDriver = l

	// set LogPath field only for json-file logdriver
	if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok {
		container.LogPath = jl.LogPath()
	}

	return nil
}

// waitForStart creates the container monitor and blocks until the process
// is reported running or the initial start fails.
func (container *Container) waitForStart() error {
	container.monitor = newContainerMonitor(container, container.hostConfig.RestartPolicy)

	// block until we either receive an error from the initial start of the container's
	// process or until the process is running in the container
	select {
	case <-container.monitor.startSignal:
	case err := <-promise.Go(container.monitor.Start):
		return err
	}

	return nil
}

// getProcessLabel returns the SELinux process label, or "" when privileged.
func (container *Container) getProcessLabel() string {
	// even if we have a process label return "" if we are running
	// in privileged mode
	if container.hostConfig.Privileged {
		return ""
	}
	return container.ProcessLabel
}

// getMountLabel returns the SELinux mount label, or "" when privileged.
func (container *Container) getMountLabel() string {
	if container.hostConfig.Privileged {
		return ""
	}
	return container.MountLabel
}

// stats asks the daemon for the container's current resource usage.
func (container *Container) stats() (*execdriver.ResourceStats, error) {
	return container.daemon.stats(container)
}

// getExecIDs lists the IDs of the exec commands registered on this container.
func (container *Container) getExecIDs() []string {
	return container.execCommands.List()
}

// exec launches the given exec configuration inside the container and does
// not return until the process has actually started (or failed to start).
func (container *Container) exec(ec *ExecConfig) error {
	container.Lock()
	defer container.Unlock()

	callback := func(processConfig *execdriver.ProcessConfig, pid int, chOOM <-chan struct{}) error {
		if processConfig.Tty {
			// The callback is called after the process Start()
			// so we are in the parent process. In TTY mode, stdin/out/err is the PtySlave
			// which we close here.
			if c, ok := processConfig.Stdout.(io.Closer); ok {
				c.Close()
			}
		}
		close(ec.waitStart)
		return nil
	}

	// We use a callback here instead of a goroutine and an chan for
	// synchronization purposes
	cErr := promise.Go(func() error { return container.monitorExec(ec, callback) })

	// Exec should not return until the process is actually running
	select {
	case <-ec.waitStart:
	case err := <-cErr:
		return err
	}

	return nil
}

// monitorExec runs an exec command to completion, then closes its streams
// and terminal and removes it from the container's exec store.
func (container *Container) monitorExec(ExecConfig *ExecConfig, callback execdriver.DriverCallback) error {
	var (
		err      error
		exitCode int
	)
	pipes := execdriver.NewPipes(ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdout, ExecConfig.streamConfig.stderr, ExecConfig.OpenStdin)
	exitCode, err = container.daemon.Exec(container, ExecConfig, pipes, callback)
	if err != nil {
		logrus.Errorf("Error running command in existing container %s: %s", container.ID, err)
	}
	logrus.Debugf("Exec task in container %s exited with code %d", container.ID, exitCode)
	if ExecConfig.OpenStdin {
		if err := ExecConfig.streamConfig.stdin.Close(); err != nil {
			logrus.Errorf("Error closing stdin while running in %s: %s", container.ID, err)
		}
	}
	if err := ExecConfig.streamConfig.stdout.Clean(); err != nil {
		logrus.Errorf("Error closing stdout while running in %s: %s", container.ID, err)
	}
	if err := ExecConfig.streamConfig.stderr.Clean(); err != nil {
		logrus.Errorf("Error closing stderr while running in %s: %s", container.ID, err)
	}
	if ExecConfig.ProcessConfig.Terminal != nil {
		if err := ExecConfig.ProcessConfig.Terminal.Close(); err != nil {
			logrus.Errorf("Error closing terminal while running in container %s: %s", container.ID, err)
		}
	}
	// remove the exec command from the container's store only and not the
	// daemon's store so that the exec command can be inspected.
	container.execCommands.Delete(ExecConfig.ID)
	return err
}

// Attach connects to the container's TTY, delegating to standard
// streams or websockets depending on the configuration.
func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
	return attach(&container.streamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr)
}

// attachWithLogs optionally replays the container's log history to the
// given writers (logs=true) and/or attaches to the live streams
// (stream=true), blocking until the attach ends.
func (container *Container) attachWithLogs(stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool) error {
	if logs {
		logDriver, err := container.getLogger()
		if err != nil {
			return err
		}
		cLog, ok := logDriver.(logger.LogReader)
		if !ok {
			return logger.ErrReadLogsNotSupported
		}
		logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1})

	LogLoop:
		for {
			select {
			case msg, ok := <-logs.Msg:
				if !ok {
					break LogLoop
				}
				if msg.Source == "stdout" && stdout != nil {
					stdout.Write(msg.Line)
				}
				if msg.Source == "stderr" && stderr != nil {
					stderr.Write(msg.Line)
				}
			case err := <-logs.Err:
				logrus.Errorf("Error streaming logs: %v", err)
				break LogLoop
			}
		}
	}

	container.logEvent("attach")

	//stream
	if stream {
		var stdinPipe io.ReadCloser
		if stdin != nil {
			r, w := io.Pipe()
			go func() {
				defer w.Close()
				defer logrus.Debugf("Closing buffered stdin pipe")
				io.Copy(w, stdin)
			}()
			stdinPipe = r
		}
		<-container.Attach(stdinPipe, stdout, stderr)
		// If we are in stdinonce mode, wait for the process to end
		// otherwise, simply return
		if container.Config.StdinOnce && !container.Config.Tty {
			container.WaitStop(-1 * time.Second)
		}
	}
	return nil
}

// attach wires the given stdin/stdout/stderr to the stream config's pipes
// using one goroutine per stream, and returns a channel that yields the
// first copy error (or nil) once every started copy has finished.
func attach(streamConfig *streamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) chan error {
	var (
		cStdout, cStderr io.ReadCloser
		cStdin           io.WriteCloser
		wg               sync.WaitGroup
		errors           = make(chan error, 3)
	)

	if stdin != nil && openStdin {
		cStdin = streamConfig.StdinPipe()
		wg.Add(1)
	}

	if stdout != nil {
		cStdout = streamConfig.StdoutPipe()
		wg.Add(1)
	}

	if stderr != nil {
		cStderr = streamConfig.StderrPipe()
		wg.Add(1)
	}

	// Connect stdin of container to the http conn.
	go func() {
		if stdin == nil || !openStdin {
			return
		}
		logrus.Debugf("attach: stdin: begin")
		defer func() {
			if stdinOnce && !tty {
				cStdin.Close()
			} else {
				// No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr
				if cStdout != nil {
					cStdout.Close()
				}
				if cStderr != nil {
					cStderr.Close()
				}
			}
			wg.Done()
			logrus.Debugf("attach: stdin: end")
		}()

		var err error
		if tty {
			// TTY mode: watch for the C-p C-q detach sequence
			_, err = copyEscapable(cStdin, stdin)
		} else {
			_, err = io.Copy(cStdin, stdin)

		}
		if err == io.ErrClosedPipe {
			err = nil
		}
		if err != nil {
			logrus.Errorf("attach: stdin: %s", err)
			errors <- err
			return
		}
	}()

	attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) {
		if stream == nil {
			return
		}
		defer func() {
			// Make sure stdin gets closed
			if stdin != nil {
				stdin.Close()
			}
			streamPipe.Close()
			wg.Done()
			logrus.Debugf("attach: %s: end", name)
		}()

		logrus.Debugf("attach: %s: begin", name)
		_, err := io.Copy(stream, streamPipe)
		if err == io.ErrClosedPipe {
			err = nil
		}
		if err != nil {
			logrus.Errorf("attach: %s: %v", name, err)
			errors <- err
		}
	}

	go attachStream("stdout", stdout, cStdout)
	go attachStream("stderr", stderr, cStderr)

	return promise.Go(func() error {
		wg.Wait()
		close(errors)
		for err := range errors {
			if err != nil {
				return err
			}
		}
		return nil
	})
}

// Code c/c from io.Copy() modified to handle escape sequence
func copyEscapable(dst io.Writer, src io.ReadCloser) (written int64, err error) {
	buf := make([]byte, 32*1024)
	for {
		nr, er := src.Read(buf)
		if nr > 0 {
			// ---- Docker addition
			// char 16 is C-p
			if nr == 1 && buf[0] == 16 {
				nr, er = src.Read(buf)
				// char 17 is C-q
				if nr == 1 && buf[0] == 17 {
					// Detach sequence seen: close the source and stop copying.
					if err := src.Close(); err != nil {
						return 0, err
					}
					return 0, nil
				}
			}
			// ---- End of docker
			nw, ew := dst.Write(buf[0:nr])
			if nw > 0 {
				written += int64(nw)
			}
			if ew != nil {
				err = ew
				break
			}
			if nr != nw {
				err = io.ErrShortWrite
				break
			}
		}
		if er == io.EOF {
			break
		}
		if er != nil {
			err = er
			break
		}
	}
	return written, err
}

// shouldRestart reports whether the restart policy calls for this
// container to be restarted given how it stopped/exited.
func (container *Container) shouldRestart() bool {
	return container.hostConfig.RestartPolicy.Name == "always" ||
		(container.hostConfig.RestartPolicy.Name == "unless-stopped" && !container.HasBeenManuallyStopped) ||
		(container.hostConfig.RestartPolicy.Name == "on-failure" && container.ExitCode != 0)
}

// mountVolumes bind-mounts the container's configured mounts into its
// rootfs on the host side, creating the destination paths as needed.
func (container *Container) mountVolumes() error {
	mounts, err := container.setupMounts()
	if err != nil {
		return err
	}

	for _, m := range mounts {
		dest, err := container.GetResourcePath(m.Destination)
		if err != nil {
			return err
		}

		var stat os.FileInfo
		stat, err = os.Stat(m.Source)
		if err != nil {
			return err
		}
		if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {
			return err
		}

		opts := "rbind,ro"
		if m.Writable {
			opts = "rbind,rw"
		}

		if err := mount.Mount(m.Source, dest, "bind", opts); err != nil {
			return err
		}
	}

	return nil
}

// copyImagePathContent copies what the image has at destination into the
// given volume, so a freshly created volume is seeded with the image's
// data. A no-op when the path does not exist in the image.
func (container *Container) copyImagePathContent(v volume.Volume, destination string) error {
	rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.basefs, destination), container.basefs)
	if err != nil {
		return err
	}

	if _, err = ioutil.ReadDir(rootfs); err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}

	path, err := v.Mount()
	if err != nil {
		return err
	}

	if err := copyExistingContents(rootfs, path); err != nil {
		return err
	}

	return v.Unmount()
}

// stopSignal returns the numeric signal configured via Config.StopSignal,
// or the parsed default stop signal when unset or unparseable.
func (container *Container) stopSignal() int {
	var stopSignal syscall.Signal
	if container.Config.StopSignal != "" {
		stopSignal, _ = signal.ParseSignal(container.Config.StopSignal)
	}

	if int(stopSignal) == 0 {
		stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal)
	}
	return int(stopSignal)
}