package daemon

import (
	"fmt"
	"io"
	"strings"
	"time"

	"golang.org/x/net/context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/container"
	"github.com/docker/docker/container/stream"
	"github.com/docker/docker/daemon/exec"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/signal"
	"github.com/docker/docker/pkg/term"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// termProcessTimeout is the number of seconds to wait after sending TERM to
// an exec'd process before escalating to KILL (see ContainerExecStart).
const termProcessTimeout = 10

// registerExecCommand records the exec config in two places: on the container
// (so execs can be killed gracefully when the container stops or is removed)
// and on the daemon (for easy lookup via the Engine API).
func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) {
	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
	container.ExecCommands.Add(config.ID, config)
	// Storing execs in daemon for easy access via Engine API.
	d.execCommands.Add(config.ID, config)
}

// ExecExists looks up the exec instance and returns a bool if it exists or not.
// It will also return the error produced by `getExecConfig`.
func (d *Daemon) ExecExists(name string) (bool, error) {
	if _, err := d.getExecConfig(name); err != nil {
		return false, err
	}
	return true, nil
}

// getExecConfig looks up the exec instance by name. If the container associated
// with the exec instance is stopped or paused, it will return an error.
45 func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { 46 ec := d.execCommands.Get(name) 47 48 // If the exec is found but its container is not in the daemon's list of 49 // containers then it must have been deleted, in which case instead of 50 // saying the container isn't running, we should return a 404 so that 51 // the user sees the same error now that they will after the 52 // 5 minute clean-up loop is run which erases old/dead execs. 53 54 if ec != nil { 55 if container := d.containers.Get(ec.ContainerID); container != nil { 56 if !container.IsRunning() { 57 return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) 58 } 59 if container.IsPaused() { 60 return nil, errExecPaused(container.ID) 61 } 62 if container.IsRestarting() { 63 return nil, errContainerIsRestarting(container.ID) 64 } 65 return ec, nil 66 } 67 } 68 69 return nil, errExecNotFound(name) 70 } 71 72 func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { 73 container.ExecCommands.Delete(execConfig.ID, execConfig.Pid) 74 d.execCommands.Delete(execConfig.ID, execConfig.Pid) 75 } 76 77 func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { 78 container, err := d.GetContainer(name) 79 if err != nil { 80 return nil, err 81 } 82 83 if !container.IsRunning() { 84 return nil, errNotRunning(container.ID) 85 } 86 if container.IsPaused() { 87 return nil, errExecPaused(name) 88 } 89 if container.IsRestarting() { 90 return nil, errContainerIsRestarting(container.ID) 91 } 92 return container, nil 93 } 94 95 // ContainerExecCreate sets up an exec in a running container. 
96 func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { 97 cntr, err := d.getActiveContainer(name) 98 if err != nil { 99 return "", err 100 } 101 102 cmd := strslice.StrSlice(config.Cmd) 103 entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) 104 105 keys := []byte{} 106 if config.DetachKeys != "" { 107 keys, err = term.ToBytes(config.DetachKeys) 108 if err != nil { 109 err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) 110 return "", err 111 } 112 } 113 114 execConfig := exec.NewConfig() 115 execConfig.OpenStdin = config.AttachStdin 116 execConfig.OpenStdout = config.AttachStdout 117 execConfig.OpenStderr = config.AttachStderr 118 execConfig.ContainerID = cntr.ID 119 execConfig.DetachKeys = keys 120 execConfig.Entrypoint = entrypoint 121 execConfig.Args = args 122 execConfig.Tty = config.Tty 123 execConfig.Privileged = config.Privileged 124 execConfig.User = config.User 125 execConfig.WorkingDir = config.WorkingDir 126 127 linkedEnv, err := d.setupLinkedContainers(cntr) 128 if err != nil { 129 return "", err 130 } 131 execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) 132 if len(execConfig.User) == 0 { 133 execConfig.User = cntr.Config.User 134 } 135 if len(execConfig.WorkingDir) == 0 { 136 execConfig.WorkingDir = cntr.Config.WorkingDir 137 } 138 139 d.registerExecCommand(cntr, execConfig) 140 141 d.LogContainerEvent(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) 142 143 return execConfig.ID, nil 144 } 145 146 // ContainerExecStart starts a previously set up exec instance. The 147 // std streams are set up. 148 // If ctx is cancelled, the process is terminated. 
149 func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.Reader, stdout io.Writer, stderr io.Writer) (err error) { 150 var ( 151 cStdin io.ReadCloser 152 cStdout, cStderr io.Writer 153 ) 154 155 ec, err := d.getExecConfig(name) 156 if err != nil { 157 return errExecNotFound(name) 158 } 159 160 ec.Lock() 161 if ec.ExitCode != nil { 162 ec.Unlock() 163 err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) 164 return stateConflictError{err} 165 } 166 167 if ec.Running { 168 ec.Unlock() 169 return stateConflictError{fmt.Errorf("Error: Exec command %s is already running", ec.ID)} 170 } 171 ec.Running = true 172 ec.Unlock() 173 174 c := d.containers.Get(ec.ContainerID) 175 logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) 176 d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) 177 178 defer func() { 179 if err != nil { 180 ec.Lock() 181 ec.Running = false 182 exitCode := 126 183 ec.ExitCode = &exitCode 184 if err := ec.CloseStreams(); err != nil { 185 logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err) 186 } 187 ec.Unlock() 188 c.ExecCommands.Delete(ec.ID, ec.Pid) 189 } 190 }() 191 192 if ec.OpenStdin && stdin != nil { 193 r, w := io.Pipe() 194 go func() { 195 defer w.Close() 196 defer logrus.Debug("Closing buffered stdin pipe") 197 pools.Copy(w, stdin) 198 }() 199 cStdin = r 200 } 201 if ec.OpenStdout { 202 cStdout = stdout 203 } 204 if ec.OpenStderr { 205 cStderr = stderr 206 } 207 208 if ec.OpenStdin { 209 ec.StreamConfig.NewInputPipes() 210 } else { 211 ec.StreamConfig.NewNopInputPipe() 212 } 213 214 p := &specs.Process{ 215 Args: append([]string{ec.Entrypoint}, ec.Args...), 216 Env: ec.Env, 217 Terminal: ec.Tty, 218 Cwd: ec.WorkingDir, 219 } 220 if p.Cwd == "" { 221 p.Cwd = "/" 222 } 223 224 if err := d.execSetPlatformOpt(c, ec, p); err != nil { 225 return err 226 } 227 228 attachConfig := stream.AttachConfig{ 229 TTY: ec.Tty, 230 UseStdin: cStdin != nil, 231 
UseStdout: cStdout != nil, 232 UseStderr: cStderr != nil, 233 Stdin: cStdin, 234 Stdout: cStdout, 235 Stderr: cStderr, 236 DetachKeys: ec.DetachKeys, 237 CloseStdin: true, 238 } 239 ec.StreamConfig.AttachStreams(&attachConfig) 240 attachErr := ec.StreamConfig.CopyStreams(ctx, &attachConfig) 241 242 // Synchronize with libcontainerd event loop 243 ec.Lock() 244 c.ExecCommands.Lock() 245 systemPid, err := d.containerd.Exec(ctx, c.ID, ec.ID, p, cStdin != nil, ec.InitializeStdio) 246 if err != nil { 247 c.ExecCommands.Unlock() 248 ec.Unlock() 249 return translateContainerdStartErr(ec.Entrypoint, ec.SetExitCode, err) 250 } 251 ec.Pid = systemPid 252 c.ExecCommands.Unlock() 253 ec.Unlock() 254 255 select { 256 case <-ctx.Done(): 257 logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID) 258 d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["TERM"])) 259 select { 260 case <-time.After(termProcessTimeout * time.Second): 261 logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout) 262 d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["KILL"])) 263 case <-attachErr: 264 // TERM signal worked 265 } 266 return fmt.Errorf("context cancelled") 267 case err := <-attachErr: 268 if err != nil { 269 if _, ok := err.(term.EscapeError); !ok { 270 return errors.Wrap(systemError{err}, "exec attach failed") 271 } 272 d.LogContainerEvent(c, "exec_detach") 273 } 274 } 275 return nil 276 } 277 278 // execCommandGC runs a ticker to clean up the daemon references 279 // of exec configs that are no longer part of the container. 
280 func (d *Daemon) execCommandGC() { 281 for range time.Tick(5 * time.Minute) { 282 var ( 283 cleaned int 284 liveExecCommands = d.containerExecIds() 285 ) 286 for id, config := range d.execCommands.Commands() { 287 if config.CanRemove { 288 cleaned++ 289 d.execCommands.Delete(id, config.Pid) 290 } else { 291 if _, exists := liveExecCommands[id]; !exists { 292 config.CanRemove = true 293 } 294 } 295 } 296 if cleaned > 0 { 297 logrus.Debugf("clean %d unused exec commands", cleaned) 298 } 299 } 300 } 301 302 // containerExecIds returns a list of all the current exec ids that are in use 303 // and running inside a container. 304 func (d *Daemon) containerExecIds() map[string]struct{} { 305 ids := map[string]struct{}{} 306 for _, c := range d.containers.List() { 307 for _, id := range c.ExecCommands.List() { 308 ids[id] = struct{}{} 309 } 310 } 311 return ids 312 }