github.com/moby/docker@v26.1.3+incompatible/daemon/exec.go (about) 1 package daemon // import "github.com/docker/docker/daemon" 2 3 import ( 4 "context" 5 "encoding/json" 6 "fmt" 7 "io" 8 "runtime" 9 "strings" 10 "time" 11 12 "github.com/containerd/containerd" 13 "github.com/containerd/log" 14 "github.com/docker/docker/api/types" 15 containertypes "github.com/docker/docker/api/types/container" 16 "github.com/docker/docker/api/types/events" 17 "github.com/docker/docker/api/types/strslice" 18 "github.com/docker/docker/container" 19 "github.com/docker/docker/container/stream" 20 "github.com/docker/docker/errdefs" 21 "github.com/docker/docker/pkg/pools" 22 "github.com/moby/sys/signal" 23 "github.com/moby/term" 24 specs "github.com/opencontainers/runtime-spec/specs-go" 25 "github.com/pkg/errors" 26 ) 27 28 func (daemon *Daemon) registerExecCommand(container *container.Container, config *container.ExecConfig) { 29 // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. 30 container.ExecCommands.Add(config.ID, config) 31 // Storing execs in daemon for easy access via Engine API. 32 daemon.execCommands.Add(config.ID, config) 33 } 34 35 // ExecExists looks up the exec instance and returns a bool if it exists or not. 36 // It will also return the error produced by `getConfig` 37 func (daemon *Daemon) ExecExists(name string) (bool, error) { 38 if _, err := daemon.getExecConfig(name); err != nil { 39 return false, err 40 } 41 return true, nil 42 } 43 44 // getExecConfig looks up the exec instance by name. If the container associated 45 // with the exec instance is stopped or paused, it will return an error. 
46 func (daemon *Daemon) getExecConfig(name string) (*container.ExecConfig, error) { 47 ec := daemon.execCommands.Get(name) 48 if ec == nil { 49 return nil, errExecNotFound(name) 50 } 51 52 // If the exec is found but its container is not in the daemon's list of 53 // containers then it must have been deleted, in which case instead of 54 // saying the container isn't running, we should return a 404 so that 55 // the user sees the same error now that they will after the 56 // 5 minute clean-up loop is run which erases old/dead execs. 57 ctr := daemon.containers.Get(ec.Container.ID) 58 if ctr == nil { 59 return nil, containerNotFound(name) 60 } 61 if !ctr.IsRunning() { 62 return nil, errNotRunning(ctr.ID) 63 } 64 if ctr.IsPaused() { 65 return nil, errExecPaused(ctr.ID) 66 } 67 if ctr.IsRestarting() { 68 return nil, errContainerIsRestarting(ctr.ID) 69 } 70 return ec, nil 71 } 72 73 func (daemon *Daemon) unregisterExecCommand(container *container.Container, execConfig *container.ExecConfig) { 74 container.ExecCommands.Delete(execConfig.ID) 75 daemon.execCommands.Delete(execConfig.ID) 76 } 77 78 func (daemon *Daemon) getActiveContainer(name string) (*container.Container, error) { 79 ctr, err := daemon.GetContainer(name) 80 if err != nil { 81 return nil, err 82 } 83 84 if !ctr.IsRunning() { 85 return nil, errNotRunning(ctr.ID) 86 } 87 if ctr.IsPaused() { 88 return nil, errExecPaused(name) 89 } 90 if ctr.IsRestarting() { 91 return nil, errContainerIsRestarting(ctr.ID) 92 } 93 return ctr, nil 94 } 95 96 // ContainerExecCreate sets up an exec in a running container. 
97 func (daemon *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { 98 cntr, err := daemon.getActiveContainer(name) 99 if err != nil { 100 return "", err 101 } 102 103 cmd := strslice.StrSlice(config.Cmd) 104 entrypoint, args := daemon.getEntrypointAndArgs(strslice.StrSlice{}, cmd) 105 106 keys := []byte{} 107 if config.DetachKeys != "" { 108 keys, err = term.ToBytes(config.DetachKeys) 109 if err != nil { 110 err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) 111 return "", err 112 } 113 } 114 115 execConfig := container.NewExecConfig(cntr) 116 execConfig.OpenStdin = config.AttachStdin 117 execConfig.OpenStdout = config.AttachStdout 118 execConfig.OpenStderr = config.AttachStderr 119 execConfig.DetachKeys = keys 120 execConfig.Entrypoint = entrypoint 121 execConfig.Args = args 122 execConfig.Tty = config.Tty 123 execConfig.ConsoleSize = config.ConsoleSize 124 execConfig.Privileged = config.Privileged 125 execConfig.User = config.User 126 execConfig.WorkingDir = config.WorkingDir 127 128 linkedEnv, err := daemon.setupLinkedContainers(cntr) 129 if err != nil { 130 return "", err 131 } 132 execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) 133 if len(execConfig.User) == 0 { 134 execConfig.User = cntr.Config.User 135 } 136 if len(execConfig.WorkingDir) == 0 { 137 execConfig.WorkingDir = cntr.Config.WorkingDir 138 } 139 140 daemon.registerExecCommand(cntr, execConfig) 141 daemon.LogContainerEventWithAttributes(cntr, events.Action(string(events.ActionExecCreate)+": "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")), map[string]string{ 142 "execID": execConfig.ID, 143 }) 144 145 return execConfig.ID, nil 146 } 147 148 // ContainerExecStart starts a previously set up exec instance. The 149 // std streams are set up. 150 // If ctx is cancelled, the process is terminated. 
func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, options containertypes.ExecStartOptions) (err error) {
	// NOTE: err is a named result on purpose — the deferred cleanup below
	// inspects it to decide whether the exec must be torn down.
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	ec, err := daemon.getExecConfig(name)
	if err != nil {
		return err
	}

	// Claim the exec under its lock: it must not have already run, and must
	// not already be running. Marking Running=true here makes concurrent
	// starts of the same exec fail with a 409 Conflict.
	ec.Lock()
	if ec.ExitCode != nil {
		ec.Unlock()
		err := fmt.Errorf("Error: Exec command %s has already run", ec.ID)
		return errdefs.Conflict(err)
	}

	if ec.Running {
		ec.Unlock()
		return errdefs.Conflict(fmt.Errorf("Error: Exec command %s is already running", ec.ID))
	}
	ec.Running = true
	ec.Unlock()

	log.G(ctx).Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID)
	daemon.LogContainerEventWithAttributes(ec.Container, events.Action(string(events.ActionExecStart)+": "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")), map[string]string{
		"execID": ec.ID,
	})

	// On any failure after this point: unregister the exec from its
	// container, record exit code 126 (the "cannot execute" convention —
	// presumably chosen to match shell semantics; confirm upstream), and
	// close the stream endpoints.
	defer func() {
		if err != nil {
			ec.Lock()
			ec.Container.ExecCommands.Delete(ec.ID)
			ec.Running = false
			exitCode := 126
			ec.ExitCode = &exitCode
			if err := ec.CloseStreams(); err != nil {
				log.G(ctx).Errorf("failed to cleanup exec %s streams: %s", ec.Container.ID, err)
			}
			ec.Unlock()
		}
	}()

	// Wire the caller-supplied streams to the exec. Stdin is pumped through
	// a pipe in a goroutine so the copy does not block this function.
	if ec.OpenStdin && options.Stdin != nil {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer log.G(ctx).Debug("Closing buffered stdin pipe")
			pools.Copy(w, options.Stdin)
		}()
		cStdin = r
	}
	if ec.OpenStdout {
		cStdout = options.Stdout
	}
	if ec.OpenStderr {
		cStderr = options.Stderr
	}

	if ec.OpenStdin {
		ec.StreamConfig.NewInputPipes()
	} else {
		ec.StreamConfig.NewNopInputPipe()
	}

	// On non-Windows platforms, load the container's OCI spec from
	// containerd so the process inherits spec-level settings (the Spec is
	// unmarshalled with Process pointing at p).
	p := &specs.Process{}
	if runtime.GOOS != "windows" {
		ctr, err := daemon.containerdClient.LoadContainer(ctx, ec.Container.ID)
		if err != nil {
			return err
		}
		md, err := ctr.Info(ctx, containerd.WithoutRefreshedMetadata)
		if err != nil {
			return err
		}
		spec := specs.Spec{Process: p}
		if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
			return err
		}
	}
	// Overlay the exec-specific process settings on top of the loaded spec.
	p.Args = append([]string{ec.Entrypoint}, ec.Args...)
	p.Env = ec.Env
	p.Cwd = ec.WorkingDir
	p.Terminal = ec.Tty

	consoleSize := options.ConsoleSize
	// If size isn't specified for start, use the one provided for create
	if consoleSize == nil {
		consoleSize = ec.ConsoleSize
	}
	if p.Terminal && consoleSize != nil {
		p.ConsoleSize = &specs.Box{
			Height: consoleSize[0],
			Width:  consoleSize[1],
		}
	}

	if p.Cwd == "" {
		p.Cwd = "/"
	}

	daemonCfg := &daemon.config().Config
	if err := daemon.execSetPlatformOpt(ctx, daemonCfg, ec, p); err != nil {
		return err
	}

	attachConfig := stream.AttachConfig{
		TTY:        ec.Tty,
		UseStdin:   cStdin != nil,
		UseStdout:  cStdout != nil,
		UseStderr:  cStderr != nil,
		Stdin:      cStdin,
		Stdout:     cStdout,
		Stderr:     cStderr,
		DetachKeys: ec.DetachKeys,
		CloseStdin: true,
	}
	ec.StreamConfig.AttachStreams(&attachConfig)
	// using context.Background() so that attachErr does not race ctx.Done().
	copyCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	attachErr := ec.StreamConfig.CopyStreams(copyCtx, &attachConfig)

	// Take the container lock only long enough to fetch the running task;
	// holding it across the Exec call is not required.
	ec.Container.Lock()
	tsk, err := ec.Container.GetRunningTask()
	ec.Container.Unlock()
	if err != nil {
		return err
	}

	// Synchronize with libcontainerd event loop
	ec.Lock()
	ec.Process, err = tsk.Exec(ctx, ec.ID, p, cStdin != nil, ec.InitializeStdio)
	// the exec context should be ready, or error happened.
	// close the chan to notify readiness
	close(ec.Started)
	if err != nil {
		defer ec.Unlock()
		return setExitCodeFromError(ec.SetExitCode, err)
	}
	ec.Unlock()

	// Wait for either caller cancellation (kill the exec'd process) or the
	// stream copy to finish.
	select {
	case <-ctx.Done():
		log := log.G(ctx).
			WithField("container", ec.Container.ID).
			WithField("exec", ec.ID)
		log.Debug("Sending KILL signal to container process")
		// Use a fresh 30s-timeout context: ctx is already cancelled and
		// could not carry the Kill call.
		sigCtx, cancelFunc := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancelFunc()
		err := ec.Process.Kill(sigCtx, signal.SignalMap["KILL"])
		if err != nil {
			log.WithError(err).Error("Could not send KILL signal to container process")
		}
		return ctx.Err()
	case err := <-attachErr:
		if err != nil {
			// A detach via the escape sequence is not an error; anything
			// else is surfaced as a system error.
			if _, ok := err.(term.EscapeError); !ok {
				return errdefs.System(errors.Wrap(err, "exec attach failed"))
			}
			daemon.LogContainerEventWithAttributes(ec.Container, events.ActionExecDetach, map[string]string{
				"execID": ec.ID,
			})
		}
	}
	return nil
}

// execCommandGC runs a ticker to clean up the daemon references
// of exec configs that are no longer part of the container.
func (daemon *Daemon) execCommandGC() {
	// Two-phase collection: an exec no longer referenced by any live
	// container is first marked CanRemove, then deleted on the next tick.
	for range time.Tick(5 * time.Minute) {
		var (
			cleaned          int
			liveExecCommands = daemon.containerExecIds()
		)
		for id, config := range daemon.execCommands.Commands() {
			if config.CanRemove {
				cleaned++
				daemon.execCommands.Delete(id)
			} else {
				if _, exists := liveExecCommands[id]; !exists {
					config.CanRemove = true
				}
			}
		}
		if cleaned > 0 {
			log.G(context.TODO()).Debugf("clean %d unused exec commands", cleaned)
		}
	}
}

// containerExecIds returns a list of all the current exec ids that are in use
// and running inside a container.
func (daemon *Daemon) containerExecIds() map[string]struct{} {
	ids := map[string]struct{}{}
	for _, c := range daemon.containers.List() {
		for _, id := range c.ExecCommands.List() {
			ids[id] = struct{}{}
		}
	}
	return ids
}