package daemon // import "github.com/docker/docker/daemon"

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"runtime"
	"strings"
	"time"

	"github.com/containerd/containerd"
	"github.com/docker/docker/api/types"
	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/strslice"
	"github.com/docker/docker/container"
	"github.com/docker/docker/container/stream"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/pkg/pools"
	"github.com/moby/sys/signal"
	"github.com/moby/term"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
)

// registerExecCommand records config in both exec stores, keyed by the exec
// ID: the container's own store and the daemon-wide one.
func (daemon *Daemon) registerExecCommand(container *container.Container, config *container.ExecConfig) {
	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
	container.ExecCommands.Add(config.ID, config)
	// Storing execs in daemon for easy access via Engine API.
	daemon.execCommands.Add(config.ID, config)
}

// ExecExists looks up the exec instance and returns a bool if it exists or not.
// It will also return the error produced by `getConfig`
func (daemon *Daemon) ExecExists(name string) (bool, error) {
	if _, err := daemon.getExecConfig(name); err != nil {
		return false, err
	}
	return true, nil
}

// getExecConfig looks up the exec instance by name. If the container associated
// with the exec instance is stopped or paused, it will return an error.
45 func (daemon *Daemon) getExecConfig(name string) (*container.ExecConfig, error) { 46 ec := daemon.execCommands.Get(name) 47 if ec == nil { 48 return nil, errExecNotFound(name) 49 } 50 51 // If the exec is found but its container is not in the daemon's list of 52 // containers then it must have been deleted, in which case instead of 53 // saying the container isn't running, we should return a 404 so that 54 // the user sees the same error now that they will after the 55 // 5 minute clean-up loop is run which erases old/dead execs. 56 ctr := daemon.containers.Get(ec.Container.ID) 57 if ctr == nil { 58 return nil, containerNotFound(name) 59 } 60 if !ctr.IsRunning() { 61 return nil, errNotRunning(ctr.ID) 62 } 63 if ctr.IsPaused() { 64 return nil, errExecPaused(ctr.ID) 65 } 66 if ctr.IsRestarting() { 67 return nil, errContainerIsRestarting(ctr.ID) 68 } 69 return ec, nil 70 } 71 72 func (daemon *Daemon) unregisterExecCommand(container *container.Container, execConfig *container.ExecConfig) { 73 container.ExecCommands.Delete(execConfig.ID) 74 daemon.execCommands.Delete(execConfig.ID) 75 } 76 77 func (daemon *Daemon) getActiveContainer(name string) (*container.Container, error) { 78 ctr, err := daemon.GetContainer(name) 79 if err != nil { 80 return nil, err 81 } 82 83 if !ctr.IsRunning() { 84 return nil, errNotRunning(ctr.ID) 85 } 86 if ctr.IsPaused() { 87 return nil, errExecPaused(name) 88 } 89 if ctr.IsRestarting() { 90 return nil, errContainerIsRestarting(ctr.ID) 91 } 92 return ctr, nil 93 } 94 95 // ContainerExecCreate sets up an exec in a running container. 
96 func (daemon *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { 97 cntr, err := daemon.getActiveContainer(name) 98 if err != nil { 99 return "", err 100 } 101 102 cmd := strslice.StrSlice(config.Cmd) 103 entrypoint, args := daemon.getEntrypointAndArgs(strslice.StrSlice{}, cmd) 104 105 keys := []byte{} 106 if config.DetachKeys != "" { 107 keys, err = term.ToBytes(config.DetachKeys) 108 if err != nil { 109 err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) 110 return "", err 111 } 112 } 113 114 execConfig := container.NewExecConfig(cntr) 115 execConfig.OpenStdin = config.AttachStdin 116 execConfig.OpenStdout = config.AttachStdout 117 execConfig.OpenStderr = config.AttachStderr 118 execConfig.DetachKeys = keys 119 execConfig.Entrypoint = entrypoint 120 execConfig.Args = args 121 execConfig.Tty = config.Tty 122 execConfig.ConsoleSize = config.ConsoleSize 123 execConfig.Privileged = config.Privileged 124 execConfig.User = config.User 125 execConfig.WorkingDir = config.WorkingDir 126 127 linkedEnv, err := daemon.setupLinkedContainers(cntr) 128 if err != nil { 129 return "", err 130 } 131 execConfig.Env = container.ReplaceOrAppendEnvValues(cntr.CreateDaemonEnvironment(config.Tty, linkedEnv), config.Env) 132 if len(execConfig.User) == 0 { 133 execConfig.User = cntr.Config.User 134 } 135 if len(execConfig.WorkingDir) == 0 { 136 execConfig.WorkingDir = cntr.Config.WorkingDir 137 } 138 139 daemon.registerExecCommand(cntr, execConfig) 140 141 attributes := map[string]string{ 142 "execID": execConfig.ID, 143 } 144 daemon.LogContainerEventWithAttributes(cntr, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " "), attributes) 145 146 return execConfig.ID, nil 147 } 148 149 // ContainerExecStart starts a previously set up exec instance. The 150 // std streams are set up. 151 // If ctx is cancelled, the process is terminated. 
func (daemon *Daemon) ContainerExecStart(ctx context.Context, name string, options containertypes.ExecStartOptions) (err error) {
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	ec, err := daemon.getExecConfig(name)
	if err != nil {
		return err
	}

	// Transition the exec to Running under its lock; an exec may be
	// started at most once, so both "already run" and "already running"
	// are 409 Conflict.
	ec.Lock()
	if ec.ExitCode != nil {
		ec.Unlock()
		err := fmt.Errorf("Error: Exec command %s has already run", ec.ID)
		return errdefs.Conflict(err)
	}

	if ec.Running {
		ec.Unlock()
		return errdefs.Conflict(fmt.Errorf("Error: Exec command %s is already running", ec.ID))
	}
	ec.Running = true
	ec.Unlock()

	logrus.Debugf("starting exec command %s in container %s", ec.ID, ec.Container.ID)
	attributes := map[string]string{
		"execID": ec.ID,
	}
	daemon.LogContainerEventWithAttributes(ec.Container, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " "), attributes)

	// On any error path below, roll back: drop the exec from the
	// container's store, mark it not running with exit code 126
	// ("cannot invoke"), and close its streams. Relies on the named
	// return value err.
	defer func() {
		if err != nil {
			ec.Lock()
			ec.Container.ExecCommands.Delete(ec.ID)
			ec.Running = false
			exitCode := 126
			ec.ExitCode = &exitCode
			if err := ec.CloseStreams(); err != nil {
				// NOTE(review): message says "exec %s" but passes the
				// container ID — confirm whether ec.ID was intended.
				logrus.Errorf("failed to cleanup exec %s streams: %s", ec.Container.ID, err)
			}
			ec.Unlock()
		}
	}()

	// Wire up the caller-supplied streams. Stdin is pumped through a pipe
	// by a dedicated goroutine so the copy outlives this call's stack.
	if ec.OpenStdin && options.Stdin != nil {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer logrus.Debug("Closing buffered stdin pipe")
			pools.Copy(w, options.Stdin)
		}()
		cStdin = r
	}
	if ec.OpenStdout {
		cStdout = options.Stdout
	}
	if ec.OpenStderr {
		cStderr = options.Stderr
	}

	if ec.OpenStdin {
		ec.StreamConfig.NewInputPipes()
	} else {
		ec.StreamConfig.NewNopInputPipe()
	}

	// Build the OCI process spec for the exec. On non-Windows, seed it
	// from the container's stored OCI spec so the exec inherits the
	// container's process configuration.
	p := &specs.Process{}
	if runtime.GOOS != "windows" {
		ctr, err := daemon.containerdCli.LoadContainer(ctx, ec.Container.ID)
		if err != nil {
			return err
		}
		md, err := ctr.Info(ctx, containerd.WithoutRefreshedMetadata)
		if err != nil {
			return err
		}
		spec := specs.Spec{Process: p}
		if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
			return err
		}
	}
	p.Args = append([]string{ec.Entrypoint}, ec.Args...)
	p.Env = ec.Env
	p.Cwd = ec.WorkingDir
	p.Terminal = ec.Tty

	consoleSize := options.ConsoleSize
	// If size isn't specified for start, use the one provided for create
	if consoleSize == nil {
		consoleSize = ec.ConsoleSize
	}
	if p.Terminal && consoleSize != nil {
		p.ConsoleSize = &specs.Box{
			Height: consoleSize[0],
			Width:  consoleSize[1],
		}
	}

	if p.Cwd == "" {
		p.Cwd = "/"
	}

	if err := daemon.execSetPlatformOpt(ctx, ec, p); err != nil {
		return err
	}

	attachConfig := stream.AttachConfig{
		TTY:        ec.Tty,
		UseStdin:   cStdin != nil,
		UseStdout:  cStdout != nil,
		UseStderr:  cStderr != nil,
		Stdin:      cStdin,
		Stdout:     cStdout,
		Stderr:     cStderr,
		DetachKeys: ec.DetachKeys,
		CloseStdin: true,
	}
	ec.StreamConfig.AttachStreams(&attachConfig)
	// using context.Background() so that attachErr does not race ctx.Done().
	copyCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	attachErr := ec.StreamConfig.CopyStreams(copyCtx, &attachConfig)

	ec.Container.Lock()
	tsk, err := ec.Container.GetRunningTask()
	ec.Container.Unlock()
	if err != nil {
		return err
	}

	// Synchronize with libcontainerd event loop
	ec.Lock()
	ec.Process, err = tsk.Exec(ctx, ec.ID, p, cStdin != nil, ec.InitializeStdio)
	// the exec context should be ready, or error happened.
	// close the chan to notify readiness
	close(ec.Started)
	if err != nil {
		defer ec.Unlock()
		return setExitCodeFromError(ec.SetExitCode, err)
	}
	ec.Unlock()

	// Block until either the caller's context is cancelled (kill the exec
	// process) or the stream copy finishes (attachErr).
	select {
	case <-ctx.Done():
		log := logrus.
			WithField("container", ec.Container.ID).
			WithField("exec", ec.ID)
		log.Debug("Sending KILL signal to container process")
		// Bound the kill with a fresh 30s context; ctx is already done.
		sigCtx, cancelFunc := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancelFunc()
		err := ec.Process.Kill(sigCtx, signal.SignalMap["KILL"])
		if err != nil {
			log.WithError(err).Error("Could not send KILL signal to container process")
		}
		return ctx.Err()
	case err := <-attachErr:
		if err != nil {
			// A detach via the escape sequence is not a failure; any
			// other attach error is surfaced as a 500.
			if _, ok := err.(term.EscapeError); !ok {
				return errdefs.System(errors.Wrap(err, "exec attach failed"))
			}
			attributes := map[string]string{
				"execID": ec.ID,
			}
			daemon.LogContainerEventWithAttributes(ec.Container, "exec_detach", attributes)
		}
	}
	return nil
}

// execCommandGC runs a ticker to clean up the daemon references
// of exec configs that are no longer part of the container.
func (daemon *Daemon) execCommandGC() {
	// Two-phase GC: an exec absent from every live container is first
	// flagged CanRemove, then actually deleted on the next 5-minute tick.
	for range time.Tick(5 * time.Minute) {
		var (
			cleaned          int
			liveExecCommands = daemon.containerExecIds()
		)
		for id, config := range daemon.execCommands.Commands() {
			if config.CanRemove {
				cleaned++
				daemon.execCommands.Delete(id)
			} else {
				if _, exists := liveExecCommands[id]; !exists {
					config.CanRemove = true
				}
			}
		}
		if cleaned > 0 {
			logrus.Debugf("clean %d unused exec commands", cleaned)
		}
	}
}

// containerExecIds returns a list of all the current exec ids that are in use
// and running inside a container.
func (daemon *Daemon) containerExecIds() map[string]struct{} {
	ids := map[string]struct{}{}
	for _, c := range daemon.containers.List() {
		for _, id := range c.ExecCommands.List() {
			ids[id] = struct{}{}
		}
	}
	return ids
}