github.com/mheon/docker@v0.11.2-0.20150922122814-44f47903a831/daemon/exec.go

package daemon

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
	"sync"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/docker/pkg/broadcastwriter"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/stringid"
	"github.com/docker/docker/pkg/stringutils"
	"github.com/docker/docker/runconfig"
)

// ExecConfig holds the configurations for execs. The Daemon keeps
// track of both running and finished execs so that they can be
// examined both during and after completion.
type ExecConfig struct {
	sync.Mutex
	ID            string
	Running       bool
	ExitCode      int
	ProcessConfig *execdriver.ProcessConfig
	streamConfig
	OpenStdin  bool
	OpenStderr bool
	OpenStdout bool
	Container  *Container
	canRemove  bool

	// waitStart will be closed immediately after the exec is really started.
	waitStart chan struct{}
}

type execStore struct {
	s map[string]*ExecConfig
	sync.RWMutex
}

func newExecStore() *execStore {
	return &execStore{s: make(map[string]*ExecConfig, 0)}
}

func (e *execStore) Add(id string, ExecConfig *ExecConfig) {
	e.Lock()
	e.s[id] = ExecConfig
	e.Unlock()
}

func (e *execStore) Get(id string) *ExecConfig {
	e.RLock()
	res := e.s[id]
	e.RUnlock()
	return res
}

func (e *execStore) Delete(id string) {
	e.Lock()
	delete(e.s, id)
	e.Unlock()
}

func (e *execStore) List() []string {
	var IDs []string
	e.RLock()
	for id := range e.s {
		IDs = append(IDs, id)
	}
	e.RUnlock()
	return IDs
}

func (ExecConfig *ExecConfig) resize(h, w int) error {
	select {
	case <-ExecConfig.waitStart:
	case <-time.After(time.Second):
		return fmt.Errorf("Exec %s is not running, so it cannot be resized.", ExecConfig.ID)
	}
	return ExecConfig.ProcessConfig.Terminal.Resize(h, w)
}

func (d *Daemon) registerExecCommand(ExecConfig *ExecConfig) {
	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
	ExecConfig.Container.execCommands.Add(ExecConfig.ID, ExecConfig)
	// Storing execs in daemon for easy access via remote API.
	d.execCommands.Add(ExecConfig.ID, ExecConfig)
}

func (d *Daemon) getExecConfig(name string) (*ExecConfig, error) {
	ExecConfig := d.execCommands.Get(name)

	// If the exec is found but its container is not in the daemon's list of
	// containers then it must have been deleted, in which case instead of
	// saying the container isn't running, we should return a 404 so that
	// the user sees the same error now that they will after the
	// 5 minute clean-up loop is run which erases old/dead execs.
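	// Note that the same "no such exec instance" error below covers both a
	// genuinely unknown exec ID and this deleted-container case.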
	if ExecConfig != nil && d.containers.Get(ExecConfig.Container.ID) != nil {
		if !ExecConfig.Container.IsRunning() {
			return nil, fmt.Errorf("Container %s is not running", ExecConfig.Container.ID)
		}
		return ExecConfig, nil
	}

	return nil, fmt.Errorf("No such exec instance '%s' found in daemon", name)
}

func (d *Daemon) unregisterExecCommand(ExecConfig *ExecConfig) {
	ExecConfig.Container.execCommands.Delete(ExecConfig.ID)
	d.execCommands.Delete(ExecConfig.ID)
}

func (d *Daemon) getActiveContainer(name string) (*Container, error) {
	container, err := d.Get(name)
	if err != nil {
		return nil, err
	}

	if !container.IsRunning() {
		return nil, fmt.Errorf("Container %s is not running", name)
	}
	if container.isPaused() {
		return nil, fmt.Errorf("Container %s is paused, unpause the container before exec", name)
	}
	return container, nil
}

// ContainerExecCreate sets up an exec in a running container.
func (d *Daemon) ContainerExecCreate(config *runconfig.ExecConfig) (string, error) {
	// Not all drivers support Exec (LXC for example).
	if err := checkExecSupport(d.execDriver.Name()); err != nil {
		return "", err
	}

	container, err := d.getActiveContainer(config.Container)
	if err != nil {
		return "", err
	}

	cmd := stringutils.NewStrSlice(config.Cmd...)
	entrypoint, args := d.getEntrypointAndArgs(stringutils.NewStrSlice(), cmd)

	user := config.User
	if len(user) == 0 {
		user = container.Config.User
	}

	processConfig := &execdriver.ProcessConfig{
		Tty:        config.Tty,
		Entrypoint: entrypoint,
		Arguments:  args,
		User:       user,
		Privileged: config.Privileged,
	}

	ExecConfig := &ExecConfig{
		ID:            stringid.GenerateNonCryptoID(),
		OpenStdin:     config.AttachStdin,
		OpenStdout:    config.AttachStdout,
		OpenStderr:    config.AttachStderr,
		streamConfig:  streamConfig{},
		ProcessConfig: processConfig,
		Container:     container,
		Running:       false,
		waitStart:     make(chan struct{}),
	}

	d.registerExecCommand(ExecConfig)

	container.logEvent("exec_create: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))

	return ExecConfig.ID, nil
}

// ContainerExecStart starts a previously set up exec instance. The
// std streams are set up.
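// It returns an error if the exec is already running or if its container has
// stopped. A minimal call sketch (d is a *Daemon; the container name and
// stream values here are illustrative):
//
//	execID, err := d.ContainerExecCreate(&runconfig.ExecConfig{
//		Container:    "web",
//		Cmd:          []string{"ls", "-l"},
//		AttachStdout: true,
//	})
//	if err == nil {
//		err = d.ContainerExecStart(execID, nil, os.Stdout, os.Stderr)
//	}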
func (d *Daemon) ContainerExecStart(execName string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error {
	var (
		cStdin           io.ReadCloser
		cStdout, cStderr io.Writer
	)

	ExecConfig, err := d.getExecConfig(execName)
	if err != nil {
		return err
	}

	func() {
		ExecConfig.Lock()
		defer ExecConfig.Unlock()
		if ExecConfig.Running {
			err = fmt.Errorf("Error: Exec command %s is already running", execName)
		}
		ExecConfig.Running = true
	}()
	if err != nil {
		return err
	}

	logrus.Debugf("starting exec command %s in container %s", ExecConfig.ID, ExecConfig.Container.ID)
	container := ExecConfig.Container

	container.logEvent("exec_start: " + ExecConfig.ProcessConfig.Entrypoint + " " + strings.Join(ExecConfig.ProcessConfig.Arguments, " "))

	if ExecConfig.OpenStdin {
		r, w := io.Pipe()
		go func() {
			defer w.Close()
			defer logrus.Debugf("Closing buffered stdin pipe")
			pools.Copy(w, stdin)
		}()
		cStdin = r
	}
	if ExecConfig.OpenStdout {
		cStdout = stdout
	}
	if ExecConfig.OpenStderr {
		cStderr = stderr
	}

	ExecConfig.streamConfig.stderr = broadcastwriter.New()
	ExecConfig.streamConfig.stdout = broadcastwriter.New()
	// Attach to stdin
	if ExecConfig.OpenStdin {
		ExecConfig.streamConfig.stdin, ExecConfig.streamConfig.stdinPipe = io.Pipe()
	} else {
		ExecConfig.streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) // Silently drop stdin
	}

	attachErr := attach(&ExecConfig.streamConfig, ExecConfig.OpenStdin, true, ExecConfig.ProcessConfig.Tty, cStdin, cStdout, cStderr)

	execErr := make(chan error)

	// Note, the ExecConfig data will be removed when the container
	// itself is deleted. This allows us to query it (for things like
	// the exitStatus) even after the cmd is done running.

	go func() {
		if err := container.exec(ExecConfig); err != nil {
			execErr <- fmt.Errorf("Cannot run exec command %s in container %s: %s", execName, container.ID, err)
		}
	}()

	select {
	case err := <-attachErr:
		if err != nil {
			return fmt.Errorf("attach failed with error: %s", err)
		}
		return nil
	case err := <-execErr:
		if err == nil {
			return nil
		}

		// Maybe the container stopped while we were trying to exec.
		if !container.IsRunning() {
			return fmt.Errorf("container stopped while running exec")
		}
		return err
	}
}

// Exec calls the underlying exec driver to run the exec's process.
func (d *Daemon) Exec(c *Container, ExecConfig *ExecConfig, pipes *execdriver.Pipes, startCallback execdriver.DriverCallback) (int, error) {
	hooks := execdriver.Hooks{
		Start: startCallback,
	}
	exitStatus, err := d.execDriver.Exec(c.command, ExecConfig.ProcessConfig, pipes, hooks)

	// On err, make sure we don't leave ExitCode at zero.
	if err != nil && exitStatus == 0 {
		exitStatus = 128
	}

	ExecConfig.ExitCode = exitStatus
	ExecConfig.Running = false

	return exitStatus, err
}

// execCommandGC runs a ticker to clean up the daemon references
// of exec configs that are no longer part of the container.
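// An exec config is first marked removable once its container no longer
// references it, and is actually deleted on the following pass, so a dead
// exec survives for at least one more five-minute tick.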
func (d *Daemon) execCommandGC() {
	for range time.Tick(5 * time.Minute) {
		var (
			cleaned          int
			liveExecCommands = d.containerExecIds()
		)
		for id, config := range d.execCommands.s {
			if config.canRemove {
				cleaned++
				d.execCommands.Delete(id)
			} else {
				if _, exists := liveExecCommands[id]; !exists {
					config.canRemove = true
				}
			}
		}
		if cleaned > 0 {
			logrus.Debugf("cleaned %d unused exec commands", cleaned)
		}
	}
}

// containerExecIds returns the set of all current exec ids that are in use
// and running inside a container.
func (d *Daemon) containerExecIds() map[string]struct{} {
	ids := map[string]struct{}{}
	for _, c := range d.containers.List() {
		for _, id := range c.execCommands.List() {
			ids[id] = struct{}{}
		}
	}
	return ids
}
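// execCommandGC never returns (time.Tick never closes its channel), so it is
// presumably started once on its own goroutine during daemon initialization,
// e.g. (a sketch; the actual wiring lives outside this file):
//
//	go d.execCommandGC()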