github.com/lmars/docker@v1.6.0-rc2/daemon/execdriver/native/driver.go

// +build linux,cgo

package native

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"sync"
	"syscall"
	"time"

	log "github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/docker/pkg/reexec"
	sysinfo "github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/libcontainer"
	"github.com/docker/libcontainer/apparmor"
	"github.com/docker/libcontainer/cgroups/systemd"
	"github.com/docker/libcontainer/configs"
	"github.com/docker/libcontainer/system"
	"github.com/docker/libcontainer/utils"
)

const (
	DriverName = "native"
	Version    = "0.2"
)

type driver struct {
	root             string
	initPath         string
	activeContainers map[string]libcontainer.Container
	machineMemory    int64
	factory          libcontainer.Factory
	sync.Mutex
}

func NewDriver(root, initPath string) (*driver, error) {
	meminfo, err := sysinfo.ReadMemInfo()
	if err != nil {
		return nil, err
	}

	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	// native driver root is at docker_root/execdriver/native. Put apparmor at docker_root
	if err := apparmor.InstallDefaultProfile(); err != nil {
		return nil, err
	}
	cgm := libcontainer.Cgroupfs
	if systemd.UseSystemd() {
		cgm = libcontainer.SystemdCgroups
	}

	f, err := libcontainer.New(
		root,
		cgm,
		libcontainer.InitPath(reexec.Self(), DriverName),
	)
	if err != nil {
		return nil, err
	}

	return &driver{
		root:             root,
		initPath:         initPath,
		activeContainers: make(map[string]libcontainer.Container),
		machineMemory:    meminfo.MemTotal,
		factory:          f,
	}, nil
}

type execOutput struct {
	exitCode int
	err      error
}

func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	// take the Command and populate the libcontainer.Config from it
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var term execdriver.Terminal

	p := &libcontainer.Process{
		Args: append([]string{c.ProcessConfig.Entrypoint}, c.ProcessConfig.Arguments...),
		Env:  c.ProcessConfig.Env,
		Cwd:  c.WorkingDir,
		User: c.ProcessConfig.User,
	}

	if c.ProcessConfig.Tty {
		rootuid, err := container.HostUID()
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		cons, err := p.NewConsole(rootuid)
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		term, err = NewTtyConsole(cons, pipes, rootuid)
	} else {
		p.Stdout = pipes.Stdout
		p.Stderr = pipes.Stderr
		r, w, err := os.Pipe()
		if err != nil {
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		if pipes.Stdin != nil {
			go func() {
				io.Copy(w, pipes.Stdin)
				w.Close()
			}()
			p.Stdin = r
		}
		term = &execdriver.StdConsole{}
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term

	cont, err := d.factory.Create(c.ID, container)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = cont
	d.Unlock()
	defer func() {
		cont.Destroy()
		d.cleanContainer(c.ID)
	}()
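
	// Start runs p as the container's init process; if anything fails from
	// this point on, the deferred cleanup above still destroys the container.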
	if err := cont.Start(p); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	if startCallback != nil {
		pid, err := p.Pid()
		if err != nil {
			p.Signal(os.Kill)
			p.Wait()
			return execdriver.ExitStatus{ExitCode: -1}, err
		}
		startCallback(&c.ProcessConfig, pid)
	}

	oomKillNotification, err := cont.NotifyOOM()
	if err != nil {
		oomKillNotification = nil
		log.Warnf("Your kernel does not support OOM notifications: %s", err)
	}
	waitF := p.Wait
	if nss := cont.Config().Namespaces; nss.Contains(configs.NEWPID) {
		// we need this hack to track processes with inherited fds,
		// because cmd.Wait() waits for all streams to be copied
		waitF = waitInPIDHost(p, cont)
	}
	ps, err := waitF()
	if err != nil {
		if err, ok := err.(*exec.ExitError); !ok {
			return execdriver.ExitStatus{ExitCode: -1}, err
		} else {
			ps = err.ProcessState
		}
	}
	cont.Destroy()

	_, oomKill := <-oomKillNotification

	return execdriver.ExitStatus{ExitCode: utils.ExitStatus(ps.Sys().(syscall.WaitStatus)), OOMKilled: oomKill}, nil
}

func waitInPIDHost(p *libcontainer.Process, c libcontainer.Container) func() (*os.ProcessState, error) {
	return func() (*os.ProcessState, error) {
		pid, err := p.Pid()
		if err != nil {
			return nil, err
		}

		process, err := os.FindProcess(pid)
		s, err := process.Wait()
		if err != nil {
			if err, ok := err.(*exec.ExitError); !ok {
				return s, err
			} else {
				s = err.ProcessState
			}
		}
		processes, err := c.Processes()
		if err != nil {
			return s, err
		}

		for _, pid := range processes {
			process, err := os.FindProcess(pid)
			if err != nil {
				log.Errorf("Failed to kill process: %d", pid)
				continue
			}
			process.Kill()
		}

		p.Wait()
		return s, err
	}
}

func (d *driver) Kill(c *execdriver.Command, sig int) error {
	active := d.activeContainers[c.ID]
	if active == nil {
		return fmt.Errorf("active container for %s does not exist", c.ID)
	}
	state, err := active.State()
	if err != nil {
		return err
	}
	return syscall.Kill(state.InitProcessPid, syscall.Signal(sig))
}

func (d *driver) Pause(c *execdriver.Command) error {
	active := d.activeContainers[c.ID]
	if active == nil {
		return fmt.Errorf("active container for %s does not exist", c.ID)
	}
	return active.Pause()
}

func (d *driver) Unpause(c *execdriver.Command) error {
	active := d.activeContainers[c.ID]
	if active == nil {
		return fmt.Errorf("active container for %s does not exist", c.ID)
	}
	return active.Resume()
}

func (d *driver) Terminate(c *execdriver.Command) error {
	defer d.cleanContainer(c.ID)
	// let's check the start time for the process
	active := d.activeContainers[c.ID]
	if active == nil {
		return fmt.Errorf("active container for %s does not exist", c.ID)
	}
	state, err := active.State()
	if err != nil {
		return err
	}
	pid := state.InitProcessPid

	currentStartTime, err := system.GetProcessStartTime(pid)
	if err != nil {
		return err
	}

	if state.InitProcessStartTime == currentStartTime {
		err = syscall.Kill(pid, 9)
		syscall.Wait4(pid, nil, 0, nil)
	}

	return err
}

func (d *driver) Info(id string) execdriver.Info {
	return &info{
		ID:     id,
		driver: d,
	}
}

func (d *driver) Name() string {
	return fmt.Sprintf("%s-%s", DriverName, Version)
}
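
// GetPidsForContainer returns the PIDs of all processes currently running
// inside the container identified by id.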
func (d *driver) GetPidsForContainer(id string) ([]int, error) {
	d.Lock()
	active := d.activeContainers[id]
	d.Unlock()

	if active == nil {
		return nil, fmt.Errorf("active container for %s does not exist", id)
	}
	return active.Processes()
}

func (d *driver) writeContainerFile(container *configs.Config, id string) error {
	data, err := json.Marshal(container)
	if err != nil {
		return err
	}
	return ioutil.WriteFile(filepath.Join(d.root, id, "container.json"), data, 0655)
}

func (d *driver) cleanContainer(id string) error {
	d.Lock()
	delete(d.activeContainers, id)
	d.Unlock()
	return os.RemoveAll(filepath.Join(d.root, id))
}

func (d *driver) createContainerRoot(id string) error {
	return os.MkdirAll(filepath.Join(d.root, id), 0655)
}

func (d *driver) Clean(id string) error {
	return os.RemoveAll(filepath.Join(d.root, id))
}

func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
	c := d.activeContainers[id]
	if c == nil {
		return nil, execdriver.ErrNotRunning
	}
	now := time.Now()
	stats, err := c.Stats()
	if err != nil {
		return nil, err
	}
	memoryLimit := c.Config().Cgroups.Memory
	// if the container does not have any memory limit specified, set the
	// limit to the machine's memory
	if memoryLimit == 0 {
		memoryLimit = d.machineMemory
	}
	return &execdriver.ResourceStats{
		Stats:       stats,
		Read:        now,
		MemoryLimit: memoryLimit,
	}, nil
}

func getEnv(key string, env []string) string {
	for _, pair := range env {
		parts := strings.Split(pair, "=")
		if parts[0] == key {
			return parts[1]
		}
	}
	return ""
}

type TtyConsole struct {
	console libcontainer.Console
}

func NewTtyConsole(console libcontainer.Console, pipes *execdriver.Pipes, rootuid int) (*TtyConsole, error) {
	tty := &TtyConsole{
		console: console,
	}

	if err := tty.AttachPipes(pipes); err != nil {
		tty.Close()
		return nil, err
	}

	return tty, nil
}

func (t *TtyConsole) Master() libcontainer.Console {
	return t.console
}

func (t *TtyConsole) Resize(h, w int) error {
	return term.SetWinsize(t.console.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
}

func (t *TtyConsole) AttachPipes(pipes *execdriver.Pipes) error {
	go func() {
		if wb, ok := pipes.Stdout.(interface {
			CloseWriters() error
		}); ok {
			defer wb.CloseWriters()
		}

		io.Copy(pipes.Stdout, t.console)
	}()

	if pipes.Stdin != nil {
		go func() {
			io.Copy(t.console, pipes.Stdin)

			pipes.Stdin.Close()
		}()
	}

	return nil
}

func (t *TtyConsole) Close() error {
	return t.console.Close()
}
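
// Illustrative usage sketch (not part of the original file): the daemon is
// expected to construct the driver once and reuse it for every container.
// dockerRoot, dockerInit, cmd, pipes and startCallback below are placeholders
// for values the daemon supplies, not identifiers from this package.
//
//	d, err := NewDriver(filepath.Join(dockerRoot, "execdriver", "native"), dockerInit)
//	if err != nil {
//		log.Fatal(err)
//	}
//	exitStatus, err := d.Run(cmd, pipes, startCallback)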