github.com/guilhermebr/docker@v1.4.2-0.20150428121140-67da055cebca/daemon/execdriver/lxc/driver.go

package lxc

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"path"
	"path/filepath"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/daemon/execdriver"
	"github.com/docker/docker/pkg/stringutils"
	sysinfo "github.com/docker/docker/pkg/system"
	"github.com/docker/docker/pkg/term"
	"github.com/docker/docker/pkg/version"
	"github.com/docker/libcontainer"
	"github.com/docker/libcontainer/cgroups"
	"github.com/docker/libcontainer/configs"
	"github.com/docker/libcontainer/system"
	"github.com/docker/libcontainer/user"
	"github.com/kr/pty"
)

// DriverName is the name of the lxc execution driver.
const DriverName = "lxc"

// ErrExec is returned because exec is not supported by the lxc driver.
var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver")

type driver struct {
	root             string // root path for the driver to use
	libPath          string
	initPath         string
	apparmor         bool
	sharedRoot       bool
	activeContainers map[string]*activeContainer
	machineMemory    int64
	sync.Mutex
}

type activeContainer struct {
	container *configs.Config
	cmd       *exec.Cmd
}

func NewDriver(root, libPath, initPath string, apparmor bool) (*driver, error) {
	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	// setup unconfined symlink
	if err := linkLxcStart(root); err != nil {
		return nil, err
	}
	meminfo, err := sysinfo.ReadMemInfo()
	if err != nil {
		return nil, err
	}
	return &driver{
		apparmor:         apparmor,
		root:             root,
		libPath:          libPath,
		initPath:         initPath,
		sharedRoot:       rootIsShared(),
		activeContainers: make(map[string]*activeContainer),
		machineMemory:    meminfo.MemTotal,
	}, nil
}

func (d *driver) Name() string {
	version := d.version()
	return fmt.Sprintf("%s-%s", DriverName, version)
}

func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
	var (
		term     execdriver.Terminal
		err      error
		dataPath = d.containerDir(c.ID)
	)

	if c.ProcessConfig.Tty {
		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
	} else {
		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
	}
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	c.ProcessConfig.Terminal = term
	container, err := d.createContainer(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	d.Lock()
	d.activeContainers[c.ID] = &activeContainer{
		container: container,
		cmd:       &c.ProcessConfig.Cmd,
	}
	d.Unlock()

	c.Mounts = append(c.Mounts, execdriver.Mount{
		Source:      d.initPath,
		Destination: c.InitPath,
		Writable:    false,
		Private:     true,
	})

	if err := d.generateEnvConfig(c); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	configPath, err := d.generateLXCConfig(c)
	if err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}
	params := []string{
		"lxc-start",
		"-n", c.ID,
		"-f", configPath,
	}

	// From lxc>=1.1 the default behavior is to daemonize containers after start
	lxcVersion := version.Version(d.version())
	if lxcVersion.GreaterThanOrEqualTo(version.Version("1.1")) {
		params = append(params, "-F")
	}

	if c.Network.ContainerID != "" {
		params = append(params,
			"--share-net", c.Network.ContainerID,
		)
	}
	if c.Ipc != nil {
		if c.Ipc.ContainerID != "" {
			params = append(params,
				"--share-ipc", c.Ipc.ContainerID,
			)
		} else if c.Ipc.HostIpc {
			params = append(params,
				"--share-ipc", "1",
			)
		}
	}

	params = append(params,
		"--",
		c.InitPath,
	)
	if c.Network.Interface != nil {
		params = append(params,
			"-g", c.Network.Interface.Gateway,
			"-i", fmt.Sprintf("%s/%d", c.Network.Interface.IPAddress, c.Network.Interface.IPPrefixLen),
		)
	}
	params = append(params,
		"-mtu", strconv.Itoa(c.Network.Mtu),
	)

	if c.ProcessConfig.User != "" {
		params = append(params, "-u", c.ProcessConfig.User)
	}

	if c.ProcessConfig.Privileged {
		if d.apparmor {
			params[0] = path.Join(d.root, "lxc-start-unconfined")
		}
		params = append(params, "-privileged")
	}

	if c.WorkingDir != "" {
		params = append(params, "-w", c.WorkingDir)
	}

	params = append(params, "--", c.ProcessConfig.Entrypoint)
	params = append(params, c.ProcessConfig.Arguments...)

	if d.sharedRoot {
		// lxc-start really needs / to be non-shared, or all kinds of stuff break
		// when lxc-start unmounts things and those unmounts propagate to the main
		// mount namespace.
		// What we really want is to clone into a new namespace and then
		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
		// without exec in go we have to do this horrible shell hack...
		shellString :=
			"mount --make-rslave /; exec " +
				stringutils.ShellQuoteArguments(params)

		params = []string{
			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
		}
	}
	logrus.Debugf("lxc params %s", params)
	var (
		name = params[0]
		arg  = params[1:]
	)
	aname, err := exec.LookPath(name)
	if err != nil {
		aname = name
	}
	c.ProcessConfig.Path = aname
	c.ProcessConfig.Args = append([]string{name}, arg...)

	if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	if err := c.ProcessConfig.Start(); err != nil {
		return execdriver.ExitStatus{ExitCode: -1}, err
	}

	var (
		waitErr  error
		waitLock = make(chan struct{})
	)

	go func() {
		if err := c.ProcessConfig.Wait(); err != nil {
			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
				waitErr = err
			}
		}
		close(waitLock)
	}()

	terminate := func(terr error) (execdriver.ExitStatus, error) {
		if c.ProcessConfig.Process != nil {
			c.ProcessConfig.Process.Kill()
			c.ProcessConfig.Wait()
		}
		return execdriver.ExitStatus{ExitCode: -1}, terr
	}
	// Poll lxc for RUNNING status
	pid, err := d.waitForStart(c, waitLock)
	if err != nil {
		return terminate(err)
	}

	cgroupPaths, err := cgroupPaths(c.ID)
	if err != nil {
		return terminate(err)
	}

	state := &libcontainer.State{
		InitProcessPid: pid,
		CgroupPaths:    cgroupPaths,
	}

	f, err := os.Create(filepath.Join(dataPath, "state.json"))
	if err != nil {
		return terminate(err)
	}
	defer f.Close()

	if err := json.NewEncoder(f).Encode(state); err != nil {
		return terminate(err)
	}

	c.ContainerPid = pid

	if startCallback != nil {
		logrus.Debugf("Invoking startCallback")
		startCallback(&c.ProcessConfig, pid)
	}

	oomKill := false
	oomKillNotification, err := notifyOnOOM(cgroupPaths)

	<-waitLock

	if err == nil {
		_, oomKill = <-oomKillNotification
		logrus.Debugf("oomKill %v waitErr %v", oomKill, waitErr)
	} else {
		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
	}

	// check oom error
	exitCode := getExitCode(c)
	if oomKill {
		exitCode = 137
	}
	return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
}

// copied from libcontainer
func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
	dir := paths["memory"]
	if dir == "" {
		return nil, fmt.Errorf("There is no path for %q in state", "memory")
	}
	oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control"))
	if err != nil {
		return nil, err
	}
	fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
	if syserr != 0 {
		oomControl.Close()
		return nil, syserr
	}

	eventfd := os.NewFile(fd, "eventfd")

	eventControlPath := filepath.Join(dir, "cgroup.event_control")
	data := fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd())
	if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil {
		eventfd.Close()
		oomControl.Close()
		return nil, err
	}
	ch := make(chan struct{})
	go func() {
		defer func() {
			close(ch)
			eventfd.Close()
			oomControl.Close()
		}()
		buf := make([]byte, 8)
		for {
			if _, err := eventfd.Read(buf); err != nil {
				return
			}
			// When a cgroup is destroyed, an event is sent to eventfd.
			// So if the control path is gone, return instead of notifying.
			if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) {
				return
			}
			ch <- struct{}{}
		}
	}()
	return ch, nil
}

// createContainer populates and configures the container type with the
// data provided by the execdriver.Command
func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error) {
	container := execdriver.InitContainer(c)
	if err := execdriver.SetupCgroups(container, c); err != nil {
		return nil, err
	}
	return container, nil
}

// Returns a map of subsystem -> container cgroup path
func cgroupPaths(containerId string) (map[string]string, error) {
	subsystems, err := cgroups.GetAllSubsystems()
	if err != nil {
		return nil, err
	}
	logrus.Debugf("subsystems: %s", subsystems)
	paths := make(map[string]string)
	for _, subsystem := range subsystems {
		cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
		logrus.Debugf("cgroup path %s %s", cgroupRoot, cgroupDir)
		if err != nil {
			// unsupported subsystem
			continue
		}
		path := filepath.Join(cgroupRoot, cgroupDir, "lxc", containerId)
		paths[subsystem] = path
	}

	return paths, nil
}

// this is copied from the old libcontainer nodes.go
func createDeviceNodes(rootfs string, nodesToCreate []*configs.Device) error {
	oldMask := syscall.Umask(0000)
	defer syscall.Umask(oldMask)

	for _, node := range nodesToCreate {
		if err := createDeviceNode(rootfs, node); err != nil {
			return err
		}
	}
	return nil
}

// Creates the device node in the rootfs of the container.
func createDeviceNode(rootfs string, node *configs.Device) error {
	var (
		dest   = filepath.Join(rootfs, node.Path)
		parent = filepath.Dir(dest)
	)

	if err := os.MkdirAll(parent, 0755); err != nil {
		return err
	}

	fileMode := node.FileMode
	switch node.Type {
	case 'c':
		fileMode |= syscall.S_IFCHR
	case 'b':
		fileMode |= syscall.S_IFBLK
	default:
		return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path)
	}

	if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil && !os.IsExist(err) {
		return fmt.Errorf("mknod %s %s", node.Path, err)
	}

	if err := syscall.Chown(dest, int(node.Uid), int(node.Gid)); err != nil {
		return fmt.Errorf("chown %s to %d:%d", node.Path, node.Uid, node.Gid)
	}

	return nil
}

// setupUser changes the groups, gid, and uid for the user inside the container.
// Copied from libcontainer because it is private there.
func setupUser(userSpec string) error {
	// Set up defaults.
	defaultExecUser := user.ExecUser{
		Uid:  syscall.Getuid(),
		Gid:  syscall.Getgid(),
		Home: "/",
	}
	passwdPath, err := user.GetPasswdPath()
	if err != nil {
		return err
	}
	groupPath, err := user.GetGroupPath()
	if err != nil {
		return err
	}
	execUser, err := user.GetExecUserPath(userSpec, &defaultExecUser, passwdPath, groupPath)
	if err != nil {
		return err
	}
	if err := syscall.Setgroups(execUser.Sgids); err != nil {
		return err
	}
	if err := system.Setgid(execUser.Gid); err != nil {
		return err
	}
	if err := system.Setuid(execUser.Uid); err != nil {
		return err
	}
	// if we didn't get HOME already, set it based on the user's HOME
	if envHome := os.Getenv("HOME"); envHome == "" {
		if err := os.Setenv("HOME", execUser.Home); err != nil {
			return err
		}
	}
	return nil
}

// Return the exit code of the process;
// if the process has not exited, -1 will be returned.
func getExitCode(c *execdriver.Command) int {
	if c.ProcessConfig.ProcessState == nil {
		return -1
	}
	return c.ProcessConfig.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
}

func (d *driver) Kill(c *execdriver.Command, sig int) error {
	return KillLxc(c.ID, sig)
}

func (d *driver) Pause(c *execdriver.Command) error {
	_, err := exec.LookPath("lxc-freeze")
	if err == nil {
		output, errExec := exec.Command("lxc-freeze", "-n", c.ID).CombinedOutput()
		if errExec != nil {
			return fmt.Errorf("Err: %s Output: %s", errExec, output)
		}
	}

	return err
}

func (d *driver) Unpause(c *execdriver.Command) error {
	_, err := exec.LookPath("lxc-unfreeze")
	if err == nil {
		output, errExec := exec.Command("lxc-unfreeze", "-n", c.ID).CombinedOutput()
		if errExec != nil {
			return fmt.Errorf("Err: %s Output: %s", errExec, output)
		}
	}

	return err
}

func (d *driver) Terminate(c *execdriver.Command) error {
	return KillLxc(c.ID, 9)
}

func (d *driver) version() string {
	var (
		version string
		output  []byte
		err     error
	)
	if _, errPath := exec.LookPath("lxc-version"); errPath == nil {
		output, err = exec.Command("lxc-version").CombinedOutput()
	} else {
		output, err = exec.Command("lxc-start", "--version").CombinedOutput()
	}
	if err == nil {
		version = strings.TrimSpace(string(output))
		if parts := strings.SplitN(version, ":", 2); len(parts) == 2 {
			version = strings.TrimSpace(parts[1])
		}
	}
	return version
}

func KillLxc(id string, sig int) error {
	var (
		err    error
		output []byte
	)
	_, err = exec.LookPath("lxc-kill")
	if err == nil {
		output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput()
	} else {
		output, err = exec.Command("lxc-stop", "-k", "-n", id, strconv.Itoa(sig)).CombinedOutput()
	}
	if err != nil {
		return fmt.Errorf("Err: %s Output: %s", err, output)
	}
	return nil
}

// wait for the process to start and return the pid for the process
func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) {
	var (
		err    error
		output []byte
	)
	// We wait for the container to be fully running.
	// Timeout after 5 seconds. In case of broken pipe, just retry.
	// Note: The container can run and finish correctly before
	// the end of this loop.
	for now := time.Now(); time.Since(now) < 5*time.Second; {
		select {
		case <-waitLock:
			// If the process dies while waiting for it, just return
			return -1, nil
		default:
		}

		output, err = d.getInfo(c.ID)
		if err == nil {
			info, err := parseLxcInfo(string(output))
			if err != nil {
				return -1, err
			}
			if info.Running {
				return info.Pid, nil
			}
		}
		time.Sleep(50 * time.Millisecond)
	}
	return -1, execdriver.ErrNotRunning
}

func (d *driver) getInfo(id string) ([]byte, error) {
	return exec.Command("lxc-info", "-n", id).CombinedOutput()
}

type info struct {
	ID     string
	driver *driver
}

func (i *info) IsRunning() bool {
	var running bool

	output, err := i.driver.getInfo(i.ID)
	if err != nil {
		logrus.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
		return false
	}
	if strings.Contains(string(output), "RUNNING") {
		running = true
	}
	return running
}

func (d *driver) Info(id string) execdriver.Info {
	return &info{
		ID:     id,
		driver: d,
	}
}

func findCgroupRootAndDir(subsystem string) (string, string, error) {
	cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem)
	if err != nil {
		return "", "", err
	}

	cgroupDir, err := cgroups.GetThisCgroupDir(subsystem)
	if err != nil {
		return "", "", err
	}
	return cgroupRoot, cgroupDir, nil
}

func (d *driver) GetPidsForContainer(id string) ([]int, error) {
	pids := []int{}

	// cpu is chosen because it is the only non-optional subsystem in cgroups
	subsystem := "cpu"
	cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
	if err != nil {
		return pids, err
	}

	filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks")
	if _, err := os.Stat(filename); os.IsNotExist(err) {
		// With more recent lxc versions, the cgroup will be in lxc/
		filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks")
	}

	output, err := ioutil.ReadFile(filename)
	if err != nil {
		return pids, err
	}
	for _, p := range strings.Split(string(output), "\n") {
		if len(p) == 0 {
			continue
		}
		pid, err := strconv.Atoi(p)
		if err != nil {
			return pids, fmt.Errorf("Invalid pid '%s': %s", p, err)
		}
		pids = append(pids, pid)
	}
	return pids, nil
}

func linkLxcStart(root string) error {
	sourcePath, err := exec.LookPath("lxc-start")
	if err != nil {
		return err
	}
	targetPath := path.Join(root, "lxc-start-unconfined")

	if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) {
		return err
	} else if err == nil {
		if err := os.Remove(targetPath); err != nil {
			return err
		}
	}
	return os.Symlink(sourcePath, targetPath)
}

// TODO: This can be moved to the mountinfo reader in the mount pkg
func rootIsShared() bool {
	if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil {
		for _, line := range strings.Split(string(data), "\n") {
			cols := strings.Split(line, " ")
			// cols[4] is the mount point, cols[6] the first optional field (e.g. "shared:N")
			if len(cols) >= 7 && cols[4] == "/" {
				return strings.HasPrefix(cols[6], "shared")
			}
		}
	}

	// No idea, probably safe to assume so
	return true
}

func (d *driver) containerDir(containerId string) string {
	return path.Join(d.libPath, "containers", containerId)
"containers", containerId) 675 } 676 677 func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) { 678 root := path.Join(d.containerDir(c.ID), "config.lxc") 679 680 fo, err := os.Create(root) 681 if err != nil { 682 return "", err 683 } 684 defer fo.Close() 685 686 if err := LxcTemplateCompiled.Execute(fo, struct { 687 *execdriver.Command 688 AppArmor bool 689 }{ 690 Command: c, 691 AppArmor: d.apparmor, 692 }); err != nil { 693 return "", err 694 } 695 696 return root, nil 697 } 698 699 func (d *driver) generateEnvConfig(c *execdriver.Command) error { 700 data, err := json.Marshal(c.ProcessConfig.Env) 701 if err != nil { 702 return err 703 } 704 p := path.Join(d.libPath, "containers", c.ID, "config.env") 705 c.Mounts = append(c.Mounts, execdriver.Mount{ 706 Source: p, 707 Destination: "/.dockerenv", 708 Writable: false, 709 Private: true, 710 }) 711 712 return ioutil.WriteFile(p, data, 0600) 713 } 714 715 // Clean not implemented for lxc 716 func (d *driver) Clean(id string) error { 717 return nil 718 } 719 720 type TtyConsole struct { 721 MasterPty *os.File 722 SlavePty *os.File 723 } 724 725 func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) { 726 // lxc is special in that we cannot create the master outside of the container without 727 // opening the slave because we have nothing to provide to the cmd. We have to open both then do 728 // the crazy setup on command right now instead of passing the console path to lxc and telling it 729 // to open up that console. we save a couple of openfiles in the native driver because we can do 730 // this. 731 ptyMaster, ptySlave, err := pty.Open() 732 if err != nil { 733 return nil, err 734 } 735 736 tty := &TtyConsole{ 737 MasterPty: ptyMaster, 738 SlavePty: ptySlave, 739 } 740 741 if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil { 742 tty.Close() 743 return nil, err 744 } 745 746 processConfig.Console = tty.SlavePty.Name() 747 748 return tty, nil 749 } 750 751 func (t *TtyConsole) Master() *os.File { 752 return t.MasterPty 753 } 754 755 func (t *TtyConsole) Resize(h, w int) error { 756 return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)}) 757 } 758 759 func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error { 760 command.Stdout = t.SlavePty 761 command.Stderr = t.SlavePty 762 763 go func() { 764 if wb, ok := pipes.Stdout.(interface { 765 CloseWriters() error 766 }); ok { 767 defer wb.CloseWriters() 768 } 769 770 io.Copy(pipes.Stdout, t.MasterPty) 771 }() 772 773 if pipes.Stdin != nil { 774 command.Stdin = t.SlavePty 775 command.SysProcAttr.Setctty = true 776 777 go func() { 778 io.Copy(t.MasterPty, pipes.Stdin) 779 780 pipes.Stdin.Close() 781 }() 782 } 783 return nil 784 } 785 786 func (t *TtyConsole) Close() error { 787 t.SlavePty.Close() 788 return t.MasterPty.Close() 789 } 790 791 func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) { 792 return -1, ErrExec 793 } 794 795 func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) { 796 if _, ok := d.activeContainers[id]; !ok { 797 return nil, fmt.Errorf("%s is not a key in active containers", id) 798 } 799 return execdriver.Stats(d.containerDir(id), d.activeContainers[id].container.Cgroups.Memory, d.machineMemory) 800 }