github.com/rentongzhang/docker@v1.8.2-rc1/daemon/execdriver/lxc/driver.go

     1  // +build linux
     2  
     3  package lxc
     4  
     5  import (
     6  	"encoding/json"
     7  	"errors"
     8  	"fmt"
     9  	"io"
    10  	"io/ioutil"
    11  	"os"
    12  	"os/exec"
    13  	"path"
    14  	"path/filepath"
    15  	"runtime"
    16  	"strconv"
    17  	"strings"
    18  	"sync"
    19  	"syscall"
    20  	"time"
    21  
    22  	"github.com/Sirupsen/logrus"
    23  	"github.com/docker/docker/daemon/execdriver"
    24  	"github.com/docker/docker/pkg/stringutils"
    25  	sysinfo "github.com/docker/docker/pkg/system"
    26  	"github.com/docker/docker/pkg/term"
    27  	"github.com/docker/docker/pkg/version"
    28  	"github.com/kr/pty"
    29  	"github.com/opencontainers/runc/libcontainer"
    30  	"github.com/opencontainers/runc/libcontainer/cgroups"
    31  	"github.com/opencontainers/runc/libcontainer/configs"
    32  	"github.com/opencontainers/runc/libcontainer/system"
    33  	"github.com/opencontainers/runc/libcontainer/user"
    34  	"github.com/vishvananda/netns"
    35  )
    36  
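        // DriverName is the name of the lxc execution driver.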
    37  const DriverName = "lxc"
    38  
    39  var ErrExec = errors.New("Unsupported: Exec is not supported by the lxc driver")
    40  
    41  type driver struct {
    42  	root             string // root path for the driver to use
    43  	libPath          string
    44  	initPath         string
    45  	apparmor         bool
    46  	sharedRoot       bool
    47  	activeContainers map[string]*activeContainer
    48  	machineMemory    int64
    49  	sync.Mutex
    50  }
    51  
    52  type activeContainer struct {
    53  	container *configs.Config
    54  	cmd       *exec.Cmd
    55  }
    56  
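        // NewDriver returns an lxc execution driver rooted at root. It creates the
        // root directory, sets up the lxc-start-unconfined symlink, and records the
        // host's total memory for later use.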
    57  func NewDriver(root, libPath, initPath string, apparmor bool) (*driver, error) {
    58  	if err := os.MkdirAll(root, 0700); err != nil {
    59  		return nil, err
    60  	}
    61  	// setup unconfined symlink
    62  	if err := linkLxcStart(root); err != nil {
    63  		return nil, err
    64  	}
    65  	meminfo, err := sysinfo.ReadMemInfo()
    66  	if err != nil {
    67  		return nil, err
    68  	}
    69  	return &driver{
    70  		apparmor:         apparmor,
    71  		root:             root,
    72  		libPath:          libPath,
    73  		initPath:         initPath,
    74  		sharedRoot:       rootIsShared(),
    75  		activeContainers: make(map[string]*activeContainer),
    76  		machineMemory:    meminfo.MemTotal,
    77  	}, nil
    78  }
    79  
    80  func (d *driver) Name() string {
    81  	version := d.version()
    82  	return fmt.Sprintf("%s-%s", DriverName, version)
    83  }
    84  
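        // setupNetNs joins the network namespace at nsPath and starts a long-lived
        // placeholder process inside it, so that lxc-start can be told to share that
        // process's network namespace. The caller must clean it up with killNetNsProc.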
    85  func setupNetNs(nsPath string) (*os.Process, error) {
    86  	runtime.LockOSThread()
    87  	defer runtime.UnlockOSThread()
    88  
    89  	origns, err := netns.Get()
    90  	if err != nil {
    91  		return nil, err
    92  	}
    93  	defer origns.Close()
    94  
    95  	f, err := os.OpenFile(nsPath, os.O_RDONLY, 0)
    96  	if err != nil {
    97  		return nil, fmt.Errorf("failed to get network namespace %q: %v", nsPath, err)
    98  	}
    99  	defer f.Close()
   100  
   101  	nsFD := f.Fd()
   102  	if err := netns.Set(netns.NsHandle(nsFD)); err != nil {
   103  		return nil, fmt.Errorf("failed to set network namespace %q: %v", nsPath, err)
   104  	}
   105  	defer netns.Set(origns)
   106  
   107  	cmd := exec.Command("/bin/sh", "-c", "while true; do sleep 1; done")
   108  	if err := cmd.Start(); err != nil {
   109  		return nil, fmt.Errorf("failed to start netns process: %v", err)
   110  	}
   111  
   112  	return cmd.Process, nil
   113  }
   114  
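        // killNetNsProc kills and reaps the placeholder process started by setupNetNs.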
   115  func killNetNsProc(proc *os.Process) {
   116  	proc.Kill()
   117  	proc.Wait()
   118  }
   119  
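        // Run generates the LXC and environment configuration for the command, starts
        // the container with lxc-start, waits for it to reach the RUNNING state,
        // records its cgroup state, and then blocks until the container exits,
        // reporting OOM kills through the returned exit status.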
   120  func (d *driver) Run(c *execdriver.Command, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (execdriver.ExitStatus, error) {
   121  	var (
   122  		term     execdriver.Terminal
   123  		err      error
   124  		dataPath = d.containerDir(c.ID)
   125  	)
   126  
   127  	if c.Network == nil || (c.Network.NamespacePath == "" && c.Network.ContainerID == "") {
   128  		return execdriver.ExitStatus{ExitCode: -1}, fmt.Errorf("empty namespace path for non-container network")
   129  	}
   130  
   131  	container, err := d.createContainer(c)
   132  	if err != nil {
   133  		return execdriver.ExitStatus{ExitCode: -1}, err
   134  	}
   135  
   136  	if c.ProcessConfig.Tty {
   137  		term, err = NewTtyConsole(&c.ProcessConfig, pipes)
   138  	} else {
   139  		term, err = execdriver.NewStdConsole(&c.ProcessConfig, pipes)
   140  	}
   141  	if err != nil {
   142  		return execdriver.ExitStatus{ExitCode: -1}, err
   143  	}
   144  	c.ProcessConfig.Terminal = term
   145  
   146  	d.Lock()
   147  	d.activeContainers[c.ID] = &activeContainer{
   148  		container: container,
   149  		cmd:       &c.ProcessConfig.Cmd,
   150  	}
   151  	d.Unlock()
   152  
   153  	c.Mounts = append(c.Mounts, execdriver.Mount{
   154  		Source:      d.initPath,
   155  		Destination: c.InitPath,
   156  		Writable:    false,
   157  		Private:     true,
   158  	})
   159  
   160  	if err := d.generateEnvConfig(c); err != nil {
   161  		return execdriver.ExitStatus{ExitCode: -1}, err
   162  	}
   163  	configPath, err := d.generateLXCConfig(c)
   164  	if err != nil {
   165  		return execdriver.ExitStatus{ExitCode: -1}, err
   166  	}
   167  	params := []string{
   168  		"lxc-start",
   169  		"-n", c.ID,
   170  		"-f", configPath,
   171  		"-q",
   172  	}
   173  
   174  	// From lxc>=1.1 the default behavior is to daemonize containers after start
   175  	lxcVersion := version.Version(d.version())
   176  	if lxcVersion.GreaterThanOrEqualTo(version.Version("1.1")) {
   177  		params = append(params, "-F")
   178  	}
   179  
   180  	proc := &os.Process{}
   181  	if c.Network.ContainerID != "" {
   182  		params = append(params,
   183  			"--share-net", c.Network.ContainerID,
   184  		)
   185  	} else {
   186  		proc, err = setupNetNs(c.Network.NamespacePath)
   187  		if err != nil {
   188  			return execdriver.ExitStatus{ExitCode: -1}, err
   189  		}
   190  
   191  		pidStr := fmt.Sprintf("%d", proc.Pid)
   192  		params = append(params,
   193  			"--share-net", pidStr)
   194  	}
   195  	if c.Ipc != nil {
   196  		if c.Ipc.ContainerID != "" {
   197  			params = append(params,
   198  				"--share-ipc", c.Ipc.ContainerID,
   199  			)
   200  		} else if c.Ipc.HostIpc {
   201  			params = append(params,
   202  				"--share-ipc", "1",
   203  			)
   204  		}
   205  	}
   206  
   207  	params = append(params,
   208  		"--",
   209  		c.InitPath,
   210  	)
   211  
   212  	if c.ProcessConfig.User != "" {
   213  		params = append(params, "-u", c.ProcessConfig.User)
   214  	}
   215  
   216  	if c.ProcessConfig.Privileged {
   217  		if d.apparmor {
   218  			params[0] = path.Join(d.root, "lxc-start-unconfined")
   219  
   220  		}
   221  		params = append(params, "-privileged")
   222  	}
   223  
   224  	if c.WorkingDir != "" {
   225  		params = append(params, "-w", c.WorkingDir)
   226  	}
   227  
   228  	params = append(params, "--", c.ProcessConfig.Entrypoint)
   229  	params = append(params, c.ProcessConfig.Arguments...)
   230  
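        	// Illustrative only: for a hypothetical container ID "abc123" on lxc >= 1.1,
        	// with no user, privileged, or workdir options set, the assembled command
        	// looks roughly like:
        	//   lxc-start -n abc123 -f <containerDir>/config.lxc -q -F --share-net <pid> -- <initPath> -- <entrypoint> <args...>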
   231  	if d.sharedRoot {
   232  		// lxc-start really needs / to be non-shared, or all kinds of stuff break
   233  		// when lxc-start unmounts things and those unmounts propagate to the main
   234  		// mount namespace.
   235  		// What we really want is to clone into a new namespace and then
   236  		// mount / MS_REC|MS_SLAVE, but since we can't really clone or fork
   237  		// without exec in Go, we have to do this horrible shell hack...
   238  		shellString :=
   239  			"mount --make-rslave /; exec " +
   240  				stringutils.ShellQuoteArguments(params)
   241  
   242  		params = []string{
   243  			"unshare", "-m", "--", "/bin/sh", "-c", shellString,
   244  		}
   245  	}
   246  	logrus.Debugf("lxc params %s", params)
   247  	var (
   248  		name = params[0]
   249  		arg  = params[1:]
   250  	)
   251  	aname, err := exec.LookPath(name)
   252  	if err != nil {
   253  		aname = name
   254  	}
   255  	c.ProcessConfig.Path = aname
   256  	c.ProcessConfig.Args = append([]string{name}, arg...)
   257  
   258  	if err := createDeviceNodes(c.Rootfs, c.AutoCreatedDevices); err != nil {
   259  		killNetNsProc(proc)
   260  		return execdriver.ExitStatus{ExitCode: -1}, err
   261  	}
   262  
   263  	if err := c.ProcessConfig.Start(); err != nil {
   264  		killNetNsProc(proc)
   265  		return execdriver.ExitStatus{ExitCode: -1}, err
   266  	}
   267  
   268  	var (
   269  		waitErr  error
   270  		waitLock = make(chan struct{})
   271  	)
   272  
   273  	go func() {
   274  		if err := c.ProcessConfig.Wait(); err != nil {
   275  			if _, ok := err.(*exec.ExitError); !ok { // Do not propagate the error if it's simply a status code != 0
   276  				waitErr = err
   277  			}
   278  		}
   279  		close(waitLock)
   280  	}()
   281  
   282  	terminate := func(terr error) (execdriver.ExitStatus, error) {
   283  		if c.ProcessConfig.Process != nil {
   284  			c.ProcessConfig.Process.Kill()
   285  			c.ProcessConfig.Wait()
   286  		}
   287  		return execdriver.ExitStatus{ExitCode: -1}, terr
   288  	}
   289  	// Poll lxc for RUNNING status
   290  	pid, err := d.waitForStart(c, waitLock)
   291  	if err != nil {
   292  		killNetNsProc(proc)
   293  		return terminate(err)
   294  	}
   295  	killNetNsProc(proc)
   296  
   297  	cgroupPaths, err := cgroupPaths(c.ID)
   298  	if err != nil {
   299  		return terminate(err)
   300  	}
   301  
   302  	state := &libcontainer.State{
   303  		InitProcessPid: pid,
   304  		CgroupPaths:    cgroupPaths,
   305  	}
   306  
   307  	f, err := os.Create(filepath.Join(dataPath, "state.json"))
   308  	if err != nil {
   309  		return terminate(err)
   310  	}
   311  	defer f.Close()
   312  
   313  	if err := json.NewEncoder(f).Encode(state); err != nil {
   314  		return terminate(err)
   315  	}
   316  
   317  	c.ContainerPid = pid
   318  
   319  	if startCallback != nil {
   320  		logrus.Debugf("Invoking startCallback")
   321  		startCallback(&c.ProcessConfig, pid)
   322  	}
   323  
   324  	oomKill := false
   325  	oomKillNotification, err := notifyOnOOM(cgroupPaths)
   326  
   327  	<-waitLock
   328  	exitCode := getExitCode(c)
   329  
   330  	if err == nil {
   331  		_, oomKill = <-oomKillNotification
   332  		logrus.Debugf("oomKill: %v, waitErr: %v", oomKill, waitErr)
   333  	} else {
   334  		logrus.Warnf("Your kernel does not support OOM notifications: %s", err)
   335  	}
   336  
   337  	// if the container was OOM-killed, report exit code 137 (128 + SIGKILL)
   338  	if oomKill {
   339  		exitCode = 137
   340  	}
   341  
   342  	return execdriver.ExitStatus{ExitCode: exitCode, OOMKilled: oomKill}, waitErr
   343  }
   344  
   345  // notifyOnOOM returns a channel that receives a value on each OOM kill in the container's memory cgroup; copied from libcontainer.
   346  func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
   347  	dir := paths["memory"]
   348  	if dir == "" {
   349  		return nil, fmt.Errorf("There is no path for %q in state", "memory")
   350  	}
   351  	oomControl, err := os.Open(filepath.Join(dir, "memory.oom_control"))
   352  	if err != nil {
   353  		return nil, err
   354  	}
   355  	fd, _, syserr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
   356  	if syserr != 0 {
   357  		oomControl.Close()
   358  		return nil, syserr
   359  	}
   360  
   361  	eventfd := os.NewFile(fd, "eventfd")
   362  
   363  	eventControlPath := filepath.Join(dir, "cgroup.event_control")
   364  	data := fmt.Sprintf("%d %d", eventfd.Fd(), oomControl.Fd())
   365  	if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil {
   366  		eventfd.Close()
   367  		oomControl.Close()
   368  		return nil, err
   369  	}
   370  	ch := make(chan struct{})
   371  	go func() {
   372  		defer func() {
   373  			close(ch)
   374  			eventfd.Close()
   375  			oomControl.Close()
   376  		}()
   377  		buf := make([]byte, 8)
   378  		for {
   379  			if _, err := eventfd.Read(buf); err != nil {
   380  				return
   381  			}
   382  			// When a cgroup is destroyed, an event is sent to eventfd.
   383  			// So if the control path is gone, return instead of notifying.
   384  			if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) {
   385  				return
   386  			}
   387  			ch <- struct{}{}
   388  		}
   389  	}()
   390  	return ch, nil
   391  }
   392  
   393  // createContainer populates and configures the container type with the
   394  // data provided by the execdriver.Command
   395  func (d *driver) createContainer(c *execdriver.Command) (*configs.Config, error) {
   396  	container := execdriver.InitContainer(c)
   397  	if err := execdriver.SetupCgroups(container, c); err != nil {
   398  		return nil, err
   399  	}
   400  	return container, nil
   401  }
   402  
   403  // Return a map of subsystem -> container cgroup path
   404  func cgroupPaths(containerId string) (map[string]string, error) {
   405  	subsystems, err := cgroups.GetAllSubsystems()
   406  	if err != nil {
   407  		return nil, err
   408  	}
   409  	logrus.Debugf("subsystems: %s", subsystems)
   410  	paths := make(map[string]string)
   411  	for _, subsystem := range subsystems {
   412  		cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
   413  		logrus.Debugf("cgroup path %s %s", cgroupRoot, cgroupDir)
   414  		if err != nil {
   415  			// unsupported subsystem
   416  			continue
   417  		}
   418  		path := filepath.Join(cgroupRoot, cgroupDir, "lxc", containerId)
   419  		paths[subsystem] = path
   420  	}
   421  
   422  	return paths, nil
   423  }
   424  
   425  // copied from the old libcontainer nodes.go
   426  func createDeviceNodes(rootfs string, nodesToCreate []*configs.Device) error {
   427  	oldMask := syscall.Umask(0000)
   428  	defer syscall.Umask(oldMask)
   429  
   430  	for _, node := range nodesToCreate {
   431  		if err := createDeviceNode(rootfs, node); err != nil {
   432  			return err
   433  		}
   434  	}
   435  	return nil
   436  }
   437  
   438  // Creates the device node in the rootfs of the container.
   439  func createDeviceNode(rootfs string, node *configs.Device) error {
   440  	var (
   441  		dest   = filepath.Join(rootfs, node.Path)
   442  		parent = filepath.Dir(dest)
   443  	)
   444  
   445  	if err := os.MkdirAll(parent, 0755); err != nil {
   446  		return err
   447  	}
   448  
   449  	fileMode := node.FileMode
   450  	switch node.Type {
   451  	case 'c':
   452  		fileMode |= syscall.S_IFCHR
   453  	case 'b':
   454  		fileMode |= syscall.S_IFBLK
   455  	default:
   456  		return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path)
   457  	}
   458  
   459  	if err := syscall.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil && !os.IsExist(err) {
   460  		return fmt.Errorf("mknod %s %s", node.Path, err)
   461  	}
   462  
   463  	if err := syscall.Chown(dest, int(node.Uid), int(node.Gid)); err != nil {
   464  		return fmt.Errorf("chown %s to %d:%d", node.Path, node.Uid, node.Gid)
   465  	}
   466  
   467  	return nil
   468  }
   469  
   470  // setupUser changes the groups, gid, and uid for the user inside the container;
   471  // copied from libcontainer, because it is now private there.
   472  func setupUser(userSpec string) error {
   473  	// Set up defaults.
   474  	defaultExecUser := user.ExecUser{
   475  		Uid:  syscall.Getuid(),
   476  		Gid:  syscall.Getgid(),
   477  		Home: "/",
   478  	}
   479  	passwdPath, err := user.GetPasswdPath()
   480  	if err != nil {
   481  		return err
   482  	}
   483  	groupPath, err := user.GetGroupPath()
   484  	if err != nil {
   485  		return err
   486  	}
   487  	execUser, err := user.GetExecUserPath(userSpec, &defaultExecUser, passwdPath, groupPath)
   488  	if err != nil {
   489  		return err
   490  	}
   491  	if err := syscall.Setgroups(execUser.Sgids); err != nil {
   492  		return err
   493  	}
   494  	if err := system.Setgid(execUser.Gid); err != nil {
   495  		return err
   496  	}
   497  	if err := system.Setuid(execUser.Uid); err != nil {
   498  		return err
   499  	}
   500  	// if we didn't get HOME already, set it based on the user's HOME
   501  	if envHome := os.Getenv("HOME"); envHome == "" {
   502  		if err := os.Setenv("HOME", execUser.Home); err != nil {
   503  			return err
   504  		}
   505  	}
   506  	return nil
   507  }
   508  
   509  // Return the exit code of the process;
   510  // if the process has not exited, -1 will be returned.
   511  func getExitCode(c *execdriver.Command) int {
   512  	if c.ProcessConfig.ProcessState == nil {
   513  		return -1
   514  	}
   515  	return c.ProcessConfig.ProcessState.Sys().(syscall.WaitStatus).ExitStatus()
   516  }
   517  
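        // Kill signals the container. SIGKILL, or the lack of a process handle, is
        // delegated to the lxc tools via KillLxc; other signals are sent directly to
        // the lxc-start process.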
   518  func (d *driver) Kill(c *execdriver.Command, sig int) error {
   519  	if sig == 9 || c.ProcessConfig.Process == nil {
   520  		return KillLxc(c.ID, sig)
   521  	}
   522  
   523  	return c.ProcessConfig.Process.Signal(syscall.Signal(sig))
   524  }
   525  
   526  func (d *driver) Pause(c *execdriver.Command) error {
   527  	_, err := exec.LookPath("lxc-freeze")
   528  	if err == nil {
   529  		output, errExec := exec.Command("lxc-freeze", "-n", c.ID).CombinedOutput()
   530  		if errExec != nil {
   531  			return fmt.Errorf("Err: %s Output: %s", errExec, output)
   532  		}
   533  	}
   534  
   535  	return err
   536  }
   537  
   538  func (d *driver) Unpause(c *execdriver.Command) error {
   539  	_, err := exec.LookPath("lxc-unfreeze")
   540  	if err == nil {
   541  		output, errExec := exec.Command("lxc-unfreeze", "-n", c.ID).CombinedOutput()
   542  		if errExec != nil {
   543  			return fmt.Errorf("Err: %s Output: %s", errExec, output)
   544  		}
   545  	}
   546  
   547  	return err
   548  }
   549  
   550  func (d *driver) Terminate(c *execdriver.Command) error {
   551  	return KillLxc(c.ID, 9)
   552  }
   553  
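        // version returns the installed lxc version, preferring lxc-version and
        // falling back to lxc-start --version.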
   554  func (d *driver) version() string {
   555  	var (
   556  		version string
   557  		output  []byte
   558  		err     error
   559  	)
   560  	if _, errPath := exec.LookPath("lxc-version"); errPath == nil {
   561  		output, err = exec.Command("lxc-version").CombinedOutput()
   562  	} else {
   563  		output, err = exec.Command("lxc-start", "--version").CombinedOutput()
   564  	}
   565  	if err == nil {
   566  		version = strings.TrimSpace(string(output))
   567  		if parts := strings.SplitN(version, ":", 2); len(parts) == 2 {
   568  			version = strings.TrimSpace(parts[1])
   569  		}
   570  	}
   571  	return version
   572  }
   573  
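        // KillLxc signals the container with lxc-kill when available; otherwise it
        // falls back to lxc-stop -k, which always sends SIGKILL.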
   574  func KillLxc(id string, sig int) error {
   575  	var (
   576  		err    error
   577  		output []byte
   578  	)
   579  	_, err = exec.LookPath("lxc-kill")
   580  	if err == nil {
   581  		output, err = exec.Command("lxc-kill", "-n", id, strconv.Itoa(sig)).CombinedOutput()
   582  	} else {
   583  		// lxc-stop does not take arbitrary signals like lxc-kill does
   584  		output, err = exec.Command("lxc-stop", "-k", "-n", id).CombinedOutput()
   585  	}
   586  	if err != nil {
   587  		return fmt.Errorf("Err: %s Output: %s", err, output)
   588  	}
   589  	return nil
   590  }
   591  
   592  // wait for the container to start and return the pid of its init process
   593  func (d *driver) waitForStart(c *execdriver.Command, waitLock chan struct{}) (int, error) {
   594  	var (
   595  		err    error
   596  		output []byte
   597  	)
   598  	// We wait for the container to be fully running.
   599  	// Timeout after 5 seconds. In case of broken pipe, just retry.
   600  	// Note: The container can run and finish correctly before
   601  	// the end of this loop
   602  	for now := time.Now(); time.Since(now) < 5*time.Second; {
   603  		select {
   604  		case <-waitLock:
   605  			// If the process dies while waiting for it, just return
   606  			return -1, nil
   607  		default:
   608  		}
   609  
   610  		output, err = d.getInfo(c.ID)
   611  		if err == nil {
   612  			info, err := parseLxcInfo(string(output))
   613  			if err != nil {
   614  				return -1, err
   615  			}
   616  			if info.Running {
   617  				return info.Pid, nil
   618  			}
   619  		}
   620  		time.Sleep(50 * time.Millisecond)
   621  	}
   622  	return -1, execdriver.ErrNotRunning
   623  }
   624  
   625  func (d *driver) getInfo(id string) ([]byte, error) {
   626  	return exec.Command("lxc-info", "-n", id).CombinedOutput()
   627  }
   628  
   629  type info struct {
   630  	ID     string
   631  	driver *driver
   632  }
   633  
   634  func (i *info) IsRunning() bool {
   635  	var running bool
   636  
   637  	output, err := i.driver.getInfo(i.ID)
   638  	if err != nil {
   639  		logrus.Errorf("Error getting info for lxc container %s: %s (%s)", i.ID, err, output)
   640  		return false
   641  	}
   642  	if strings.Contains(string(output), "RUNNING") {
   643  		running = true
   644  	}
   645  	return running
   646  }
   647  
   648  func (d *driver) Info(id string) execdriver.Info {
   649  	return &info{
   650  		ID:     id,
   651  		driver: d,
   652  	}
   653  }
   654  
   655  func findCgroupRootAndDir(subsystem string) (string, string, error) {
   656  	cgroupRoot, err := cgroups.FindCgroupMountpoint(subsystem)
   657  	if err != nil {
   658  		return "", "", err
   659  	}
   660  
   661  	cgroupDir, err := cgroups.GetThisCgroupDir(subsystem)
   662  	if err != nil {
   663  		return "", "", err
   664  	}
   665  	return cgroupRoot, cgroupDir, nil
   666  }
   667  
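        // GetPidsForContainer reads the pids of all tasks in the container's cpu
        // cgroup from its tasks file.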
   668  func (d *driver) GetPidsForContainer(id string) ([]int, error) {
   669  	pids := []int{}
   670  
   671  	// cpu is chosen because it is the only non-optional subsystem in cgroups
   672  	subsystem := "cpu"
   673  	cgroupRoot, cgroupDir, err := findCgroupRootAndDir(subsystem)
   674  	if err != nil {
   675  		return pids, err
   676  	}
   677  
   678  	filename := filepath.Join(cgroupRoot, cgroupDir, id, "tasks")
   679  	if _, err := os.Stat(filename); os.IsNotExist(err) {
   680  		// With more recent lxc versions, the cgroup will be in lxc/
   681  		filename = filepath.Join(cgroupRoot, cgroupDir, "lxc", id, "tasks")
   682  	}
   683  
   684  	output, err := ioutil.ReadFile(filename)
   685  	if err != nil {
   686  		return pids, err
   687  	}
   688  	for _, p := range strings.Split(string(output), "\n") {
   689  		if len(p) == 0 {
   690  			continue
   691  		}
   692  		pid, err := strconv.Atoi(p)
   693  		if err != nil {
   694  			return pids, fmt.Errorf("Invalid pid '%s': %s", p, err)
   695  		}
   696  		pids = append(pids, pid)
   697  	}
   698  	return pids, nil
   699  }
   700  
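        // linkLxcStart (re)creates the lxc-start-unconfined symlink under the driver
        // root, pointing at the system lxc-start binary; privileged containers are
        // started through this path so that the default AppArmor profile is not applied.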
   701  func linkLxcStart(root string) error {
   702  	sourcePath, err := exec.LookPath("lxc-start")
   703  	if err != nil {
   704  		return err
   705  	}
   706  	targetPath := path.Join(root, "lxc-start-unconfined")
   707  
   708  	if _, err := os.Lstat(targetPath); err != nil && !os.IsNotExist(err) {
   709  		return err
   710  	} else if err == nil {
   711  		if err := os.Remove(targetPath); err != nil {
   712  			return err
   713  		}
   714  	}
   715  	return os.Symlink(sourcePath, targetPath)
   716  }
   717  
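        // rootIsShared reports whether / is mounted with shared propagation, according
        // to /proc/self/mountinfo.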
   718  // TODO: This can be moved to the mountinfo reader in the mount pkg
   719  func rootIsShared() bool {
   720  	if data, err := ioutil.ReadFile("/proc/self/mountinfo"); err == nil {
   721  		for _, line := range strings.Split(string(data), "\n") {
   722  			cols := strings.Split(line, " ")
   723  			if len(cols) >= 6 && cols[4] == "/" {
   724  				return strings.HasPrefix(cols[6], "shared")
   725  			}
   726  		}
   727  	}
   728  
   729  	// No idea, probably safe to assume so
   730  	return true
   731  }
   732  
   733  func (d *driver) containerDir(containerId string) string {
   734  	return path.Join(d.libPath, "containers", containerId)
   735  }
   736  
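        // generateLXCConfig renders the LXC template into config.lxc in the container
        // directory and returns the path of the generated file.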
   737  func (d *driver) generateLXCConfig(c *execdriver.Command) (string, error) {
   738  	root := path.Join(d.containerDir(c.ID), "config.lxc")
   739  
   740  	fo, err := os.Create(root)
   741  	if err != nil {
   742  		return "", err
   743  	}
   744  	defer fo.Close()
   745  
   746  	if err := LxcTemplateCompiled.Execute(fo, struct {
   747  		*execdriver.Command
   748  		AppArmor bool
   749  	}{
   750  		Command:  c,
   751  		AppArmor: d.apparmor,
   752  	}); err != nil {
   753  		return "", err
   754  	}
   755  
   756  	return root, nil
   757  }
   758  
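        // generateEnvConfig writes the process environment as JSON to config.env in
        // the container directory and registers a mount of that file at /.dockerenv.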
   759  func (d *driver) generateEnvConfig(c *execdriver.Command) error {
   760  	data, err := json.Marshal(c.ProcessConfig.Env)
   761  	if err != nil {
   762  		return err
   763  	}
   764  	p := path.Join(d.libPath, "containers", c.ID, "config.env")
   765  	c.Mounts = append(c.Mounts, execdriver.Mount{
   766  		Source:      p,
   767  		Destination: "/.dockerenv",
   768  		Writable:    false,
   769  		Private:     true,
   770  	})
   771  
   772  	return ioutil.WriteFile(p, data, 0600)
   773  }
   774  
   775  // Clean not implemented for lxc
   776  func (d *driver) Clean(id string) error {
   777  	return nil
   778  }
   779  
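        // TtyConsole attaches the container's stdio to a pty pair: the slave side is
        // handed to the lxc-start command and the master side is copied to and from
        // the attach pipes.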
   780  type TtyConsole struct {
   781  	MasterPty *os.File
   782  	SlavePty  *os.File
   783  }
   784  
   785  func NewTtyConsole(processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes) (*TtyConsole, error) {
   786  	// lxc is special in that we cannot create the master outside of the container without
   787  	// opening the slave, because we have nothing to provide to the cmd. We have to open both and do
   788  	// the setup on the command right now instead of passing the console path to lxc and telling it
   789  	// to open up that console. We save a couple of open files in the native driver because we can do
   790  	// this.
   791  	ptyMaster, ptySlave, err := pty.Open()
   792  	if err != nil {
   793  		return nil, err
   794  	}
   795  
   796  	tty := &TtyConsole{
   797  		MasterPty: ptyMaster,
   798  		SlavePty:  ptySlave,
   799  	}
   800  
   801  	if err := tty.AttachPipes(&processConfig.Cmd, pipes); err != nil {
   802  		tty.Close()
   803  		return nil, err
   804  	}
   805  
   806  	processConfig.Console = tty.SlavePty.Name()
   807  
   808  	return tty, nil
   809  }
   810  
   811  func (t *TtyConsole) Resize(h, w int) error {
   812  	return term.SetWinsize(t.MasterPty.Fd(), &term.Winsize{Height: uint16(h), Width: uint16(w)})
   813  }
   814  
   815  func (t *TtyConsole) AttachPipes(command *exec.Cmd, pipes *execdriver.Pipes) error {
   816  	command.Stdout = t.SlavePty
   817  	command.Stderr = t.SlavePty
   818  
   819  	go func() {
   820  		if wb, ok := pipes.Stdout.(interface {
   821  			CloseWriters() error
   822  		}); ok {
   823  			defer wb.CloseWriters()
   824  		}
   825  
   826  		io.Copy(pipes.Stdout, t.MasterPty)
   827  	}()
   828  
   829  	if pipes.Stdin != nil {
   830  		command.Stdin = t.SlavePty
   831  		command.SysProcAttr.Setctty = true
   832  
   833  		go func() {
   834  			io.Copy(t.MasterPty, pipes.Stdin)
   835  
   836  			pipes.Stdin.Close()
   837  		}()
   838  	}
   839  	return nil
   840  }
   841  
   842  func (t *TtyConsole) Close() error {
   843  	t.SlavePty.Close()
   844  	return t.MasterPty.Close()
   845  }
   846  
   847  func (d *driver) Exec(c *execdriver.Command, processConfig *execdriver.ProcessConfig, pipes *execdriver.Pipes, startCallback execdriver.StartCallback) (int, error) {
   848  	return -1, ErrExec
   849  }
   850  
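        // Stats returns resource usage statistics for an active container, based on
        // its cgroup and the state stored in the container directory.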
   851  func (d *driver) Stats(id string) (*execdriver.ResourceStats, error) {
   852  	if _, ok := d.activeContainers[id]; !ok {
   853  		return nil, fmt.Errorf("%s is not a key in active containers", id)
   854  	}
   855  	return execdriver.Stats(d.containerDir(id), d.activeContainers[id].container.Cgroups.Memory, d.machineMemory)
   856  }