github.com/DaoCloud/dao@v0.0.0-20161212064103-c3dbfd13ee36/libcontainerd/client_windows.go

package libcontainerd

import (
	"errors"
	"fmt"
	"io"
	"path/filepath"
	"strings"
	"syscall"

	"golang.org/x/net/context"

	"github.com/Microsoft/hcsshim"
	"github.com/Sirupsen/logrus"
)

type client struct {
	clientCommon

	// Platform specific properties below here (none presently on Windows)
}

// Win32 error codes that are used for various workarounds.
// These really should be ALL_CAPS to match Go's syscall library and standard
// Win32 error conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)
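
// As an illustration only (no such check appears in this file): code that
// handles failures from HCS could compare against these values, e.g.
//
//	if err == ErrorNoNetwork {
//		// the network stack is not available yet; retry or fail cleanly
//	}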

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard-code "docker" here.
const defaultOwner = "docker"

// Create is the entrypoint to create a container from a spec, and if successfully
// created, start it too.
func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) error {
	logrus.Debugln("libcontainerd: client.Create() with spec", spec)

	configuration := &hcsshim.ContainerConfig{
		SystemType: "Container",
		Name:       containerID,
		Owner:      defaultOwner,

		VolumePath:              spec.Root.Path,
		IgnoreFlushesDuringBoot: spec.Windows.FirstStart,
		LayerFolderPath:         spec.Windows.LayerFolder,
		HostName:                spec.Hostname,
	}
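
	// Note: VolumePath and LayerFolderPath are set provisionally here; the
	// Hyper-V branch below clears VolumePath again, as it must always be
	// empty for Hyper-V containers.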

	if spec.Windows.Networking != nil {
		configuration.EndpointList = spec.Windows.Networking.EndpointList
	}

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = *spec.Windows.Resources.CPU.Shares
			}
			if spec.Windows.Resources.CPU.Percent != nil {
				configuration.ProcessorMaximum = *spec.Windows.Resources.CPU.Percent * 100 // ProcessorMaximum is a value between 1 and 10000
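				// For example, CPU.Percent = 50 yields ProcessorMaximum = 5000,
				// i.e. half of the host's total processor capacity.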
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
				configuration.MemoryMaximumInMB = *spec.Windows.Resources.Memory.Limit / 1024 / 1024
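				// Limit is expressed in bytes, so e.g. a 1073741824-byte
				// (1GB) limit becomes MemoryMaximumInMB = 1024.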
			}
		}
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
			if spec.Windows.Resources.Storage.SandboxSize != nil {
				configuration.StorageSandboxSize = *spec.Windows.Resources.Storage.SandboxSize
			}
		}
	}

	if spec.Windows.HvRuntime != nil {
		configuration.VolumePath = "" // Always empty for Hyper-V containers
		configuration.HvPartition = true
		configuration.HvRuntime = &hcsshim.HvRuntime{
			ImagePath: spec.Windows.HvRuntime.ImagePath,
		}
	}

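	// Settle the isolation mode. For Hyper-V containers the scratch
	// sandbox conventionally sits alongside the image layers, so
	// SandboxPath is the parent of the layer folder; the else branch
	// restates the provisional values set above for process-isolated
	// containers.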
	if configuration.HvPartition {
		configuration.SandboxPath = filepath.Dir(spec.Windows.LayerFolder)
	} else {
		configuration.VolumePath = spec.Root.Path
		configuration.LayerFolderPath = spec.Windows.LayerFolder
	}

	for _, option := range options {
		if s, ok := option.(*ServicingOption); ok {
			configuration.Servicing = s.IsServicing
			break
		}
	}

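	// Each read-only image layer is identified to HCS by a GUID derived
	// deterministically from the layer directory's name via
	// hcsshim.NameToGuid, paired with the layer's on-disk path.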
	for _, layerPath := range spec.Windows.LayerPaths {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts, etc.) to the structure.
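	// For instance, a hypothetical `docker run -v C:\data:C:\appdata`
	// would surface here as a single mount and become
	// {HostPath: C:\data, ContainerPath: C:\appdata, ReadOnly: false}.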
	mds := make([]hcsshim.MappedDir, len(spec.Mounts))
	for i, mount := range spec.Mounts {
		mds[i] = hcsshim.MappedDir{
			HostPath:      mount.Source,
			ContainerPath: mount.Destination,
			ReadOnly:      mount.Readonly}
	}
	configuration.MappedDirectories = mds

	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	container := &container{
		containerCommon: containerCommon{
			process: process{
				processCommon: processCommon{
					containerID:  containerID,
					client:       clnt,
					friendlyName: InitFriendlyName,
				},
				commandLine: strings.Join(spec.Process.Args, " "),
			},
			processes: make(map[string]*process),
		},
		ociSpec:      spec,
		hcsContainer: hcsContainer,
	}

	container.options = options
	for _, option := range options {
		if err := option.Apply(container); err != nil {
			logrus.Errorf("libcontainerd: %v", err)
		}
	}

	// Call start, and if it fails, delete the container from our internal
	// structure; start itself keeps HCS in sync by deleting the container
	// there on failure.
	logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID)
	if err := container.start(); err != nil {
		clnt.deleteContainer(containerID)
		return err
	}

	logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID)
	return nil
}

// AddProcess is the handler for adding a process to an already running
// container. It's called through docker exec.
func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}
	// Note we always tell HCS to create stdout as it's required
	// regardless of the '-i' or '-t' options, so that docker can always
	// grab the output through logs. We also tell HCS to always create
	// stdin, even if it's not used - it will be closed shortly. Stderr is
	// only created if we're not running with '-t'.
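	// Concretely, a `docker exec -t` therefore gets EmulateConsole=true
	// with stdin and stdout pipes but no stderr pipe, while a non-tty exec
	// gets all three pipes.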
	createProcessParms := hcsshim.ProcessConfig{
		EmulateConsole:   procToAdd.Terminal,
		ConsoleSize:      procToAdd.InitialConsoleSize,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !procToAdd.Terminal,
	}

	// Take working directory from the process to add if it is defined,
	// otherwise take from the first process.
	if procToAdd.Cwd != "" {
		createProcessParms.WorkingDirectory = procToAdd.Cwd
	} else {
		createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env)
	createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ")
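	// Windows processes receive a single command-line string rather than
	// an argv vector, hence the join above. Note that arguments containing
	// spaces are not re-quoted here.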

	logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var stdout, stderr io.ReadCloser
	var stdin io.WriteCloser
	newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms)
	if err != nil {
		logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err)
		return err
	}

	stdin, stdout, stderr, err = newProcess.Stdio()
	if err != nil {
		logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err)
		return err
	}

	iopipe := &IOPipe{Terminal: procToAdd.Terminal}
	iopipe.Stdin = createStdInCloser(stdin, newProcess)

	// TEMP: Work around Windows BS/DEL behavior.
	iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, container.ociSpec.Platform.OSVersion, procToAdd.Terminal)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		iopipe.Stdout = openReaderFromPipe(stdout)
	}
	if stderr != nil {
		iopipe.Stderr = openReaderFromPipe(stderr)
	}

	pid := newProcess.Pid()

	proc := &process{
		processCommon: processCommon{
			containerID:  containerID,
			friendlyName: processFriendlyName,
			client:       clnt,
			systemPid:    uint32(pid),
		},
		commandLine: createProcessParms.CommandLine,
		hcsProcess:  newProcess,
	}

	// Add the process to the container's list of processes
	container.processes[processFriendlyName] = proc

	// Make sure the lock is not held while calling back into the daemon
	clnt.unlock(containerID)

	// Tell the engine to attach streams back to the client
	if err := clnt.backend.AttachStreams(processFriendlyName, *iopipe); err != nil {
		clnt.lock(containerID)
		return err
	}

	// Lock again so that the defer unlock doesn't fail. (I really don't like this code)
	clnt.lock(containerID)

	// Spin up a goroutine waiting for exit to handle cleanup
	go container.waitExit(proc, false)

	return nil
}

// Signal handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake support for a regular stop and for -9 as a force-kill.
func (clnt *client) Signal(containerID string, sig int) error {
	var (
		cont *container
		err  error
	)

	// Get the container as we need it to find the pid of the process.
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	if cont, err = clnt.getContainer(containerID); err != nil {
		return err
	}

	cont.manualStopRequested = true

	logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid)

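	// A SIGKILL (docker kill, or the escalation after a stop timeout)
	// tears down the entire compute system; any other signal is treated as
	// a request to kill the container's init process.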
	if syscall.Signal(sig) == syscall.SIGKILL {
		// Terminate the compute system
		if err := cont.hcsContainer.Terminate(); err != nil {
			if err != hcsshim.ErrVmcomputeOperationPending {
				logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err)
			}
		}
	} else {
		// Terminate the process
		if err := cont.hcsProcess.Kill(); err != nil {
			// Ignore errors
			logrus.Warnf("libcontainerd: failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err)
		}
	}

	return nil
}

// While Linux has support for the full range of signals, signals aren't
// really implemented on Windows. We terminate the specified process
// regardless of which signal is requested.
func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	for _, p := range cont.processes {
		if p.friendlyName == processFriendlyName {
			return hcsshim.TerminateProcessInComputeSystem(containerID, p.systemPid)
		}
	}

	return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID)
}

// Resize handles a CLI event to resize an interactive docker run or docker exec
// window.
func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	h, w := uint16(height), uint16(width)

	if processFriendlyName == InitFriendlyName {
		logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid)
		return cont.process.hcsProcess.ResizeConsole(w, h)
	}

	for _, p := range cont.processes {
		if p.friendlyName == processFriendlyName {
			logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid)
			return p.hcsProcess.ResizeConsole(w, h)
		}
	}

	return fmt.Errorf("Resize could not find process %s in container %s", processFriendlyName, containerID)
}

// Pause handles pause requests for containers
func (clnt *client) Pause(containerID string) error {
	return errors.New("Windows: Containers cannot be paused")
}

// Resume handles resume requests for containers
func (clnt *client) Resume(containerID string) error {
	return errors.New("Windows: Containers cannot be resumed")
}

// Stats handles stats requests for containers
func (clnt *client) Stats(containerID string) (*Stats, error) {
	return nil, errors.New("Windows: Stats not implemented")
}

// Restore is the handler for restoring a container
func (clnt *client) Restore(containerID string, unusedOnWindows ...CreateOption) error {
	// TODO Windows: Implement this. For now, just tell the backend the container exited.
	logrus.Debugf("libcontainerd: Restore(%s)", containerID)
	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State:    StateExit,
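			// 1 << 31 lies outside the range of ordinary process exit
			// codes and presumably serves as a sentinel for "exit code
			// unknown", since the container was not actually restored.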
			ExitCode: 1 << 31,
		}})
}

// GetPidsForContainer returns a list of process IDs running in a container.
// Although implemented, this is not used on Windows.
func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
	var pids []int
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return nil, err
	}

	// Add the first process
	pids = append(pids, int(cont.containerCommon.systemPid))
	// And add all the exec'd processes
	for _, p := range cont.processes {
		pids = append(pids, int(p.processCommon.systemPid))
	}
	return pids, nil
}

// Summary returns a summary of the processes running in a container.
// This is present on Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (clnt *client) Summary(containerID string) ([]Summary, error) {
	var s []Summary
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return nil, err
	}

	// Add the first process
	s = append(s, Summary{
		Pid:     cont.containerCommon.systemPid,
		Command: cont.ociSpec.Process.Args[0]})
	// And add all the exec'd processes
	for _, p := range cont.processes {
		s = append(s, Summary{
			Pid:     p.processCommon.systemPid,
			Command: p.commandLine})
	}
	return s, nil
}

// UpdateResources updates resources for a running container.
func (clnt *client) UpdateResources(containerID string, resources Resources) error {
	// Updating resources isn't supported on Windows, but we return nil so
	// that attempting to update a container doesn't fail outright.
	return nil
}