github.com/noxiouz/docker@v0.7.3-0.20160629055221-3d231c78e8c5/libcontainerd/client_windows.go (about)

     1  package libcontainerd
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"io"
     7  	"path/filepath"
     8  	"strings"
     9  	"syscall"
    10  
    11  	"github.com/Microsoft/hcsshim"
    12  	"github.com/Sirupsen/logrus"
    13  )
    14  
// client is the Windows implementation of the libcontainerd client.
// Containers are driven through the Host Compute Service (HCS) via the
// hcsshim package rather than through a containerd daemon.
type client struct {
	clientCommon

	// Platform specific properties below here (none presently on Windows)
}
    20  
// Win32 error codes that are used for various workarounds.
// These really should be ALL_CAPS to match Go's syscall library and standard
// Win32 error conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)
    30  
// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard code "docker" in the case
// of docker. It is set as the Owner field on every ContainerConfig built
// by this client.
const defaultOwner = "docker"
    35  
    36  // Create is the entrypoint to create a container from a spec, and if successfully
    37  // created, start it too.
    38  func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) error {
    39  	logrus.Debugln("LCD client.Create() with spec", spec)
    40  
    41  	configuration := &hcsshim.ContainerConfig{
    42  		SystemType: "Container",
    43  		Name:       containerID,
    44  		Owner:      defaultOwner,
    45  
    46  		VolumePath:              spec.Root.Path,
    47  		IgnoreFlushesDuringBoot: spec.Windows.FirstStart,
    48  		LayerFolderPath:         spec.Windows.LayerFolder,
    49  		HostName:                spec.Hostname,
    50  	}
    51  
    52  	if spec.Windows.Networking != nil {
    53  		configuration.EndpointList = spec.Windows.Networking.EndpointList
    54  	}
    55  
    56  	if spec.Windows.Resources != nil {
    57  		if spec.Windows.Resources.CPU != nil {
    58  			if spec.Windows.Resources.CPU.Shares != nil {
    59  				configuration.ProcessorWeight = *spec.Windows.Resources.CPU.Shares
    60  			}
    61  			if spec.Windows.Resources.CPU.Percent != nil {
    62  				configuration.ProcessorMaximum = *spec.Windows.Resources.CPU.Percent * 100 // ProcessorMaximum is a value between 1 and 10000
    63  			}
    64  		}
    65  		if spec.Windows.Resources.Memory != nil {
    66  			if spec.Windows.Resources.Memory.Limit != nil {
    67  				configuration.MemoryMaximumInMB = *spec.Windows.Resources.Memory.Limit / 1024 / 1024
    68  			}
    69  		}
    70  		if spec.Windows.Resources.Storage != nil {
    71  			if spec.Windows.Resources.Storage.Bps != nil {
    72  				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
    73  			}
    74  			if spec.Windows.Resources.Storage.Iops != nil {
    75  				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
    76  			}
    77  			if spec.Windows.Resources.Storage.SandboxSize != nil {
    78  				configuration.StorageSandboxSize = *spec.Windows.Resources.Storage.SandboxSize
    79  			}
    80  		}
    81  	}
    82  
    83  	if spec.Windows.HvRuntime != nil {
    84  		configuration.HvPartition = true
    85  		configuration.HvRuntime = &hcsshim.HvRuntime{
    86  			ImagePath: spec.Windows.HvRuntime.ImagePath,
    87  		}
    88  
    89  		// Images with build verison < 14350 don't support running with clone, but
    90  		// Windows cannot automatically detect this. Explicitly block cloning in this
    91  		// case.
    92  		if build := buildFromVersion(spec.Platform.OSVersion); build > 0 && build < 14350 {
    93  			configuration.HvRuntime.SkipTemplate = true
    94  		}
    95  	}
    96  
    97  	if configuration.HvPartition {
    98  		configuration.SandboxPath = filepath.Dir(spec.Windows.LayerFolder)
    99  	} else {
   100  		configuration.VolumePath = spec.Root.Path
   101  		configuration.LayerFolderPath = spec.Windows.LayerFolder
   102  	}
   103  
   104  	for _, option := range options {
   105  		if s, ok := option.(*ServicingOption); ok {
   106  			configuration.Servicing = s.IsServicing
   107  			break
   108  		}
   109  	}
   110  
   111  	for _, layerPath := range spec.Windows.LayerPaths {
   112  		_, filename := filepath.Split(layerPath)
   113  		g, err := hcsshim.NameToGuid(filename)
   114  		if err != nil {
   115  			return err
   116  		}
   117  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   118  			ID:   g.ToString(),
   119  			Path: layerPath,
   120  		})
   121  	}
   122  
   123  	// Add the mounts (volumes, bind mounts etc) to the structure
   124  	mds := make([]hcsshim.MappedDir, len(spec.Mounts))
   125  	for i, mount := range spec.Mounts {
   126  		mds[i] = hcsshim.MappedDir{
   127  			HostPath:      mount.Source,
   128  			ContainerPath: mount.Destination,
   129  			ReadOnly:      mount.Readonly}
   130  	}
   131  	configuration.MappedDirectories = mds
   132  
   133  	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
   134  	if err != nil {
   135  		return err
   136  	}
   137  
   138  	// Construct a container object for calling start on it.
   139  	container := &container{
   140  		containerCommon: containerCommon{
   141  			process: process{
   142  				processCommon: processCommon{
   143  					containerID:  containerID,
   144  					client:       clnt,
   145  					friendlyName: InitFriendlyName,
   146  				},
   147  				commandLine: strings.Join(spec.Process.Args, " "),
   148  			},
   149  			processes: make(map[string]*process),
   150  		},
   151  		ociSpec:      spec,
   152  		hcsContainer: hcsContainer,
   153  	}
   154  
   155  	container.options = options
   156  	for _, option := range options {
   157  		if err := option.Apply(container); err != nil {
   158  			logrus.Error(err)
   159  		}
   160  	}
   161  
   162  	// Call start, and if it fails, delete the container from our
   163  	// internal structure, start will keep HCS in sync by deleting the
   164  	// container there.
   165  	logrus.Debugf("Create() id=%s, Calling start()", containerID)
   166  	if err := container.start(); err != nil {
   167  		clnt.deleteContainer(containerID)
   168  		return err
   169  	}
   170  
   171  	logrus.Debugf("Create() id=%s completed successfully", containerID)
   172  	return nil
   173  
   174  }
   175  
// AddProcess is the handler for adding a process to an already running
// container. It's called through docker exec.
//
// The new process is created via HCS inside the existing compute system,
// its stdio pipes are handed back to the daemon via AttachStreams, and a
// goroutine is spun up to reap the process when it exits.
func (clnt *client) AddProcess(containerID, processFriendlyName string, procToAdd Process) error {

	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}
	// Note we always tell HCS to
	// create stdout as it's required regardless of '-i' or '-t' options, so that
	// docker can always grab the output through logs. We also tell HCS to always
	// create stdin, even if it's not used - it will be closed shortly. Stderr
	// is only created if we're not -t (a terminal multiplexes stderr onto stdout).
	createProcessParms := hcsshim.ProcessConfig{
		EmulateConsole:   procToAdd.Terminal,
		ConsoleSize:      procToAdd.InitialConsoleSize,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !procToAdd.Terminal,
	}

	// Take working directory from the process to add if it is defined,
	// otherwise take from the first process.
	if procToAdd.Cwd != "" {
		createProcessParms.WorkingDirectory = procToAdd.Cwd
	} else {
		createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env)
	// HCS takes a single command line rather than an argv array.
	createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ")

	logrus.Debugf("commandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var stdout, stderr io.ReadCloser
	var stdin io.WriteCloser
	newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms)
	if err != nil {
		logrus.Errorf("AddProcess %s CreateProcess() failed %s", containerID, err)
		return err
	}

	stdin, stdout, stderr, err = newProcess.Stdio()
	if err != nil {
		logrus.Errorf("%s getting std pipes failed %s", containerID, err)
		return err
	}

	iopipe := &IOPipe{Terminal: procToAdd.Terminal}
	iopipe.Stdin = createStdInCloser(stdin, newProcess)

	// TEMP: Work around Windows BS/DEL behavior.
	iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, container.ociSpec.Platform.OSVersion, procToAdd.Terminal)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		iopipe.Stdout = openReaderFromPipe(stdout)
	}
	if stderr != nil {
		iopipe.Stderr = openReaderFromPipe(stderr)
	}

	pid := newProcess.Pid()

	proc := &process{
		processCommon: processCommon{
			containerID:  containerID,
			friendlyName: processFriendlyName,
			client:       clnt,
			systemPid:    uint32(pid),
		},
		commandLine: createProcessParms.CommandLine,
		hcsProcess:  newProcess,
	}

	// Add the process to the container's list of processes
	container.processes[processFriendlyName] = proc

	// Make sure the lock is not held while calling back into the daemon
	clnt.unlock(containerID)

	// Tell the engine to attach streams back to the client
	if err := clnt.backend.AttachStreams(processFriendlyName, *iopipe); err != nil {
		return err
	}

	// Lock again so that the defer unlock doesn't fail. (I really don't like this code)
	// NOTE(review): the container is unlocked while AttachStreams runs, so another
	// caller could mutate container.processes in that window; and on the
	// AttachStreams error path above we return while unlocked, yet the deferred
	// unlock still fires — confirm lock()/unlock() tolerate that imbalance.
	clnt.lock(containerID)

	// Spin up a go routine waiting for exit to handle cleanup
	go container.waitExit(proc, false)

	return nil
}
   274  
   275  // Signal handles `docker stop` on Windows. While Linux has support for
   276  // the full range of signals, signals aren't really implemented on Windows.
   277  // We fake supporting regular stop and -9 to force kill.
   278  func (clnt *client) Signal(containerID string, sig int) error {
   279  	var (
   280  		cont *container
   281  		err  error
   282  	)
   283  
   284  	// Get the container as we need it to find the pid of the process.
   285  	clnt.lock(containerID)
   286  	defer clnt.unlock(containerID)
   287  	if cont, err = clnt.getContainer(containerID); err != nil {
   288  		return err
   289  	}
   290  
   291  	cont.manualStopRequested = true
   292  
   293  	logrus.Debugf("lcd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid)
   294  
   295  	if syscall.Signal(sig) == syscall.SIGKILL {
   296  		// Terminate the compute system
   297  		if err := cont.hcsContainer.Terminate(); err != nil {
   298  			if err != hcsshim.ErrVmcomputeOperationPending {
   299  				logrus.Errorf("Failed to terminate %s - %q", containerID, err)
   300  			}
   301  		}
   302  	} else {
   303  		// Terminate Process
   304  		if err := cont.hcsProcess.Kill(); err != nil {
   305  			// ignore errors
   306  			logrus.Warnf("Failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err)
   307  		}
   308  	}
   309  
   310  	return nil
   311  }
   312  
   313  // While Linux has support for the full range of signals, signals aren't really implemented on Windows.
   314  // We try to terminate the specified process whatever signal is requested.
   315  func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error {
   316  	clnt.lock(containerID)
   317  	defer clnt.unlock(containerID)
   318  	cont, err := clnt.getContainer(containerID)
   319  	if err != nil {
   320  		return err
   321  	}
   322  
   323  	for _, p := range cont.processes {
   324  		if p.friendlyName == processFriendlyName {
   325  			return hcsshim.TerminateProcessInComputeSystem(containerID, p.systemPid)
   326  		}
   327  	}
   328  
   329  	return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID)
   330  }
   331  
   332  // Resize handles a CLI event to resize an interactive docker run or docker exec
   333  // window.
   334  func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
   335  	// Get the libcontainerd container object
   336  	clnt.lock(containerID)
   337  	defer clnt.unlock(containerID)
   338  	cont, err := clnt.getContainer(containerID)
   339  	if err != nil {
   340  		return err
   341  	}
   342  
   343  	h, w := uint16(height), uint16(width)
   344  
   345  	if processFriendlyName == InitFriendlyName {
   346  		logrus.Debugln("Resizing systemPID in", containerID, cont.process.systemPid)
   347  		return cont.process.hcsProcess.ResizeConsole(w, h)
   348  	}
   349  
   350  	for _, p := range cont.processes {
   351  		if p.friendlyName == processFriendlyName {
   352  			logrus.Debugln("Resizing exec'd process", containerID, p.systemPid)
   353  			return p.hcsProcess.ResizeConsole(w, h)
   354  		}
   355  	}
   356  
   357  	return fmt.Errorf("Resize could not find containerID %s to resize", containerID)
   358  
   359  }
   360  
// Pause handles pause requests for containers.
// Pausing is not supported on Windows, so this always returns an error.
func (clnt *client) Pause(containerID string) error {
	return errors.New("Windows: Containers cannot be paused")
}
   365  
// Resume handles resume requests for containers.
// Since pausing is not supported on Windows, resuming is meaningless and
// this always returns an error; the message mirrors the one from Pause.
func (clnt *client) Resume(containerID string) error {
	return errors.New("Windows: Containers cannot be paused")
}
   370  
// Stats handles stats requests for containers.
// Not implemented on Windows; always returns a nil Stats and an error.
func (clnt *client) Stats(containerID string) (*Stats, error) {
	return nil, errors.New("Windows: Stats not implemented")
}
   375  
// Restore is the handler for restoring a container
func (clnt *client) Restore(containerID string, unusedOnWindows ...CreateOption) error {
	// TODO Windows: Implement this. For now, just tell the backend the container exited.
	logrus.Debugf("lcd Restore %s", containerID)
	// NOTE(review): ExitCode 1<<31 is out of the range of real Windows exit
	// codes — presumably a sentinel meaning "exit status unknown"; confirm
	// against the StateChanged consumers.
	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State:    StateExit,
			ExitCode: 1 << 31,
		}})
}
   386  
   387  // GetPidsForContainer returns a list of process IDs running in a container.
   388  // Although implemented, this is not used in Windows.
   389  func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
   390  	var pids []int
   391  	clnt.lock(containerID)
   392  	defer clnt.unlock(containerID)
   393  	cont, err := clnt.getContainer(containerID)
   394  	if err != nil {
   395  		return nil, err
   396  	}
   397  
   398  	// Add the first process
   399  	pids = append(pids, int(cont.containerCommon.systemPid))
   400  	// And add all the exec'd processes
   401  	for _, p := range cont.processes {
   402  		pids = append(pids, int(p.processCommon.systemPid))
   403  	}
   404  	return pids, nil
   405  }
   406  
   407  // Summary returns a summary of the processes running in a container.
   408  // This is present in Windows to support docker top. In linux, the
   409  // engine shells out to ps to get process information. On Windows, as
   410  // the containers could be Hyper-V containers, they would not be
   411  // visible on the container host. However, libcontainerd does have
   412  // that information.
   413  func (clnt *client) Summary(containerID string) ([]Summary, error) {
   414  	var s []Summary
   415  	clnt.lock(containerID)
   416  	defer clnt.unlock(containerID)
   417  	cont, err := clnt.getContainer(containerID)
   418  	if err != nil {
   419  		return nil, err
   420  	}
   421  
   422  	// Add the first process
   423  	s = append(s, Summary{
   424  		Pid:     cont.containerCommon.systemPid,
   425  		Command: cont.ociSpec.Process.Args[0]})
   426  	// And add all the exec'd processes
   427  	for _, p := range cont.processes {
   428  		s = append(s, Summary{
   429  			Pid:     p.processCommon.systemPid,
   430  			Command: p.commandLine})
   431  	}
   432  	return s, nil
   433  
   434  }
   435  
// UpdateResources updates resources for a running container.
//
// Updating resources isn't supported on Windows, but nil is returned
// (rather than an error) so that callers which update containers work
// uniformly across platforms.
func (clnt *client) UpdateResources(containerID string, resources Resources) error {
	return nil
}