github.com/ph/moby@v1.13.1/libcontainerd/client_windows.go

package libcontainerd

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"syscall"

	"golang.org/x/net/context"

	"github.com/Microsoft/hcsshim"
	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/sysinfo"
	"github.com/opencontainers/runtime-spec/specs-go"
)

type client struct {
	clientCommon

	// Platform specific properties below here (none presently on Windows)
}

// Win32 error codes that are used for various workarounds.
// These really should be ALL_CAPS to match Go's syscall library and standard
// Win32 error conventions, but golint insists on CamelCase.
const (
	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
)
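
// Illustrative sketch (not part of the original file): a caller comparing a
// returned error against these codes would unwrap the syscall.Errno first,
// for example:
//
//	if errno, ok := err.(syscall.Errno); ok && errno == ErrorNoNetwork {
//		// the network service has not started yet; retry or surface a clearer error
//	}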

// defaultOwner is a tag passed to HCS to allow it to differentiate between
// container creator management stacks. We hard-code "docker" in the case
// of docker.
const defaultOwner = "docker"

// Create is the entrypoint to create a container from a spec and, if successfully
// created, to start it. The table below shows the fields required for HCS JSON calling parameters;
// fields that are not populated are omitted.
// +-----------------+--------------------------------------------+---------------------------------------------------+
// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
// +-----------------+--------------------------------------------+---------------------------------------------------+
// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
// | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
// | SandboxPath     |                                            | %root%\windowsfilter                              |
// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
// +-----------------+--------------------------------------------+---------------------------------------------------+
//
// Isolation=Process example:
//
// {
//	"SystemType": "Container",
//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Owner": "docker",
//	"IsDummy": false,
//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
//	"IgnoreFlushesDuringBoot": true,
//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "5e0055c814a6",
//	"MappedDirectories": [],
//	"HvPartition": false,
//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
//	"Servicing": false
// }
//
// Isolation=Hyper-V example:
//
// {
//	"SystemType": "Container",
//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
//	"Owner": "docker",
//	"IsDummy": false,
//	"IgnoreFlushesDuringBoot": true,
//	"Layers": [{
//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
//	}],
//	"HostName": "475c2c58933b",
//	"MappedDirectories": [],
//	"SandboxPath": "C:\\\\control\\\\windowsfilter",
//	"HvPartition": true,
//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
//	"HvRuntime": {
//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
//	},
//	"Servicing": false
// }
func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	logrus.Debugln("libcontainerd: client.Create() with spec", spec)

	configuration := &hcsshim.ContainerConfig{
		SystemType: "Container",
		Name:       containerID,
		Owner:      defaultOwner,
		IgnoreFlushesDuringBoot: false,
		HostName:                spec.Hostname,
		HvPartition:             false,
	}

	if spec.Windows.Resources != nil {
		if spec.Windows.Resources.CPU != nil {
			if spec.Windows.Resources.CPU.Count != nil {
				// This check is being done here rather than in adaptContainerSettings
				// because we don't want to update the HostConfig in case this container
				// is moved to a host with more CPUs than this one.
				cpuCount := *spec.Windows.Resources.CPU.Count
				hostCPUCount := uint64(sysinfo.NumCPU())
				if cpuCount > hostCPUCount {
					logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
					cpuCount = hostCPUCount
				}
				configuration.ProcessorCount = uint32(cpuCount)
			}
			if spec.Windows.Resources.CPU.Shares != nil {
				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
			}
			if spec.Windows.Resources.CPU.Percent != nil {
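				// e.g. a requested CPU percent of 50 maps to a ProcessorMaximum of 5000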
				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Percent) * 100 // ProcessorMaximum is a value between 1 and 10000
			}
		}
		if spec.Windows.Resources.Memory != nil {
			if spec.Windows.Resources.Memory.Limit != nil {
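				// the OCI spec expresses the limit in bytes, whereas HCS expects megabytes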
				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
			}
		}
		if spec.Windows.Resources.Storage != nil {
			if spec.Windows.Resources.Storage.Bps != nil {
				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
			}
			if spec.Windows.Resources.Storage.Iops != nil {
				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
			}
		}
	}

	var layerOpt *LayerOption
	for _, option := range options {
		if s, ok := option.(*ServicingOption); ok {
			configuration.Servicing = s.IsServicing
			continue
		}
		if f, ok := option.(*FlushOption); ok {
			configuration.IgnoreFlushesDuringBoot = f.IgnoreFlushesDuringBoot
			continue
		}
		if h, ok := option.(*HyperVIsolationOption); ok {
			configuration.HvPartition = h.IsHyperV
			configuration.SandboxPath = h.SandboxPath
			continue
		}
		if l, ok := option.(*LayerOption); ok {
			layerOpt = l
			continue
		}
		if n, ok := option.(*NetworkEndpointsOption); ok {
			configuration.EndpointList = n.Endpoints
			configuration.AllowUnqualifiedDNSQuery = n.AllowUnqualifiedDNSQuery
			continue
		}
		if c, ok := option.(*CredentialsOption); ok {
			configuration.Credentials = c.Credentials
			continue
		}
	}

	// We must have a layer option with at least one path
	if layerOpt == nil || layerOpt.LayerPaths == nil {
		return fmt.Errorf("no layer option or paths were supplied to the runtime")
	}

	if configuration.HvPartition {
		// Find the upper-most utility VM image, since the utility VM does not
		// use layering in RS1.
		// TODO @swernli/jhowardmsft at some point post RS1 this may be re-locatable.
		var uvmImagePath string
		for _, path := range layerOpt.LayerPaths {
			fullPath := filepath.Join(path, "UtilityVM")
			_, err := os.Stat(fullPath)
			if err == nil {
				uvmImagePath = fullPath
				break
			}
			if !os.IsNotExist(err) {
				return err
			}
		}
		if uvmImagePath == "" {
			return errors.New("utility VM image could not be found")
		}
		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
	} else {
		configuration.VolumePath = spec.Root.Path
	}

	configuration.LayerFolderPath = layerOpt.LayerFolderPath

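	// hcsshim.NameToGuid derives a GUID layer ID from each layer directory's base name.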
	for _, layerPath := range layerOpt.LayerPaths {
		_, filename := filepath.Split(layerPath)
		g, err := hcsshim.NameToGuid(filename)
		if err != nil {
			return err
		}
		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
			ID:   g.ToString(),
			Path: layerPath,
		})
	}

	// Add the mounts (volumes, bind mounts, etc.) to the structure
	mds := make([]hcsshim.MappedDir, len(spec.Mounts))
	for i, mount := range spec.Mounts {
		mds[i] = hcsshim.MappedDir{
			HostPath:      mount.Source,
			ContainerPath: mount.Destination,
			ReadOnly:      false,
		}
		for _, o := range mount.Options {
			if strings.ToLower(o) == "ro" {
				mds[i].ReadOnly = true
			}
		}
	}
	configuration.MappedDirectories = mds

	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
	if err != nil {
		return err
	}

	// Construct a container object for calling start on it.
	container := &container{
		containerCommon: containerCommon{
			process: process{
				processCommon: processCommon{
					containerID:  containerID,
					client:       clnt,
					friendlyName: InitFriendlyName,
				},
				commandLine: strings.Join(spec.Process.Args, " "),
			},
			processes: make(map[string]*process),
		},
		ociSpec:      spec,
		hcsContainer: hcsContainer,
	}

	container.options = options
	for _, option := range options {
		if err := option.Apply(container); err != nil {
			logrus.Errorf("libcontainerd: %v", err)
		}
	}

	// Call start, and if it fails, delete the container from our
	// internal structure; start will keep HCS in sync by deleting the
	// container there.
	logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID)
	if err := container.start(attachStdio); err != nil {
		clnt.deleteContainer(containerID)
		return err
	}

	logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID)
	return nil
}

// AddProcess is the handler for adding a process to an already running
// container. It's called through docker exec. It returns the system pid of the
// exec'd process.
func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return -1, err
	}
	// Note we always tell HCS to create stdout as it's required
	// regardless of the '-i' or '-t' options, so that docker can always
	// grab the output through logs. We also tell HCS to always create
	// stdin, even if it's not used - it will be closed shortly. Stderr
	// is only created if we're not running with a TTY (-t).
	createProcessParms := hcsshim.ProcessConfig{
		EmulateConsole:   procToAdd.Terminal,
		CreateStdInPipe:  true,
		CreateStdOutPipe: true,
		CreateStdErrPipe: !procToAdd.Terminal,
	}
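	// ConsoleSize is ordered height first, then width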
	createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height)
	createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width)

	// Take the working directory from the process to add if it is defined,
	// otherwise take it from the container's first process.
	if procToAdd.Cwd != "" {
		createProcessParms.WorkingDirectory = procToAdd.Cwd
	} else {
		createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd
	}

	// Configure the environment for the process
	createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env)
	createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ")
	createProcessParms.User = procToAdd.User.Username

	logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine)

	// Start the command running in the container.
	var stdout, stderr io.ReadCloser
	var stdin io.WriteCloser
	newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms)
	if err != nil {
		logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err)
		return -1, err
	}

	pid := newProcess.Pid()

	stdin, stdout, stderr, err = newProcess.Stdio()
	if err != nil {
		logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err)
		return -1, err
	}

	iopipe := &IOPipe{Terminal: procToAdd.Terminal}
	iopipe.Stdin = createStdInCloser(stdin, newProcess)

	// Convert io.ReadClosers to io.Readers
	if stdout != nil {
		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
	}
	if stderr != nil {
		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
	}

	proc := &process{
		processCommon: processCommon{
			containerID:  containerID,
			friendlyName: processFriendlyName,
			client:       clnt,
			systemPid:    uint32(pid),
		},
		commandLine: createProcessParms.CommandLine,
		hcsProcess:  newProcess,
	}

	// Add the process to the container's list of processes
	container.processes[processFriendlyName] = proc

	// Tell the engine to attach streams back to the client
	if err := attachStdio(*iopipe); err != nil {
		return -1, err
	}

	// Spin up a goroutine waiting for exit to handle cleanup
	go container.waitExit(proc, false)

	return pid, nil
}

// Signal handles `docker stop` on Windows. While Linux has support for
// the full range of signals, signals aren't really implemented on Windows.
// We fake supporting regular stop and -9 to force kill.
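// Concretely, SIGKILL (-9) terminates the whole compute system below, while
// any other signal falls back to killing the container's init process.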
func (clnt *client) Signal(containerID string, sig int) error {
	var (
		cont *container
		err  error
	)

	// Get the container as we need it to get the container handle.
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	if cont, err = clnt.getContainer(containerID); err != nil {
		return err
	}

	cont.manualStopRequested = true

	logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid)

	if syscall.Signal(sig) == syscall.SIGKILL {
		// Terminate the compute system
		if err := cont.hcsContainer.Terminate(); err != nil {
			if !hcsshim.IsPending(err) {
				logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err)
			}
		}
	} else {
		// Terminate the process; errors are logged but otherwise ignored
		if err := cont.hcsProcess.Kill(); err != nil && !hcsshim.IsAlreadyStopped(err) {
			logrus.Warnf("libcontainerd: failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err)
		}
	}

	return nil
}

// SignalProcess handles signals for a process within a container. While Linux
// has support for the full range of signals, signals aren't really implemented
// on Windows. We try to terminate the specified process whatever signal is
// requested.
func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error {
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	for _, p := range cont.processes {
		if p.friendlyName == processFriendlyName {
			return p.hcsProcess.Kill()
		}
	}

	return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID)
}

// Resize handles a CLI event to resize an interactive docker run or docker exec
// window.
func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	h, w := uint16(height), uint16(width)

	if processFriendlyName == InitFriendlyName {
		logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid)
		return cont.process.hcsProcess.ResizeConsole(w, h)
	}

	for _, p := range cont.processes {
		if p.friendlyName == processFriendlyName {
			logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid)
			return p.hcsProcess.ResizeConsole(w, h)
		}
	}

	return fmt.Errorf("Resize could not find containerID %s to resize", containerID)
}

// Pause handles pause requests for containers
func (clnt *client) Pause(containerID string) error {
	unlockContainer := true
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer func() {
		if unlockContainer {
			clnt.unlock(containerID)
		}
	}()
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	for _, option := range container.options {
		if h, ok := option.(*HyperVIsolationOption); ok {
			if !h.IsHyperV {
				return errors.New("cannot pause Windows Server Containers")
			}
			break
		}
	}

	err = container.hcsContainer.Pause()
	if err != nil {
		return err
	}

	// Unlock container before calling back into the daemon
	unlockContainer = false
	clnt.unlock(containerID)

	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State: StatePause,
		}})
}

// Resume handles resume requests for containers
func (clnt *client) Resume(containerID string) error {
	unlockContainer := true
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer func() {
		if unlockContainer {
			clnt.unlock(containerID)
		}
	}()
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return err
	}

	// This should never happen, since Windows Server Containers cannot be paused
	for _, option := range container.options {
		if h, ok := option.(*HyperVIsolationOption); ok {
			if !h.IsHyperV {
				return errors.New("cannot resume Windows Server Containers")
			}
			break
		}
	}

	err = container.hcsContainer.Resume()
	if err != nil {
		return err
	}

	// Unlock container before calling back into the daemon
	unlockContainer = false
	clnt.unlock(containerID)

	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State: StateResume,
		}})
}

// Stats handles stats requests for containers
func (clnt *client) Stats(containerID string) (*Stats, error) {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return nil, err
	}
	s, err := container.hcsContainer.Statistics()
	if err != nil {
		return nil, err
	}
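	// Stats is a direct type conversion of hcsshim.Statistics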
	st := Stats(s)
	return &st, nil
}

// Restore is the handler for restoring a container
func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error {
	// TODO Windows: Implement this. For now, just tell the backend the container exited.
	logrus.Debugf("libcontainerd: Restore(%s)", containerID)
	return clnt.backend.StateChanged(containerID, StateInfo{
		CommonStateInfo: CommonStateInfo{
			State:    StateExit,
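			// 1 << 31 is an arbitrary non-zero sentinel; the real exit code is unknown here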
			ExitCode: 1 << 31,
		}})
}

// GetPidsForContainer returns a list of process IDs running in a container.
// Although implemented, this is not used on Windows.
func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
	var pids []int
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	cont, err := clnt.getContainer(containerID)
	if err != nil {
		return nil, err
	}

	// Add the first process
	pids = append(pids, int(cont.containerCommon.systemPid))
	// And add all the exec'd processes
	for _, p := range cont.processes {
		pids = append(pids, int(p.processCommon.systemPid))
	}
	return pids, nil
}

// Summary returns a summary of the processes running in a container.
// This is present on Windows to support docker top. On Linux, the
// engine shells out to ps to get process information. On Windows, as
// the containers could be Hyper-V containers, they would not be
// visible on the container host. However, libcontainerd does have
// that information.
func (clnt *client) Summary(containerID string) ([]Summary, error) {
	// Get the libcontainerd container object
	clnt.lock(containerID)
	defer clnt.unlock(containerID)
	container, err := clnt.getContainer(containerID)
	if err != nil {
		return nil, err
	}
	p, err := container.hcsContainer.ProcessList()
	if err != nil {
		return nil, err
	}
	pl := make([]Summary, len(p))
	for i := range p {
		pl[i] = Summary(p[i])
	}
	return pl, nil
}

// UpdateResources updates resources for a running container.
func (clnt *client) UpdateResources(containerID string, resources Resources) error {
	// Updating resources isn't supported on Windows, but we return nil
	// rather than an error so that container update requests don't fail.
	return nil
}

func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
	return errors.New("Windows: Containers do not support checkpoints")
}

func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
	return nil, errors.New("Windows: Containers do not support checkpoints")
}

func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
	return &ServerVersion{}, nil
}