github.com/jwhonce/docker@v0.6.7-0.20190327063223-da823cf3a5a3/libcontainerd/local/local_windows.go

     1  package local // import "github.com/docker/docker/libcontainerd/local"
     2  
     3  // This package contains the legacy in-proc calls into HCS using the v1 schema
     4  // for Windows runtime purposes.
     5  
     6  import (
     7  	"context"
     8  	"encoding/json"
     9  	"fmt"
    10  	"io/ioutil"
    11  	"os"
    12  	"path"
    13  	"path/filepath"
    14  	"regexp"
    15  	"strings"
    16  	"sync"
    17  	"syscall"
    18  	"time"
    19  
    20  	"github.com/Microsoft/hcsshim"
    21  	opengcs "github.com/Microsoft/opengcs/client"
    22  	"github.com/containerd/containerd"
    23  	"github.com/containerd/containerd/cio"
    24  
    25  	"github.com/docker/docker/errdefs"
    26  	"github.com/docker/docker/libcontainerd/queue"
    27  	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
    28  	"github.com/docker/docker/pkg/sysinfo"
    29  	"github.com/docker/docker/pkg/system"
    30  	specs "github.com/opencontainers/runtime-spec/specs-go"
    31  	"github.com/pkg/errors"
    32  	"github.com/sirupsen/logrus"
    33  	"golang.org/x/sys/windows"
    34  )
    35  
    36  type process struct {
    37  	id         string
    38  	pid        int
    39  	hcsProcess hcsshim.Process
    40  }
    41  
    42  type container struct {
    43  	sync.Mutex
    44  
    45  	// The ociSpec is required, as client.Create() needs a spec, but can
    46  	// be called from the RestartManager context which does not otherwise
    47  	// have access to the Spec
    48  	ociSpec *specs.Spec
    49  
    50  	isWindows    bool
    51  	hcsContainer hcsshim.Container
    52  
    53  	id               string
    54  	status           libcontainerdtypes.Status
    55  	exitedAt         time.Time
    56  	exitCode         uint32
    57  	waitCh           chan struct{}
    58  	init             *process
    59  	execs            map[string]*process
    60  	terminateInvoked bool
    61  }
    62  
    63  // Win32 error codes that are used for various workarounds
     64  // These really should be ALL_CAPS to match Go's syscall library and standard
    65  // Win32 error conventions, but golint insists on CamelCase.
    66  const (
    67  	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
    68  	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
    69  	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
    70  	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
    71  )
    72  
    73  // defaultOwner is a tag passed to HCS to allow it to differentiate between
     74  // container creator management stacks. We hard-code "docker" for the
     75  // docker engine.
    76  const defaultOwner = "docker"
    77  
    78  type client struct {
    79  	sync.Mutex
    80  
    81  	stateDir   string
    82  	backend    libcontainerdtypes.Backend
    83  	logger     *logrus.Entry
    84  	eventQ     queue.Queue
    85  	containers map[string]*container
    86  }
    87  
     88  // NewClient creates a new local executor for Windows
    89  func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
    90  	c := &client{
    91  		stateDir:   stateDir,
    92  		backend:    b,
     93  		logger:     logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
    94  		containers: make(map[string]*container),
    95  	}
    96  
    97  	return c, nil
    98  }
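
         // Illustrative usage sketch, not part of the original file: the daemon is
         // assumed to construct this client roughly as below. Note the *containerd.Client
         // argument is ignored by this local, HCS-based implementation, so nil is
         // acceptable; "root", "daemonBackend" and the "moby" namespace are
         // hypothetical values used only for this example.
         //
         //	cli, err := local.NewClient(ctx, nil, filepath.Join(root, "containerd"), "moby", daemonBackend)
         //	if err != nil {
         //		return err
         //	}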
    99  
   100  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
   101  	return containerd.Version{}, errors.New("not implemented on Windows")
   102  }
   103  
   104  // Create is the entrypoint to create a container from a spec.
    105  // The table below shows the fields required for the HCS JSON calling parameters;
    106  // any field that is not populated is omitted.
   107  // +-----------------+--------------------------------------------+---------------------------------------------------+
   108  // |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
   109  // +-----------------+--------------------------------------------+---------------------------------------------------+
   110  // | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
   111  // | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
   112  // | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
   113  // | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
   114  // +-----------------+--------------------------------------------+---------------------------------------------------+
   115  //
   116  // Isolation=Process example:
   117  //
   118  // {
   119  //	"SystemType": "Container",
   120  //	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   121  //	"Owner": "docker",
   122  //	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
   123  //	"IgnoreFlushesDuringBoot": true,
   124  //	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   125  //	"Layers": [{
   126  //		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   127  //		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   128  //	}],
   129  //	"HostName": "5e0055c814a6",
   130  //	"MappedDirectories": [],
   131  //	"HvPartition": false,
   132  //	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
   133  //}
   134  //
   135  // Isolation=Hyper-V example:
   136  //
   137  //{
   138  //	"SystemType": "Container",
   139  //	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
   140  //	"Owner": "docker",
   141  //	"IgnoreFlushesDuringBoot": true,
   142  //	"Layers": [{
   143  //		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   144  //		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   145  //	}],
   146  //	"HostName": "475c2c58933b",
   147  //	"MappedDirectories": [],
   148  //	"HvPartition": true,
   149  //	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
   150  //	"DNSSearchList": "a.com,b.com,c.com",
   151  //	"HvRuntime": {
   152  //		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
   153  //	},
   154  //}
   155  func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}) error {
   156  	if ctr := c.getContainer(id); ctr != nil {
   157  		return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   158  	}
   159  
   160  	var err error
   161  	if spec.Linux == nil {
   162  		err = c.createWindows(id, spec, runtimeOptions)
   163  	} else {
   164  		err = c.createLinux(id, spec, runtimeOptions)
   165  	}
   166  
   167  	if err == nil {
   168  		c.eventQ.Append(id, func() {
   169  			ei := libcontainerdtypes.EventInfo{
   170  				ContainerID: id,
   171  			}
   172  			c.logger.WithFields(logrus.Fields{
   173  				"container": id,
   174  				"event":     libcontainerdtypes.EventCreate,
   175  			}).Info("sending event")
   176  			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
   177  			if err != nil {
   178  				c.logger.WithError(err).WithFields(logrus.Fields{
   179  					"container": id,
   180  					"event":     libcontainerdtypes.EventCreate,
   181  				}).Error("failed to process event")
   182  			}
   183  		})
   184  	}
   185  	return err
   186  }
   187  
   188  func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
   189  	logger := c.logger.WithField("container", id)
   190  	configuration := &hcsshim.ContainerConfig{
   191  		SystemType:              "Container",
   192  		Name:                    id,
   193  		Owner:                   defaultOwner,
   194  		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
   195  		HostName:                spec.Hostname,
   196  		HvPartition:             false,
   197  	}
   198  
   199  	c.extractResourcesFromSpec(spec, configuration)
   200  
   201  	if spec.Windows.Resources != nil {
   202  		if spec.Windows.Resources.Storage != nil {
   203  			if spec.Windows.Resources.Storage.Bps != nil {
   204  				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
   205  			}
   206  			if spec.Windows.Resources.Storage.Iops != nil {
   207  				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
   208  			}
   209  		}
   210  	}
   211  
   212  	if spec.Windows.HyperV != nil {
   213  		configuration.HvPartition = true
   214  	}
   215  
   216  	if spec.Windows.Network != nil {
   217  		configuration.EndpointList = spec.Windows.Network.EndpointList
   218  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   219  		if spec.Windows.Network.DNSSearchList != nil {
   220  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   221  		}
   222  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   223  	}
   224  
   225  	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
   226  		configuration.Credentials = cs
   227  	}
   228  
    229  	// We must have at least two layers in the spec: the bottom one being a
   230  	// base image, the top one being the RW layer.
    231  	if len(spec.Windows.LayerFolders) < 2 {
   232  		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
   233  	}
   234  
   235  	// Strip off the top-most layer as that's passed in separately to HCS
   236  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   237  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   238  
   239  	if configuration.HvPartition {
   240  		// We don't currently support setting the utility VM image explicitly.
   241  		// TODO @swernli/jhowardmsft circa RS5, this may be re-locatable.
   242  		if spec.Windows.HyperV.UtilityVMPath != "" {
   243  			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
   244  		}
   245  
   246  		// Find the upper-most utility VM image.
   247  		var uvmImagePath string
   248  		for _, path := range layerFolders {
   249  			fullPath := filepath.Join(path, "UtilityVM")
   250  			_, err := os.Stat(fullPath)
   251  			if err == nil {
   252  				uvmImagePath = fullPath
   253  				break
   254  			}
   255  			if !os.IsNotExist(err) {
   256  				return err
   257  			}
   258  		}
   259  		if uvmImagePath == "" {
   260  			return errors.New("utility VM image could not be found")
   261  		}
   262  		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
   263  
   264  		if spec.Root.Path != "" {
   265  			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
   266  		}
   267  	} else {
   268  		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
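         		// Illustrative note, not part of the original file: a valid Root.Path here
         		// looks like `\\?\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}\` (see the
         		// Isolation=Process example at the top of this file); the trailing
         		// backslash is stripped just below before the value is handed to HCS as
         		// VolumePath.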
    269  		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); !matched || err != nil {
   270  			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
   271  		}
   272  		// HCS API requires the trailing backslash to be removed
   273  		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
   274  	}
   275  
   276  	if spec.Root.Readonly {
   277  		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
   278  	}
   279  
   280  	for _, layerPath := range layerFolders {
   281  		_, filename := filepath.Split(layerPath)
   282  		g, err := hcsshim.NameToGuid(filename)
   283  		if err != nil {
   284  			return err
   285  		}
   286  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   287  			ID:   g.ToString(),
   288  			Path: layerPath,
   289  		})
   290  	}
   291  
   292  	// Add the mounts (volumes, bind mounts etc) to the structure
   293  	var mds []hcsshim.MappedDir
   294  	var mps []hcsshim.MappedPipe
   295  	for _, mount := range spec.Mounts {
   296  		const pipePrefix = `\\.\pipe\`
   297  		if mount.Type != "" {
   298  			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
   299  		}
   300  		if strings.HasPrefix(mount.Destination, pipePrefix) {
   301  			mp := hcsshim.MappedPipe{
   302  				HostPath:          mount.Source,
   303  				ContainerPipeName: mount.Destination[len(pipePrefix):],
   304  			}
   305  			mps = append(mps, mp)
   306  		} else {
   307  			md := hcsshim.MappedDir{
   308  				HostPath:      mount.Source,
   309  				ContainerPath: mount.Destination,
   310  				ReadOnly:      false,
   311  			}
   312  			for _, o := range mount.Options {
   313  				if strings.ToLower(o) == "ro" {
   314  					md.ReadOnly = true
   315  				}
   316  			}
   317  			mds = append(mds, md)
   318  		}
   319  	}
   320  	configuration.MappedDirectories = mds
   321  	if len(mps) > 0 && system.GetOSVersion().Build < 16299 { // RS3
   322  		return errors.New("named pipe mounts are not supported on this version of Windows")
   323  	}
   324  	configuration.MappedPipes = mps
   325  
   326  	if len(spec.Windows.Devices) > 0 {
   327  		// Add any device assignments
   328  		if configuration.HvPartition {
   329  			return errors.New("device assignment is not supported for HyperV containers")
   330  		}
   331  		if system.GetOSVersion().Build < 17763 { // RS5
   332  			return errors.New("device assignment requires Windows builds RS5 (17763+) or later")
   333  		}
   334  		for _, d := range spec.Windows.Devices {
   335  			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
   336  		}
   337  	}
   338  
   339  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   340  	if err != nil {
   341  		return err
   342  	}
   343  
   344  	// Construct a container object for calling start on it.
   345  	ctr := &container{
   346  		id:           id,
   347  		execs:        make(map[string]*process),
   348  		isWindows:    true,
   349  		ociSpec:      spec,
   350  		hcsContainer: hcsContainer,
   351  		status:       libcontainerdtypes.StatusCreated,
   352  		waitCh:       make(chan struct{}),
   353  	}
   354  
   355  	logger.Debug("starting container")
   356  	if err = hcsContainer.Start(); err != nil {
   357  		c.logger.WithError(err).Error("failed to start container")
   358  		ctr.Lock()
   359  		if err := c.terminateContainer(ctr); err != nil {
   360  			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
   361  		} else {
   362  			c.logger.Debug("cleaned up after failed Start by calling Terminate")
   363  		}
   364  		ctr.Unlock()
   365  		return err
   366  	}
   367  
   368  	c.Lock()
   369  	c.containers[id] = ctr
   370  	c.Unlock()
   371  
   372  	logger.Debug("createWindows() completed successfully")
   373  	return nil
   374  
   375  }
   376  
   377  func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interface{}) error {
    378  	logrus.Debugf("libcontainerd: createLinux(): containerId %s", id)
   379  	logger := c.logger.WithField("container", id)
   380  
   381  	if runtimeOptions == nil {
   382  		return fmt.Errorf("lcow option must be supplied to the runtime")
   383  	}
   384  	lcowConfig, ok := runtimeOptions.(*opengcs.Config)
   385  	if !ok {
   386  		return fmt.Errorf("lcow option must be supplied to the runtime")
   387  	}
   388  
   389  	configuration := &hcsshim.ContainerConfig{
   390  		HvPartition:                 true,
   391  		Name:                        id,
   392  		SystemType:                  "container",
   393  		ContainerType:               "linux",
   394  		Owner:                       defaultOwner,
   395  		TerminateOnLastHandleClosed: true,
   396  		HvRuntime: &hcsshim.HvRuntime{
   397  			ImagePath:           lcowConfig.KirdPath,
   398  			LinuxKernelFile:     lcowConfig.KernelFile,
   399  			LinuxInitrdFile:     lcowConfig.InitrdFile,
   400  			LinuxBootParameters: lcowConfig.BootParameters,
   401  		},
   402  	}
   403  
   404  	if spec.Windows == nil {
   405  		return fmt.Errorf("spec.Windows must not be nil for LCOW containers")
   406  	}
   407  
   408  	c.extractResourcesFromSpec(spec, configuration)
   409  
    410  	// We must have at least one layer in the spec
    411  	if len(spec.Windows.LayerFolders) == 0 {
    412  		return fmt.Errorf("OCI spec is invalid - at least one LayerFolders entry must be supplied to the runtime")
   413  	}
   414  
   415  	// Strip off the top-most layer as that's passed in separately to HCS
   416  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   417  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   418  
   419  	for _, layerPath := range layerFolders {
   420  		_, filename := filepath.Split(layerPath)
   421  		g, err := hcsshim.NameToGuid(filename)
   422  		if err != nil {
   423  			return err
   424  		}
   425  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   426  			ID:   g.ToString(),
   427  			Path: filepath.Join(layerPath, "layer.vhd"),
   428  		})
   429  	}
   430  
   431  	if spec.Windows.Network != nil {
   432  		configuration.EndpointList = spec.Windows.Network.EndpointList
   433  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   434  		if spec.Windows.Network.DNSSearchList != nil {
   435  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   436  		}
   437  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   438  	}
   439  
   440  	// Add the mounts (volumes, bind mounts etc) to the structure. We have to do
   441  	// some translation for both the mapped directories passed into HCS and in
   442  	// the spec.
   443  	//
   444  	// For HCS, we only pass in the mounts from the spec which are type "bind".
    445  	// Further, the "ContainerPath" field (which is a little misleadingly
   446  	// named when it applies to the utility VM rather than the container in the
   447  	// utility VM) is moved to under /tmp/gcs/<ID>/binds, where this is passed
   448  	// by the caller through a 'uvmpath' option.
   449  	//
   450  	// We do similar translation for the mounts in the spec by stripping out
   451  	// the uvmpath option, and translating the Source path to the location in the
   452  	// utility VM calculated above.
   453  	//
   454  	// From inside the utility VM, you would see a 9p mount such as in the following
   455  	// where a host folder has been mapped to /target. The line with /tmp/gcs/<ID>/binds
   456  	// specifically:
   457  	//
   458  	//	/ # mount
   459  	//	rootfs on / type rootfs (rw,size=463736k,nr_inodes=115934)
   460  	//	proc on /proc type proc (rw,relatime)
   461  	//	sysfs on /sys type sysfs (rw,relatime)
   462  	//	udev on /dev type devtmpfs (rw,relatime,size=498100k,nr_inodes=124525,mode=755)
   463  	//	tmpfs on /run type tmpfs (rw,relatime)
   464  	//	cgroup on /sys/fs/cgroup type cgroup (rw,relatime,cpuset,cpu,cpuacct,blkio,memory,devices,freezer,net_cls,perf_event,net_prio,hugetlb,pids,rdma)
   465  	//	mqueue on /dev/mqueue type mqueue (rw,relatime)
   466  	//	devpts on /dev/pts type devpts (rw,relatime,mode=600,ptmxmode=000)
   467  	//	/binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target on /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target type 9p (rw,sync,dirsync,relatime,trans=fd,rfdno=6,wfdno=6)
   468  	//	/dev/pmem0 on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
   469  	//	/dev/sda on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
   470  	//	overlay on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/rootfs type overlay (rw,relatime,lowerdir=/tmp/base/:/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0,upperdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/upper,workdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/work)
   471  	//
   472  	//  /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l
   473  	//	total 16
   474  	//	drwx------    3 0        0               60 Sep  7 18:54 binds
   475  	//	-rw-r--r--    1 0        0             3345 Sep  7 18:54 config.json
   476  	//	drwxr-xr-x   10 0        0             4096 Sep  6 17:26 layer0
   477  	//	drwxr-xr-x    1 0        0             4096 Sep  7 18:54 rootfs
   478  	//	drwxr-xr-x    5 0        0             4096 Sep  7 18:54 scratch
   479  	//
   480  	//	/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l binds
   481  	//	total 0
   482  	//	drwxrwxrwt    2 0        0             4096 Sep  7 16:51 target
   483  
   484  	mds := []hcsshim.MappedDir{}
   485  	specMounts := []specs.Mount{}
   486  	for _, mount := range spec.Mounts {
   487  		specMount := mount
   488  		if mount.Type == "bind" {
   489  			// Strip out the uvmpath from the options
   490  			updatedOptions := []string{}
   491  			uvmPath := ""
   492  			readonly := false
   493  			for _, opt := range mount.Options {
   494  				dropOption := false
   495  				elements := strings.SplitN(opt, "=", 2)
   496  				switch elements[0] {
   497  				case "uvmpath":
   498  					uvmPath = elements[1]
   499  					dropOption = true
   500  				case "rw":
   501  				case "ro":
   502  					readonly = true
   503  				case "rbind":
   504  				default:
   505  					return fmt.Errorf("unsupported option %q", opt)
   506  				}
   507  				if !dropOption {
   508  					updatedOptions = append(updatedOptions, opt)
   509  				}
   510  			}
   511  			mount.Options = updatedOptions
   512  			if uvmPath == "" {
   513  				return fmt.Errorf("no uvmpath for bind mount %+v", mount)
   514  			}
   515  			md := hcsshim.MappedDir{
   516  				HostPath:          mount.Source,
   517  				ContainerPath:     path.Join(uvmPath, mount.Destination),
   518  				CreateInUtilityVM: true,
   519  				ReadOnly:          readonly,
   520  			}
   521  			// If we are 1803/RS4+ enable LinuxMetadata support by default
   522  			if system.GetOSVersion().Build >= 17134 {
   523  				md.LinuxMetadata = true
   524  			}
   525  			mds = append(mds, md)
   526  			specMount.Source = path.Join(uvmPath, mount.Destination)
   527  		}
   528  		specMounts = append(specMounts, specMount)
   529  	}
   530  	configuration.MappedDirectories = mds
   531  
   532  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   533  	if err != nil {
   534  		return err
   535  	}
   536  
   537  	spec.Mounts = specMounts
   538  
   539  	// Construct a container object for calling start on it.
   540  	ctr := &container{
   541  		id:           id,
   542  		execs:        make(map[string]*process),
   543  		isWindows:    false,
   544  		ociSpec:      spec,
   545  		hcsContainer: hcsContainer,
   546  		status:       libcontainerdtypes.StatusCreated,
   547  		waitCh:       make(chan struct{}),
   548  	}
   549  
   550  	// Start the container.
   551  	logger.Debug("starting container")
   552  	if err = hcsContainer.Start(); err != nil {
   553  		c.logger.WithError(err).Error("failed to start container")
   554  		ctr.debugGCS()
   555  		ctr.Lock()
   556  		if err := c.terminateContainer(ctr); err != nil {
   557  			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
   558  		} else {
   559  			c.logger.Debug("cleaned up after failed Start by calling Terminate")
   560  		}
   561  		ctr.Unlock()
   562  		return err
   563  	}
   564  	ctr.debugGCS()
   565  
   566  	c.Lock()
   567  	c.containers[id] = ctr
   568  	c.Unlock()
   569  
   570  	logger.Debug("createLinux() completed successfully")
   571  	return nil
   572  }
   573  
   574  func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
   575  	if spec.Windows.Resources != nil {
   576  		if spec.Windows.Resources.CPU != nil {
   577  			if spec.Windows.Resources.CPU.Count != nil {
   578  				// This check is being done here rather than in adaptContainerSettings
   579  				// because we don't want to update the HostConfig in case this container
   580  				// is moved to a host with more CPUs than this one.
   581  				cpuCount := *spec.Windows.Resources.CPU.Count
   582  				hostCPUCount := uint64(sysinfo.NumCPU())
   583  				if cpuCount > hostCPUCount {
   584  					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
   585  					cpuCount = hostCPUCount
   586  				}
   587  				configuration.ProcessorCount = uint32(cpuCount)
   588  			}
   589  			if spec.Windows.Resources.CPU.Shares != nil {
   590  				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
   591  			}
   592  			if spec.Windows.Resources.CPU.Maximum != nil {
   593  				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
   594  			}
   595  		}
   596  		if spec.Windows.Resources.Memory != nil {
   597  			if spec.Windows.Resources.Memory.Limit != nil {
   598  				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
   599  			}
   600  		}
   601  	}
   602  }
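
         // Illustrative example, not part of the original file: given a spec with
         // Windows.Resources.CPU.Count set to 8 on a host where sysinfo.NumCPU()
         // reports 4 processors, extractResourcesFromSpec logs a warning and caps
         // configuration.ProcessorCount at 4, while CPU.Shares, CPU.Maximum and
         // Memory.Limit (converted from bytes to MB) are copied through when present.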
   603  
   604  func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   605  	ctr := c.getContainer(id)
   606  	switch {
   607  	case ctr == nil:
   608  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   609  	case ctr.init != nil:
   610  		return -1, errors.WithStack(errdefs.Conflict(errors.New("container already started")))
   611  	}
   612  
   613  	logger := c.logger.WithField("container", id)
   614  
   615  	// Note we always tell HCS to create stdout as it's required
   616  	// regardless of '-i' or '-t' options, so that docker can always grab
   617  	// the output through logs. We also tell HCS to always create stdin,
   618  	// even if it's not used - it will be closed shortly. Stderr is only
    619  	// created if we're not running with -t.
   620  	var (
   621  		emulateConsole   bool
   622  		createStdErrPipe bool
   623  	)
   624  	if ctr.ociSpec.Process != nil {
   625  		emulateConsole = ctr.ociSpec.Process.Terminal
   626  		createStdErrPipe = !ctr.ociSpec.Process.Terminal
   627  	}
   628  
   629  	createProcessParms := &hcsshim.ProcessConfig{
   630  		EmulateConsole:   emulateConsole,
   631  		WorkingDirectory: ctr.ociSpec.Process.Cwd,
   632  		CreateStdInPipe:  true,
   633  		CreateStdOutPipe: true,
   634  		CreateStdErrPipe: createStdErrPipe,
   635  	}
   636  
   637  	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
   638  		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
   639  		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
   640  	}
   641  
   642  	// Configure the environment for the process
   643  	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
   644  
   645  	// Configure the CommandLine/CommandArgs
   646  	setCommandLineAndArgs(ctr.isWindows, ctr.ociSpec.Process, createProcessParms)
   647  	if ctr.isWindows {
   648  		logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)
   649  	}
   650  
   651  	createProcessParms.User = ctr.ociSpec.Process.User.Username
   652  
   653  	// LCOW requires the raw OCI spec passed through HCS and onwards to
   654  	// GCS for the utility VM.
   655  	if !ctr.isWindows {
   656  		ociBuf, err := json.Marshal(ctr.ociSpec)
   657  		if err != nil {
   658  			return -1, err
   659  		}
   660  		ociRaw := json.RawMessage(ociBuf)
   661  		createProcessParms.OCISpecification = &ociRaw
   662  	}
   663  
   664  	ctr.Lock()
   665  
   666  	// Start the command running in the container.
   667  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   668  	if err != nil {
   669  		logger.WithError(err).Error("CreateProcess() failed")
   670  		// Fix for https://github.com/moby/moby/issues/38719.
   671  		// If the init process failed to launch, we still need to reap the
   672  		// container to avoid leaking it.
   673  		//
   674  		// Note we use the explicit exit code of 127 which is the
   675  		// Linux shell equivalent of "command not found". Windows cannot
   676  		// know ahead of time whether or not the command exists, especially
   677  		// in the case of Hyper-V containers.
   678  		ctr.Unlock()
   679  		exitedAt := time.Now()
   680  		p := &process{
   681  			id:  libcontainerdtypes.InitProcessName,
   682  			pid: 0,
   683  		}
   684  		c.reapContainer(ctr, p, 127, exitedAt, nil, logger)
   685  		return -1, err
   686  	}
   687  
   688  	defer ctr.Unlock()
   689  
   690  	defer func() {
   691  		if err != nil {
   692  			if err := newProcess.Kill(); err != nil {
   693  				logger.WithError(err).Error("failed to kill process")
   694  			}
   695  			go func() {
   696  				if err := newProcess.Wait(); err != nil {
   697  					logger.WithError(err).Error("failed to wait for process")
   698  				}
   699  				if err := newProcess.Close(); err != nil {
   700  					logger.WithError(err).Error("failed to clean process resources")
   701  				}
   702  			}()
   703  		}
   704  	}()
   705  	p := &process{
   706  		hcsProcess: newProcess,
   707  		id:         libcontainerdtypes.InitProcessName,
   708  		pid:        newProcess.Pid(),
   709  	}
   710  	logger.WithField("pid", p.pid).Debug("init process started")
   711  
   712  	ctr.status = libcontainerdtypes.StatusRunning
   713  	ctr.init = p
   714  
    715  	// Spin up a goroutine waiting for exit to handle cleanup
   716  	go c.reapProcess(ctr, p)
   717  
   718  	// Don't shadow err here due to our deferred clean-up.
   719  	var dio *cio.DirectIO
   720  	dio, err = newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
   721  	if err != nil {
   722  		logger.WithError(err).Error("failed to get stdio pipes")
   723  		return -1, err
   724  	}
   725  	_, err = attachStdio(dio)
   726  	if err != nil {
   727  		logger.WithError(err).Error("failed to attach stdio")
   728  		return -1, err
   729  	}
   730  
   731  	// Generate the associated event
   732  	c.eventQ.Append(id, func() {
   733  		ei := libcontainerdtypes.EventInfo{
   734  			ContainerID: id,
   735  			ProcessID:   libcontainerdtypes.InitProcessName,
   736  			Pid:         uint32(p.pid),
   737  		}
   738  		c.logger.WithFields(logrus.Fields{
   739  			"container":  ctr.id,
   740  			"event":      libcontainerdtypes.EventStart,
   741  			"event-info": ei,
   742  		}).Info("sending event")
   743  		err := c.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
   744  		if err != nil {
   745  			c.logger.WithError(err).WithFields(logrus.Fields{
   746  				"container":  id,
   747  				"event":      libcontainerdtypes.EventStart,
   748  				"event-info": ei,
   749  			}).Error("failed to process event")
   750  		}
   751  	})
   752  	logger.Debug("start() completed")
   753  	return p.pid, nil
   754  }
   755  
   756  // setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
   757  func setCommandLineAndArgs(isWindows bool, process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
   758  	if isWindows {
   759  		if process.CommandLine != "" {
   760  			createProcessParms.CommandLine = process.CommandLine
   761  		} else {
   762  			createProcessParms.CommandLine = system.EscapeArgs(process.Args)
   763  		}
   764  	} else {
   765  		createProcessParms.CommandArgs = process.Args
   766  	}
   767  }
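
         // Illustrative example, not part of the original file: for a Windows container
         // whose process spec has no explicit CommandLine and whose
         // Args = []string{"cmd", "/S", "/C", "echo hello world"}, system.EscapeArgs is
         // expected to produce a single command line roughly equivalent to
         //
         //	cmd /S /C "echo hello world"
         //
         // whereas for an LCOW container the argument vector is passed through
         // unmodified in CommandArgs.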
   768  
   769  func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
   770  	stdin, stdout, stderr, err := newProcess.Stdio()
   771  	if err != nil {
   772  		return nil, err
   773  	}
   774  
   775  	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)
   776  
   777  	// Convert io.ReadClosers to io.Readers
   778  	if stdout != nil {
   779  		dio.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
   780  	}
   781  	if stderr != nil {
   782  		dio.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
   783  	}
   784  	return dio, nil
   785  }
   786  
    787  // Exec adds a process to a running container
   788  func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   789  	ctr := c.getContainer(containerID)
   790  	switch {
   791  	case ctr == nil:
   792  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   793  	case ctr.hcsContainer == nil:
   794  		return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
   795  	case ctr.execs != nil && ctr.execs[processID] != nil:
   796  		return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   797  	}
   798  	logger := c.logger.WithFields(logrus.Fields{
   799  		"container": containerID,
   800  		"exec":      processID,
   801  	})
   802  
   803  	// Note we always tell HCS to
   804  	// create stdout as it's required regardless of '-i' or '-t' options, so that
   805  	// docker can always grab the output through logs. We also tell HCS to always
   806  	// create stdin, even if it's not used - it will be closed shortly. Stderr
    807  	// is only created if we're not running with -t.
   808  	createProcessParms := &hcsshim.ProcessConfig{
   809  		CreateStdInPipe:  true,
   810  		CreateStdOutPipe: true,
   811  		CreateStdErrPipe: !spec.Terminal,
   812  	}
   813  	if spec.Terminal {
   814  		createProcessParms.EmulateConsole = true
   815  		if spec.ConsoleSize != nil {
   816  			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
   817  			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
   818  		}
   819  	}
   820  
    821  	// Take the working directory from the exec process spec if it is defined,
    822  	// otherwise fall back to the container's first (init) process.
   823  	if spec.Cwd != "" {
   824  		createProcessParms.WorkingDirectory = spec.Cwd
   825  	} else {
   826  		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
   827  	}
   828  
   829  	// Configure the environment for the process
   830  	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
   831  
   832  	// Configure the CommandLine/CommandArgs
   833  	setCommandLineAndArgs(ctr.isWindows, spec, createProcessParms)
   834  	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)
   835  
   836  	createProcessParms.User = spec.User.Username
   837  
   838  	// Start the command running in the container.
   839  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   840  	if err != nil {
   841  		logger.WithError(err).Errorf("exec's CreateProcess() failed")
   842  		return -1, err
   843  	}
   844  	pid := newProcess.Pid()
   845  	defer func() {
   846  		if err != nil {
   847  			if err := newProcess.Kill(); err != nil {
   848  				logger.WithError(err).Error("failed to kill process")
   849  			}
   850  			go func() {
   851  				if err := newProcess.Wait(); err != nil {
   852  					logger.WithError(err).Error("failed to wait for process")
   853  				}
   854  				if err := newProcess.Close(); err != nil {
   855  					logger.WithError(err).Error("failed to clean process resources")
   856  				}
   857  			}()
   858  		}
   859  	}()
   860  
   861  	dio, err := newIOFromProcess(newProcess, spec.Terminal)
   862  	if err != nil {
   863  		logger.WithError(err).Error("failed to get stdio pipes")
   864  		return -1, err
   865  	}
   866  	// Tell the engine to attach streams back to the client
   867  	_, err = attachStdio(dio)
   868  	if err != nil {
   869  		return -1, err
   870  	}
   871  
   872  	p := &process{
   873  		id:         processID,
   874  		pid:        pid,
   875  		hcsProcess: newProcess,
   876  	}
   877  
   878  	// Add the process to the container's list of processes
   879  	ctr.Lock()
   880  	ctr.execs[processID] = p
   881  	ctr.Unlock()
   882  
    883  	// Spin up a goroutine waiting for exit to handle cleanup
   884  	go c.reapProcess(ctr, p)
   885  
   886  	c.eventQ.Append(ctr.id, func() {
   887  		ei := libcontainerdtypes.EventInfo{
   888  			ContainerID: ctr.id,
   889  			ProcessID:   p.id,
   890  			Pid:         uint32(p.pid),
   891  		}
   892  		c.logger.WithFields(logrus.Fields{
   893  			"container":  ctr.id,
   894  			"event":      libcontainerdtypes.EventExecAdded,
   895  			"event-info": ei,
   896  		}).Info("sending event")
   897  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecAdded, ei)
   898  		if err != nil {
   899  			c.logger.WithError(err).WithFields(logrus.Fields{
   900  				"container":  ctr.id,
   901  				"event":      libcontainerdtypes.EventExecAdded,
   902  				"event-info": ei,
   903  			}).Error("failed to process event")
   904  		}
   905  		err = c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecStarted, ei)
   906  		if err != nil {
   907  			c.logger.WithError(err).WithFields(logrus.Fields{
   908  				"container":  ctr.id,
   909  				"event":      libcontainerdtypes.EventExecStarted,
   910  				"event-info": ei,
   911  			}).Error("failed to process event")
   912  		}
   913  	})
   914  
   915  	return pid, nil
   916  }
   917  
   918  // Signal handles `docker stop` on Windows. While Linux has support for
   919  // the full range of signals, signals aren't really implemented on Windows.
   920  // We fake supporting regular stop and -9 to force kill.
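         // For illustration (a summary of the logic below, not part of the original
         // file): for the init process, SIGKILL (9) results in hcsContainer.Terminate()
         // and any other signal results in hcsContainer.Shutdown(), while a signal
         // directed at an exec process kills that process via hcsProcess.Kill().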
   921  func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
   922  	ctr, p, err := c.getProcess(containerID, processID)
   923  	if err != nil {
   924  		return err
   925  	}
   926  
   927  	logger := c.logger.WithFields(logrus.Fields{
   928  		"container": containerID,
   929  		"process":   processID,
   930  		"pid":       p.pid,
   931  		"signal":    signal,
   932  	})
   933  	logger.Debug("Signal()")
   934  
   935  	if processID == libcontainerdtypes.InitProcessName {
   936  		if syscall.Signal(signal) == syscall.SIGKILL {
   937  			// Terminate the compute system
   938  			ctr.Lock()
   939  			ctr.terminateInvoked = true
   940  			if err := ctr.hcsContainer.Terminate(); err != nil {
   941  				if !hcsshim.IsPending(err) {
    942  					logger.WithError(err).Error("failed to terminate hcsshim container")
   943  				}
   944  			}
   945  			ctr.Unlock()
   946  		} else {
   947  			// Shut down the container
   948  			if err := ctr.hcsContainer.Shutdown(); err != nil {
   949  				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
    950  					// log the error, but otherwise ignore it
    951  					logger.WithError(err).Error("failed to shutdown hcsshim container")
   952  				}
   953  			}
   954  		}
   955  	} else {
   956  		return p.hcsProcess.Kill()
   957  	}
   958  
   959  	return nil
   960  }
   961  
   962  // Resize handles a CLI event to resize an interactive docker run or docker
   963  // exec window.
   964  func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
   965  	_, p, err := c.getProcess(containerID, processID)
   966  	if err != nil {
   967  		return err
   968  	}
   969  
   970  	c.logger.WithFields(logrus.Fields{
   971  		"container": containerID,
   972  		"process":   processID,
   973  		"height":    height,
   974  		"width":     width,
   975  		"pid":       p.pid,
   976  	}).Debug("resizing")
   977  	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
   978  }
   979  
   980  func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
   981  	_, p, err := c.getProcess(containerID, processID)
   982  	if err != nil {
   983  		return err
   984  	}
   985  
   986  	return p.hcsProcess.CloseStdin()
   987  }
   988  
   989  // Pause handles pause requests for containers
   990  func (c *client) Pause(_ context.Context, containerID string) error {
   991  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   992  	if err != nil {
   993  		return err
   994  	}
   995  
   996  	if ctr.ociSpec.Windows.HyperV == nil {
   997  		return errors.New("cannot pause Windows Server Containers")
   998  	}
   999  
  1000  	ctr.Lock()
  1001  	defer ctr.Unlock()
  1002  
  1003  	if err = ctr.hcsContainer.Pause(); err != nil {
  1004  		return err
  1005  	}
  1006  
  1007  	ctr.status = libcontainerdtypes.StatusPaused
  1008  
  1009  	c.eventQ.Append(containerID, func() {
  1010  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
  1011  			ContainerID: containerID,
  1012  			ProcessID:   libcontainerdtypes.InitProcessName,
  1013  		})
  1014  		c.logger.WithFields(logrus.Fields{
  1015  			"container": ctr.id,
  1016  			"event":     libcontainerdtypes.EventPaused,
  1017  		}).Info("sending event")
  1018  		if err != nil {
  1019  			c.logger.WithError(err).WithFields(logrus.Fields{
  1020  				"container": containerID,
  1021  				"event":     libcontainerdtypes.EventPaused,
  1022  			}).Error("failed to process event")
  1023  		}
  1024  	})
  1025  
  1026  	return nil
  1027  }
  1028  
  1029  // Resume handles resume requests for containers
  1030  func (c *client) Resume(_ context.Context, containerID string) error {
  1031  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
  1032  	if err != nil {
  1033  		return err
  1034  	}
  1035  
  1036  	if ctr.ociSpec.Windows.HyperV == nil {
  1037  		return errors.New("cannot resume Windows Server Containers")
  1038  	}
  1039  
  1040  	ctr.Lock()
  1041  	defer ctr.Unlock()
  1042  
  1043  	if err = ctr.hcsContainer.Resume(); err != nil {
  1044  		return err
  1045  	}
  1046  
  1047  	ctr.status = libcontainerdtypes.StatusRunning
  1048  
  1049  	c.eventQ.Append(containerID, func() {
  1050  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
  1051  			ContainerID: containerID,
  1052  			ProcessID:   libcontainerdtypes.InitProcessName,
  1053  		})
  1054  		c.logger.WithFields(logrus.Fields{
  1055  			"container": ctr.id,
  1056  			"event":     libcontainerdtypes.EventResumed,
  1057  		}).Info("sending event")
  1058  		if err != nil {
  1059  			c.logger.WithError(err).WithFields(logrus.Fields{
  1060  				"container": containerID,
  1061  				"event":     libcontainerdtypes.EventResumed,
  1062  			}).Error("failed to process event")
  1063  		}
  1064  	})
  1065  
  1066  	return nil
  1067  }
  1068  
  1069  // Stats handles stats requests for containers
  1070  func (c *client) Stats(_ context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
  1071  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
  1072  	if err != nil {
  1073  		return nil, err
  1074  	}
  1075  
  1076  	readAt := time.Now()
  1077  	s, err := ctr.hcsContainer.Statistics()
  1078  	if err != nil {
  1079  		return nil, err
  1080  	}
  1081  	return &libcontainerdtypes.Stats{
  1082  		Read:     readAt,
  1083  		HCSStats: &s,
  1084  	}, nil
  1085  }
  1086  
  1087  // Restore is the handler for restoring a container
  1088  func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (bool, int, error) {
  1089  	c.logger.WithField("container", id).Debug("restore()")
  1090  
  1091  	// TODO Windows: On RS1, a re-attach isn't possible.
  1092  	// However, there is a scenario in which there is an issue.
  1093  	// Consider a background container. The daemon dies unexpectedly.
  1094  	// HCS will still have the compute service alive and running.
   1095  	// For consistency, we call in to terminate it regardless of whether HCS knows about it.
  1096  	// We explicitly just log a warning if the terminate fails.
  1097  	// Then we tell the backend the container exited.
  1098  	if hc, err := hcsshim.OpenContainer(id); err == nil {
  1099  		const terminateTimeout = time.Minute * 2
  1100  		err := hc.Terminate()
  1101  
  1102  		if hcsshim.IsPending(err) {
  1103  			err = hc.WaitTimeout(terminateTimeout)
  1104  		} else if hcsshim.IsAlreadyStopped(err) {
  1105  			err = nil
  1106  		}
  1107  
  1108  		if err != nil {
  1109  			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
  1110  			return false, -1, err
  1111  		}
  1112  	}
  1113  	return false, -1, nil
  1114  }
  1115  
   1116  // ListPids returns a list of process IDs running in a container.
   1117  // It is not implemented on Windows.
  1118  func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
  1119  	return nil, errors.New("not implemented on Windows")
  1120  }
  1121  
  1122  // Summary returns a summary of the processes running in a container.
   1123  // This is present on Windows to support docker top. On Linux, the
  1124  // engine shells out to ps to get process information. On Windows, as
  1125  // the containers could be Hyper-V containers, they would not be
  1126  // visible on the container host. However, libcontainerd does have
  1127  // that information.
  1128  func (c *client) Summary(_ context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
  1129  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
  1130  	if err != nil {
  1131  		return nil, err
  1132  	}
  1133  
  1134  	p, err := ctr.hcsContainer.ProcessList()
  1135  	if err != nil {
  1136  		return nil, err
  1137  	}
  1138  
  1139  	pl := make([]libcontainerdtypes.Summary, len(p))
  1140  	for i := range p {
  1141  		pl[i] = libcontainerdtypes.Summary{
  1142  			ImageName:                    p[i].ImageName,
  1143  			CreatedAt:                    p[i].CreateTimestamp,
  1144  			KernelTime_100Ns:             p[i].KernelTime100ns,
  1145  			MemoryCommitBytes:            p[i].MemoryCommitBytes,
  1146  			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
  1147  			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
  1148  			ProcessID:                    p[i].ProcessId,
  1149  			UserTime_100Ns:               p[i].UserTime100ns,
  1150  			ExecID:                       "",
  1151  		}
  1152  	}
  1153  	return pl, nil
  1154  }
  1155  
  1156  func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
  1157  	ec := -1
  1158  	ctr := c.getContainer(containerID)
  1159  	if ctr == nil {
  1160  		return uint32(ec), time.Now(), errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1161  	}
  1162  
  1163  	select {
  1164  	case <-ctx.Done():
  1165  		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
  1166  	case <-ctr.waitCh:
  1167  	default:
  1168  		return uint32(ec), time.Now(), errors.New("container is not stopped")
  1169  	}
  1170  
  1171  	ctr.Lock()
  1172  	defer ctr.Unlock()
  1173  	return ctr.exitCode, ctr.exitedAt, nil
  1174  }
  1175  
  1176  func (c *client) Delete(_ context.Context, containerID string) error {
  1177  	c.Lock()
  1178  	defer c.Unlock()
  1179  	ctr := c.containers[containerID]
  1180  	if ctr == nil {
  1181  		return errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1182  	}
  1183  
  1184  	ctr.Lock()
  1185  	defer ctr.Unlock()
  1186  
  1187  	switch ctr.status {
  1188  	case libcontainerdtypes.StatusCreated:
  1189  		if err := c.shutdownContainer(ctr); err != nil {
  1190  			return err
  1191  		}
  1192  		fallthrough
  1193  	case libcontainerdtypes.StatusStopped:
  1194  		delete(c.containers, containerID)
  1195  		return nil
  1196  	}
  1197  
  1198  	return errors.WithStack(errdefs.InvalidParameter(errors.New("container is not stopped")))
  1199  }
  1200  
  1201  func (c *client) Status(ctx context.Context, containerID string) (libcontainerdtypes.Status, error) {
  1202  	c.Lock()
  1203  	defer c.Unlock()
  1204  	ctr := c.containers[containerID]
  1205  	if ctr == nil {
  1206  		return libcontainerdtypes.StatusUnknown, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1207  	}
  1208  
  1209  	ctr.Lock()
  1210  	defer ctr.Unlock()
  1211  	return ctr.status, nil
  1212  }
  1213  
  1214  func (c *client) UpdateResources(ctx context.Context, containerID string, resources *libcontainerdtypes.Resources) error {
   1215  	// Updating resources isn't supported on Windows,
   1216  	// but we return nil so that the update request does not fail.
  1217  	return nil
  1218  }
  1219  
  1220  func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
  1221  	return errors.New("Windows: Containers do not support checkpoints")
  1222  }
  1223  
  1224  func (c *client) getContainer(id string) *container {
  1225  	c.Lock()
  1226  	ctr := c.containers[id]
  1227  	c.Unlock()
  1228  
  1229  	return ctr
  1230  }
  1231  
  1232  func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
  1233  	ctr := c.getContainer(containerID)
  1234  	switch {
  1235  	case ctr == nil:
  1236  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1237  	case ctr.init == nil:
  1238  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
  1239  	case processID == libcontainerdtypes.InitProcessName:
  1240  		return ctr, ctr.init, nil
  1241  	default:
  1242  		ctr.Lock()
  1243  		defer ctr.Unlock()
  1244  		if ctr.execs == nil {
  1245  			return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no execs")))
  1246  		}
  1247  	}
  1248  
  1249  	p := ctr.execs[processID]
  1250  	if p == nil {
  1251  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
  1252  	}
  1253  
  1254  	return ctr, p, nil
  1255  }
  1256  
  1257  // ctr mutex must be held when calling this function.
  1258  func (c *client) shutdownContainer(ctr *container) error {
  1259  	var err error
  1260  	const waitTimeout = time.Minute * 5
  1261  
  1262  	if !ctr.terminateInvoked {
  1263  		err = ctr.hcsContainer.Shutdown()
  1264  	}
  1265  
  1266  	if hcsshim.IsPending(err) || ctr.terminateInvoked {
  1267  		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
  1268  	} else if hcsshim.IsAlreadyStopped(err) {
  1269  		err = nil
  1270  	}
  1271  
  1272  	if err != nil {
  1273  		c.logger.WithError(err).WithField("container", ctr.id).
  1274  			Debug("failed to shutdown container, terminating it")
  1275  		terminateErr := c.terminateContainer(ctr)
  1276  		if terminateErr != nil {
  1277  			c.logger.WithError(terminateErr).WithField("container", ctr.id).
  1278  				Error("failed to shutdown container, and subsequent terminate also failed")
  1279  			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
  1280  		}
  1281  		return err
  1282  	}
  1283  
  1284  	return nil
  1285  }
  1286  
  1287  // ctr mutex must be held when calling this function.
  1288  func (c *client) terminateContainer(ctr *container) error {
  1289  	const terminateTimeout = time.Minute * 5
  1290  	ctr.terminateInvoked = true
  1291  	err := ctr.hcsContainer.Terminate()
  1292  
  1293  	if hcsshim.IsPending(err) {
  1294  		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
  1295  	} else if hcsshim.IsAlreadyStopped(err) {
  1296  		err = nil
  1297  	}
  1298  
  1299  	if err != nil {
  1300  		c.logger.WithError(err).WithField("container", ctr.id).
  1301  			Debug("failed to terminate container")
  1302  		return err
  1303  	}
  1304  
  1305  	return nil
  1306  }
  1307  
  1308  func (c *client) reapProcess(ctr *container, p *process) int {
  1309  	logger := c.logger.WithFields(logrus.Fields{
  1310  		"container": ctr.id,
  1311  		"process":   p.id,
  1312  	})
  1313  
  1314  	var eventErr error
  1315  
  1316  	// Block indefinitely for the process to exit.
  1317  	if err := p.hcsProcess.Wait(); err != nil {
  1318  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1319  			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
  1320  		}
  1321  		// Fall through here, do not return. This ensures we attempt to
  1322  		// continue the shutdown in HCS and tell the docker engine that the
  1323  		// process/container has exited to avoid a container being dropped on
  1324  		// the floor.
  1325  	}
  1326  	exitedAt := time.Now()
  1327  
  1328  	exitCode, err := p.hcsProcess.ExitCode()
  1329  	if err != nil {
  1330  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1331  			logger.WithError(err).Warnf("unable to get exit code for process")
  1332  		}
  1333  		// Since we got an error retrieving the exit code, make sure that the
  1334  		// code we return doesn't incorrectly indicate success.
  1335  		exitCode = -1
  1336  
  1337  		// Fall through here, do not return. This ensures we attempt to
  1338  		// continue the shutdown in HCS and tell the docker engine that the
  1339  		// process/container has exited to avoid a container being dropped on
  1340  		// the floor.
  1341  	}
  1342  
  1343  	if err := p.hcsProcess.Close(); err != nil {
  1344  		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
  1345  		exitCode = -1
  1346  		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
  1347  	}
  1348  
  1349  	if p.id == libcontainerdtypes.InitProcessName {
  1350  		exitCode, eventErr = c.reapContainer(ctr, p, exitCode, exitedAt, eventErr, logger)
  1351  	}
  1352  
  1353  	c.eventQ.Append(ctr.id, func() {
  1354  		ei := libcontainerdtypes.EventInfo{
  1355  			ContainerID: ctr.id,
  1356  			ProcessID:   p.id,
  1357  			Pid:         uint32(p.pid),
  1358  			ExitCode:    uint32(exitCode),
  1359  			ExitedAt:    exitedAt,
  1360  			Error:       eventErr,
  1361  		}
  1362  		c.logger.WithFields(logrus.Fields{
  1363  			"container":  ctr.id,
  1364  			"event":      libcontainerdtypes.EventExit,
  1365  			"event-info": ei,
  1366  		}).Info("sending event")
  1367  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExit, ei)
  1368  		if err != nil {
  1369  			c.logger.WithError(err).WithFields(logrus.Fields{
  1370  				"container":  ctr.id,
  1371  				"event":      libcontainerdtypes.EventExit,
  1372  				"event-info": ei,
  1373  			}).Error("failed to process event")
  1374  		}
  1375  		if p.id != libcontainerdtypes.InitProcessName {
  1376  			ctr.Lock()
  1377  			delete(ctr.execs, p.id)
  1378  			ctr.Unlock()
  1379  		}
  1380  	})
  1381  
  1382  	return exitCode
  1383  }
  1384  
  1385  // reapContainer shuts down the container and releases associated resources. It returns
  1386  // the error to be logged in the eventInfo sent back to the monitor.
  1387  func (c *client) reapContainer(ctr *container, p *process, exitCode int, exitedAt time.Time, eventErr error, logger *logrus.Entry) (int, error) {
  1388  	// Update container status
  1389  	ctr.Lock()
  1390  	ctr.status = libcontainerdtypes.StatusStopped
  1391  	ctr.exitedAt = exitedAt
  1392  	ctr.exitCode = uint32(exitCode)
  1393  	close(ctr.waitCh)
  1394  
  1395  	if err := c.shutdownContainer(ctr); err != nil {
  1396  		exitCode = -1
  1397  		logger.WithError(err).Warn("failed to shutdown container")
  1398  		thisErr := errors.Wrap(err, "failed to shutdown container")
  1399  		if eventErr != nil {
  1400  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1401  		} else {
  1402  			eventErr = thisErr
  1403  		}
  1404  	} else {
  1405  		logger.Debug("completed container shutdown")
  1406  	}
  1407  	ctr.Unlock()
  1408  
  1409  	if err := ctr.hcsContainer.Close(); err != nil {
  1410  		exitCode = -1
  1411  		logger.WithError(err).Error("failed to clean hcs container resources")
  1412  		thisErr := errors.Wrap(err, "failed to terminate container")
  1413  		if eventErr != nil {
  1414  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1415  		} else {
  1416  			eventErr = thisErr
  1417  		}
  1418  	}
  1419  	return exitCode, eventErr
  1420  }