github.com/demonoid81/moby@v0.0.0-20200517203328-62dd8e17c460/libcontainerd/local/local_windows.go

     1  package local // import "github.com/demonoid81/moby/libcontainerd/local"
     2  
     3  // This package contains the legacy in-proc calls in HCS using the v1 schema
     4  // for Windows runtime purposes.
     5  
     6  import (
     7  	"context"
     8  	"encoding/json"
     9  	"fmt"
    10  	"io/ioutil"
    11  	"os"
    12  	"path"
    13  	"path/filepath"
    14  	"regexp"
    15  	"strings"
    16  	"sync"
    17  	"syscall"
    18  	"time"
    19  
    20  	"github.com/Microsoft/hcsshim"
    21  	"github.com/Microsoft/hcsshim/osversion"
    22  	opengcs "github.com/Microsoft/opengcs/client"
    23  	"github.com/containerd/containerd"
    24  	"github.com/containerd/containerd/cio"
    25  
    26  	"github.com/demonoid81/moby/errdefs"
    27  	"github.com/demonoid81/moby/libcontainerd/queue"
    28  	libcontainerdtypes "github.com/demonoid81/moby/libcontainerd/types"
    29  	"github.com/demonoid81/moby/pkg/sysinfo"
    30  	"github.com/demonoid81/moby/pkg/system"
    31  	specs "github.com/opencontainers/runtime-spec/specs-go"
    32  	"github.com/pkg/errors"
    33  	"github.com/sirupsen/logrus"
    34  	"golang.org/x/sys/windows"
    35  )
    36  
    37  type process struct {
    38  	id         string
    39  	pid        int
    40  	hcsProcess hcsshim.Process
    41  }
    42  
    43  type container struct {
    44  	sync.Mutex
    45  
    46  	// The ociSpec is required, as client.Create() needs a spec, but can
    47  	// be called from the RestartManager context which does not otherwise
    48  	// have access to the Spec
    49  	ociSpec *specs.Spec
    50  
    51  	isWindows    bool
    52  	hcsContainer hcsshim.Container
    53  
    54  	id               string
    55  	status           containerd.ProcessStatus
    56  	exitedAt         time.Time
    57  	exitCode         uint32
    58  	waitCh           chan struct{}
    59  	init             *process
    60  	execs            map[string]*process
    61  	terminateInvoked bool
    62  }
    63  
    64  // Win32 error codes that are used for various workarounds
     65  // These really should be ALL_CAPS to match Go's syscall library and standard
    66  // Win32 error conventions, but golint insists on CamelCase.
    67  const (
    68  	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
    69  	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
    70  	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
    71  	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
    72  )
    73  
     74  // defaultOwner is a tag passed to HCS to allow it to differentiate between
     75  // container creator management stacks. We hard-code "docker" as the owner
     76  // in the case of Docker.
    77  const defaultOwner = "docker"
    78  
    79  type client struct {
    80  	sync.Mutex
    81  
    82  	stateDir   string
    83  	backend    libcontainerdtypes.Backend
    84  	logger     *logrus.Entry
    85  	eventQ     queue.Queue
    86  	containers map[string]*container
    87  }
    88  
     89  // NewClient creates a new local executor for Windows
    90  func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
    91  	c := &client{
    92  		stateDir:   stateDir,
    93  		backend:    b,
     94  		logger:     logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
    95  		containers: make(map[string]*container),
    96  	}
    97  
    98  	return c, nil
    99  }
   100  
   101  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
   102  	return containerd.Version{}, errors.New("not implemented on Windows")
   103  }
   104  
   105  // Create is the entrypoint to create a container from a spec.
    106  // The table below shows the fields required for the HCS JSON calling parameters;
    107  // fields that are not populated are omitted.
   108  // +-----------------+--------------------------------------------+---------------------------------------------------+
   109  // |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
   110  // +-----------------+--------------------------------------------+---------------------------------------------------+
   111  // | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
   112  // | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
   113  // | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
   114  // | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
   115  // +-----------------+--------------------------------------------+---------------------------------------------------+
   116  //
   117  // Isolation=Process example:
   118  //
   119  // {
   120  // 	"SystemType": "Container",
   121  // 	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   122  // 	"Owner": "docker",
   123  // 	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
   124  // 	"IgnoreFlushesDuringBoot": true,
   125  // 	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   126  // 	"Layers": [{
   127  // 		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   128  // 		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   129  // 	}],
   130  // 	"HostName": "5e0055c814a6",
   131  // 	"MappedDirectories": [],
   132  // 	"HvPartition": false,
   133  // 	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
   134  // }
   135  //
   136  // Isolation=Hyper-V example:
   137  //
   138  // {
   139  // 	"SystemType": "Container",
   140  // 	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
   141  // 	"Owner": "docker",
   142  // 	"IgnoreFlushesDuringBoot": true,
   143  // 	"Layers": [{
   144  // 		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   145  // 		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   146  // 	}],
   147  // 	"HostName": "475c2c58933b",
   148  // 	"MappedDirectories": [],
   149  // 	"HvPartition": true,
   150  // 	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
   151  // 	"DNSSearchList": "a.com,b.com,c.com",
   152  // 	"HvRuntime": {
   153  // 		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
   154  // 	},
   155  // }
   156  func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
   157  	if ctr := c.getContainer(id); ctr != nil {
   158  		return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   159  	}
   160  
   161  	var err error
   162  	if spec.Linux == nil {
   163  		err = c.createWindows(id, spec, runtimeOptions)
   164  	} else {
   165  		err = c.createLinux(id, spec, runtimeOptions)
   166  	}
   167  
   168  	if err == nil {
   169  		c.eventQ.Append(id, func() {
   170  			ei := libcontainerdtypes.EventInfo{
   171  				ContainerID: id,
   172  			}
   173  			c.logger.WithFields(logrus.Fields{
   174  				"container": id,
   175  				"event":     libcontainerdtypes.EventCreate,
   176  			}).Info("sending event")
   177  			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
   178  			if err != nil {
   179  				c.logger.WithError(err).WithFields(logrus.Fields{
   180  					"container": id,
   181  					"event":     libcontainerdtypes.EventCreate,
   182  				}).Error("failed to process event")
   183  			}
   184  		})
   185  	}
   186  	return err
   187  }
   188  
   189  func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
   190  	logger := c.logger.WithField("container", id)
   191  	configuration := &hcsshim.ContainerConfig{
   192  		SystemType:              "Container",
   193  		Name:                    id,
   194  		Owner:                   defaultOwner,
   195  		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
   196  		HostName:                spec.Hostname,
   197  		HvPartition:             false,
   198  	}
   199  
   200  	c.extractResourcesFromSpec(spec, configuration)
   201  
   202  	if spec.Windows.Resources != nil {
   203  		if spec.Windows.Resources.Storage != nil {
   204  			if spec.Windows.Resources.Storage.Bps != nil {
   205  				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
   206  			}
   207  			if spec.Windows.Resources.Storage.Iops != nil {
   208  				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
   209  			}
   210  		}
   211  	}
   212  
   213  	if spec.Windows.HyperV != nil {
   214  		configuration.HvPartition = true
   215  	}
   216  
   217  	if spec.Windows.Network != nil {
   218  		configuration.EndpointList = spec.Windows.Network.EndpointList
   219  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   220  		if spec.Windows.Network.DNSSearchList != nil {
   221  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   222  		}
   223  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   224  	}
   225  
   226  	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
   227  		configuration.Credentials = cs
   228  	}
   229  
    230  	// We must have at least two layers in the spec: the bottom one being a
    231  	// base image, the top one being the RW layer.
    232  	if len(spec.Windows.LayerFolders) < 2 {
   233  		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
   234  	}
   235  
   236  	// Strip off the top-most layer as that's passed in separately to HCS
   237  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   238  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   239  
   240  	if configuration.HvPartition {
   241  		// We don't currently support setting the utility VM image explicitly.
   242  		// TODO circa RS5, this may be re-locatable.
   243  		if spec.Windows.HyperV.UtilityVMPath != "" {
   244  			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
   245  		}
   246  
   247  		// Find the upper-most utility VM image.
   248  		var uvmImagePath string
   249  		for _, path := range layerFolders {
   250  			fullPath := filepath.Join(path, "UtilityVM")
   251  			_, err := os.Stat(fullPath)
   252  			if err == nil {
   253  				uvmImagePath = fullPath
   254  				break
   255  			}
   256  			if !os.IsNotExist(err) {
   257  				return err
   258  			}
   259  		}
   260  		if uvmImagePath == "" {
   261  			return errors.New("utility VM image could not be found")
   262  		}
   263  		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
   264  
   265  		if spec.Root.Path != "" {
   266  			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
   267  		}
   268  	} else {
   269  		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
    270  		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
   271  			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
   272  		}
   273  		// HCS API requires the trailing backslash to be removed
   274  		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
   275  	}
   276  
   277  	if spec.Root.Readonly {
   278  		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
   279  	}
   280  
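         	// Each layer handed to HCS needs both an ID (GUID) and a host path;
         	// the GUID is derived from the layer folder's basename via hcsshim.NameToGuid.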
   281  	for _, layerPath := range layerFolders {
   282  		_, filename := filepath.Split(layerPath)
   283  		g, err := hcsshim.NameToGuid(filename)
   284  		if err != nil {
   285  			return err
   286  		}
   287  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   288  			ID:   g.ToString(),
   289  			Path: layerPath,
   290  		})
   291  	}
   292  
   293  	// Add the mounts (volumes, bind mounts etc) to the structure
   294  	var mds []hcsshim.MappedDir
   295  	var mps []hcsshim.MappedPipe
   296  	for _, mount := range spec.Mounts {
   297  		const pipePrefix = `\\.\pipe\`
   298  		if mount.Type != "" {
   299  			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
   300  		}
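         		// Destinations under \\.\pipe\ are passed to HCS as named pipes
         		// (MappedPipe); all other mounts become directory mappings (MappedDir).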
   301  		if strings.HasPrefix(mount.Destination, pipePrefix) {
   302  			mp := hcsshim.MappedPipe{
   303  				HostPath:          mount.Source,
   304  				ContainerPipeName: mount.Destination[len(pipePrefix):],
   305  			}
   306  			mps = append(mps, mp)
   307  		} else {
   308  			md := hcsshim.MappedDir{
   309  				HostPath:      mount.Source,
   310  				ContainerPath: mount.Destination,
   311  				ReadOnly:      false,
   312  			}
   313  			for _, o := range mount.Options {
   314  				if strings.ToLower(o) == "ro" {
   315  					md.ReadOnly = true
   316  				}
   317  			}
   318  			mds = append(mds, md)
   319  		}
   320  	}
   321  	configuration.MappedDirectories = mds
   322  	if len(mps) > 0 && osversion.Build() < osversion.RS3 {
   323  		return errors.New("named pipe mounts are not supported on this version of Windows")
   324  	}
   325  	configuration.MappedPipes = mps
   326  
   327  	if len(spec.Windows.Devices) > 0 {
   328  		// Add any device assignments
   329  		if configuration.HvPartition {
   330  			return errors.New("device assignment is not supported for HyperV containers")
   331  		}
   332  		if osversion.Build() < osversion.RS5 {
   333  			return errors.New("device assignment requires Windows builds RS5 (17763+) or later")
   334  		}
   335  		for _, d := range spec.Windows.Devices {
   336  			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
   337  		}
   338  	}
   339  
   340  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   341  	if err != nil {
   342  		return err
   343  	}
   344  
   345  	// Construct a container object for calling start on it.
   346  	ctr := &container{
   347  		id:           id,
   348  		execs:        make(map[string]*process),
   349  		isWindows:    true,
   350  		ociSpec:      spec,
   351  		hcsContainer: hcsContainer,
   352  		status:       containerd.Created,
   353  		waitCh:       make(chan struct{}),
   354  	}
   355  
   356  	logger.Debug("starting container")
   357  	if err = hcsContainer.Start(); err != nil {
   358  		c.logger.WithError(err).Error("failed to start container")
   359  		ctr.Lock()
   360  		if err := c.terminateContainer(ctr); err != nil {
   361  			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
   362  		} else {
   363  			c.logger.Debug("cleaned up after failed Start by calling Terminate")
   364  		}
   365  		ctr.Unlock()
   366  		return err
   367  	}
   368  
   369  	c.Lock()
   370  	c.containers[id] = ctr
   371  	c.Unlock()
   372  
   373  	logger.Debug("createWindows() completed successfully")
   374  	return nil
   375  
   376  }
   377  
   378  func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interface{}) error {
    379  	logrus.Debugf("libcontainerd: createLinux(): containerID %s", id)
   380  	logger := c.logger.WithField("container", id)
   381  
   382  	if runtimeOptions == nil {
   383  		return fmt.Errorf("lcow option must be supplied to the runtime")
   384  	}
   385  	lcowConfig, ok := runtimeOptions.(*opengcs.Config)
   386  	if !ok {
   387  		return fmt.Errorf("lcow option must be supplied to the runtime")
   388  	}
   389  
   390  	configuration := &hcsshim.ContainerConfig{
   391  		HvPartition:                 true,
   392  		Name:                        id,
   393  		SystemType:                  "container",
   394  		ContainerType:               "linux",
   395  		Owner:                       defaultOwner,
   396  		TerminateOnLastHandleClosed: true,
   397  		HvRuntime: &hcsshim.HvRuntime{
   398  			ImagePath:           lcowConfig.KirdPath,
   399  			LinuxKernelFile:     lcowConfig.KernelFile,
   400  			LinuxInitrdFile:     lcowConfig.InitrdFile,
   401  			LinuxBootParameters: lcowConfig.BootParameters,
   402  		},
   403  	}
   404  
   405  	if spec.Windows == nil {
   406  		return fmt.Errorf("spec.Windows must not be nil for LCOW containers")
   407  	}
   408  
   409  	c.extractResourcesFromSpec(spec, configuration)
   410  
    411  	// We must have at least one layer in the spec
    412  	if len(spec.Windows.LayerFolders) == 0 {
    413  		return fmt.Errorf("OCI spec is invalid - at least one LayerFolder must be supplied to the runtime")
   414  	}
   415  
   416  	// Strip off the top-most layer as that's passed in separately to HCS
   417  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   418  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   419  
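         	// For LCOW, each read-only layer is exposed to the utility VM as a VHD,
         	// so point HCS at the layer.vhd file inside each layer folder rather than
         	// at the folder itself.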
   420  	for _, layerPath := range layerFolders {
   421  		_, filename := filepath.Split(layerPath)
   422  		g, err := hcsshim.NameToGuid(filename)
   423  		if err != nil {
   424  			return err
   425  		}
   426  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   427  			ID:   g.ToString(),
   428  			Path: filepath.Join(layerPath, "layer.vhd"),
   429  		})
   430  	}
   431  
   432  	if spec.Windows.Network != nil {
   433  		configuration.EndpointList = spec.Windows.Network.EndpointList
   434  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   435  		if spec.Windows.Network.DNSSearchList != nil {
   436  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   437  		}
   438  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   439  	}
   440  
   441  	// Add the mounts (volumes, bind mounts etc) to the structure. We have to do
   442  	// some translation for both the mapped directories passed into HCS and in
   443  	// the spec.
   444  	//
   445  	// For HCS, we only pass in the mounts from the spec which are type "bind".
    446  	// Further, the "ContainerPath" field (which is a little misleadingly
   447  	// named when it applies to the utility VM rather than the container in the
   448  	// utility VM) is moved to under /tmp/gcs/<ID>/binds, where this is passed
   449  	// by the caller through a 'uvmpath' option.
   450  	//
   451  	// We do similar translation for the mounts in the spec by stripping out
   452  	// the uvmpath option, and translating the Source path to the location in the
   453  	// utility VM calculated above.
   454  	//
   455  	// From inside the utility VM, you would see a 9p mount such as in the following
   456  	// where a host folder has been mapped to /target. The line with /tmp/gcs/<ID>/binds
   457  	// specifically:
   458  	//
   459  	//	/ # mount
   460  	//	rootfs on / type rootfs (rw,size=463736k,nr_inodes=115934)
   461  	//	proc on /proc type proc (rw,relatime)
   462  	//	sysfs on /sys type sysfs (rw,relatime)
   463  	//	udev on /dev type devtmpfs (rw,relatime,size=498100k,nr_inodes=124525,mode=755)
   464  	//	tmpfs on /run type tmpfs (rw,relatime)
   465  	//	cgroup on /sys/fs/cgroup type cgroup (rw,relatime,cpuset,cpu,cpuacct,blkio,memory,devices,freezer,net_cls,perf_event,net_prio,hugetlb,pids,rdma)
   466  	//	mqueue on /dev/mqueue type mqueue (rw,relatime)
   467  	//	devpts on /dev/pts type devpts (rw,relatime,mode=600,ptmxmode=000)
   468  	//	/binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target on /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target type 9p (rw,sync,dirsync,relatime,trans=fd,rfdno=6,wfdno=6)
   469  	//	/dev/pmem0 on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
   470  	//	/dev/sda on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
   471  	//	overlay on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/rootfs type overlay (rw,relatime,lowerdir=/tmp/base/:/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0,upperdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/upper,workdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/work)
   472  	//
   473  	//  /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l
   474  	//	total 16
   475  	//	drwx------    3 0        0               60 Sep  7 18:54 binds
   476  	//	-rw-r--r--    1 0        0             3345 Sep  7 18:54 config.json
   477  	//	drwxr-xr-x   10 0        0             4096 Sep  6 17:26 layer0
   478  	//	drwxr-xr-x    1 0        0             4096 Sep  7 18:54 rootfs
   479  	//	drwxr-xr-x    5 0        0             4096 Sep  7 18:54 scratch
   480  	//
   481  	//	/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l binds
   482  	//	total 0
   483  	//	drwxrwxrwt    2 0        0             4096 Sep  7 16:51 target
   484  
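         	// As an illustration (the values here are hypothetical), a bind mount may
         	// arrive with Source `C:\somefolder`, Destination `/target` and Options
         	// ["rbind", "rw", "uvmpath=/tmp/gcs/<ID>/binds"]. The loop below strips the
         	// uvmpath option and rewrites Source to the corresponding
         	// /tmp/gcs/<ID>/binds/target location inside the utility VM.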
   485  	mds := []hcsshim.MappedDir{}
   486  	specMounts := []specs.Mount{}
   487  	for _, mount := range spec.Mounts {
   488  		specMount := mount
   489  		if mount.Type == "bind" {
   490  			// Strip out the uvmpath from the options
   491  			updatedOptions := []string{}
   492  			uvmPath := ""
   493  			readonly := false
   494  			for _, opt := range mount.Options {
   495  				dropOption := false
   496  				elements := strings.SplitN(opt, "=", 2)
   497  				switch elements[0] {
   498  				case "uvmpath":
   499  					uvmPath = elements[1]
   500  					dropOption = true
   501  				case "rw":
   502  				case "ro":
   503  					readonly = true
   504  				case "rbind":
   505  				default:
   506  					return fmt.Errorf("unsupported option %q", opt)
   507  				}
   508  				if !dropOption {
   509  					updatedOptions = append(updatedOptions, opt)
   510  				}
   511  			}
   512  			mount.Options = updatedOptions
   513  			if uvmPath == "" {
   514  				return fmt.Errorf("no uvmpath for bind mount %+v", mount)
   515  			}
   516  			md := hcsshim.MappedDir{
   517  				HostPath:          mount.Source,
   518  				ContainerPath:     path.Join(uvmPath, mount.Destination),
   519  				CreateInUtilityVM: true,
   520  				ReadOnly:          readonly,
   521  			}
    522  			// If we are on 1803/RS4 or later, enable LinuxMetadata support by default
   523  			if osversion.Build() >= osversion.RS4 {
   524  				md.LinuxMetadata = true
   525  			}
   526  			mds = append(mds, md)
   527  			specMount.Source = path.Join(uvmPath, mount.Destination)
   528  		}
   529  		specMounts = append(specMounts, specMount)
   530  	}
   531  	configuration.MappedDirectories = mds
   532  
   533  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   534  	if err != nil {
   535  		return err
   536  	}
   537  
   538  	spec.Mounts = specMounts
   539  
   540  	// Construct a container object for calling start on it.
   541  	ctr := &container{
   542  		id:           id,
   543  		execs:        make(map[string]*process),
   544  		isWindows:    false,
   545  		ociSpec:      spec,
   546  		hcsContainer: hcsContainer,
   547  		status:       containerd.Created,
   548  		waitCh:       make(chan struct{}),
   549  	}
   550  
   551  	// Start the container.
   552  	logger.Debug("starting container")
   553  	if err = hcsContainer.Start(); err != nil {
   554  		c.logger.WithError(err).Error("failed to start container")
   555  		ctr.debugGCS()
   556  		ctr.Lock()
   557  		if err := c.terminateContainer(ctr); err != nil {
   558  			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
   559  		} else {
   560  			c.logger.Debug("cleaned up after failed Start by calling Terminate")
   561  		}
   562  		ctr.Unlock()
   563  		return err
   564  	}
   565  	ctr.debugGCS()
   566  
   567  	c.Lock()
   568  	c.containers[id] = ctr
   569  	c.Unlock()
   570  
   571  	logger.Debug("createLinux() completed successfully")
   572  	return nil
   573  }
   574  
   575  func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
   576  	if spec.Windows.Resources != nil {
   577  		if spec.Windows.Resources.CPU != nil {
   578  			if spec.Windows.Resources.CPU.Count != nil {
   579  				// This check is being done here rather than in adaptContainerSettings
   580  				// because we don't want to update the HostConfig in case this container
   581  				// is moved to a host with more CPUs than this one.
   582  				cpuCount := *spec.Windows.Resources.CPU.Count
   583  				hostCPUCount := uint64(sysinfo.NumCPU())
   584  				if cpuCount > hostCPUCount {
   585  					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
   586  					cpuCount = hostCPUCount
   587  				}
   588  				configuration.ProcessorCount = uint32(cpuCount)
   589  			}
   590  			if spec.Windows.Resources.CPU.Shares != nil {
   591  				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
   592  			}
   593  			if spec.Windows.Resources.CPU.Maximum != nil {
   594  				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
   595  			}
   596  		}
   597  		if spec.Windows.Resources.Memory != nil {
   598  			if spec.Windows.Resources.Memory.Limit != nil {
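         				// The OCI spec expresses the memory limit in bytes; HCS takes megabytes.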
   599  				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
   600  			}
   601  		}
   602  	}
   603  }
   604  
   605  func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   606  	ctr := c.getContainer(id)
   607  	switch {
   608  	case ctr == nil:
   609  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   610  	case ctr.init != nil:
   611  		return -1, errors.WithStack(errdefs.NotModified(errors.New("container already started")))
   612  	}
   613  
   614  	logger := c.logger.WithField("container", id)
   615  
   616  	// Note we always tell HCS to create stdout as it's required
   617  	// regardless of '-i' or '-t' options, so that docker can always grab
   618  	// the output through logs. We also tell HCS to always create stdin,
   619  	// even if it's not used - it will be closed shortly. Stderr is only
    620  	// created if we're not running with '-t'.
   621  	var (
   622  		emulateConsole   bool
   623  		createStdErrPipe bool
   624  	)
   625  	if ctr.ociSpec.Process != nil {
   626  		emulateConsole = ctr.ociSpec.Process.Terminal
   627  		createStdErrPipe = !ctr.ociSpec.Process.Terminal
   628  	}
   629  
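         	// Note: ctr.ociSpec.Process is assumed to be non-nil below (the daemon
         	// always populates it); the nil checks above and further down only guard
         	// the optional Terminal and ConsoleSize settings.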
   630  	createProcessParms := &hcsshim.ProcessConfig{
   631  		EmulateConsole:   emulateConsole,
   632  		WorkingDirectory: ctr.ociSpec.Process.Cwd,
   633  		CreateStdInPipe:  true,
   634  		CreateStdOutPipe: true,
   635  		CreateStdErrPipe: createStdErrPipe,
   636  	}
   637  
   638  	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
   639  		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
   640  		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
   641  	}
   642  
   643  	// Configure the environment for the process
   644  	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
   645  
   646  	// Configure the CommandLine/CommandArgs
   647  	setCommandLineAndArgs(ctr.isWindows, ctr.ociSpec.Process, createProcessParms)
   648  	if ctr.isWindows {
   649  		logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)
   650  	}
   651  
   652  	createProcessParms.User = ctr.ociSpec.Process.User.Username
   653  
   654  	// LCOW requires the raw OCI spec passed through HCS and onwards to
   655  	// GCS for the utility VM.
   656  	if !ctr.isWindows {
   657  		ociBuf, err := json.Marshal(ctr.ociSpec)
   658  		if err != nil {
   659  			return -1, err
   660  		}
   661  		ociRaw := json.RawMessage(ociBuf)
   662  		createProcessParms.OCISpecification = &ociRaw
   663  	}
   664  
   665  	ctr.Lock()
   666  
   667  	// Start the command running in the container.
   668  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   669  	if err != nil {
   670  		logger.WithError(err).Error("CreateProcess() failed")
   671  		// Fix for https://github.com/moby/moby/issues/38719.
   672  		// If the init process failed to launch, we still need to reap the
   673  		// container to avoid leaking it.
   674  		//
   675  		// Note we use the explicit exit code of 127 which is the
   676  		// Linux shell equivalent of "command not found". Windows cannot
   677  		// know ahead of time whether or not the command exists, especially
   678  		// in the case of Hyper-V containers.
   679  		ctr.Unlock()
   680  		exitedAt := time.Now()
   681  		p := &process{
   682  			id:  libcontainerdtypes.InitProcessName,
   683  			pid: 0,
   684  		}
   685  		c.reapContainer(ctr, p, 127, exitedAt, nil, logger)
   686  		return -1, err
   687  	}
   688  
   689  	defer ctr.Unlock()
   690  
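         	// If any of the remaining setup below fails, kill the process we just
         	// created and release its resources so that it is not leaked.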
   691  	defer func() {
   692  		if err != nil {
   693  			if err := newProcess.Kill(); err != nil {
   694  				logger.WithError(err).Error("failed to kill process")
   695  			}
   696  			go func() {
   697  				if err := newProcess.Wait(); err != nil {
   698  					logger.WithError(err).Error("failed to wait for process")
   699  				}
   700  				if err := newProcess.Close(); err != nil {
   701  					logger.WithError(err).Error("failed to clean process resources")
   702  				}
   703  			}()
   704  		}
   705  	}()
   706  	p := &process{
   707  		hcsProcess: newProcess,
   708  		id:         libcontainerdtypes.InitProcessName,
   709  		pid:        newProcess.Pid(),
   710  	}
   711  	logger.WithField("pid", p.pid).Debug("init process started")
   712  
   713  	ctr.status = containerd.Running
   714  	ctr.init = p
   715  
    716  	// Spin up a goroutine waiting for exit to handle cleanup
   717  	go c.reapProcess(ctr, p)
   718  
   719  	// Don't shadow err here due to our deferred clean-up.
   720  	var dio *cio.DirectIO
   721  	dio, err = newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
   722  	if err != nil {
   723  		logger.WithError(err).Error("failed to get stdio pipes")
   724  		return -1, err
   725  	}
   726  	_, err = attachStdio(dio)
   727  	if err != nil {
   728  		logger.WithError(err).Error("failed to attach stdio")
   729  		return -1, err
   730  	}
   731  
   732  	// Generate the associated event
   733  	c.eventQ.Append(id, func() {
   734  		ei := libcontainerdtypes.EventInfo{
   735  			ContainerID: id,
   736  			ProcessID:   libcontainerdtypes.InitProcessName,
   737  			Pid:         uint32(p.pid),
   738  		}
   739  		c.logger.WithFields(logrus.Fields{
   740  			"container":  ctr.id,
   741  			"event":      libcontainerdtypes.EventStart,
   742  			"event-info": ei,
   743  		}).Info("sending event")
   744  		err := c.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
   745  		if err != nil {
   746  			c.logger.WithError(err).WithFields(logrus.Fields{
   747  				"container":  id,
   748  				"event":      libcontainerdtypes.EventStart,
   749  				"event-info": ei,
   750  			}).Error("failed to process event")
   751  		}
   752  	})
   753  	logger.Debug("start() completed")
   754  	return p.pid, nil
   755  }
   756  
   757  // setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
   758  func setCommandLineAndArgs(isWindows bool, process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
   759  	if isWindows {
   760  		if process.CommandLine != "" {
   761  			createProcessParms.CommandLine = process.CommandLine
   762  		} else {
   763  			createProcessParms.CommandLine = system.EscapeArgs(process.Args)
   764  		}
   765  	} else {
   766  		createProcessParms.CommandArgs = process.Args
   767  	}
   768  }
   769  
   770  func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
   771  	stdin, stdout, stderr, err := newProcess.Stdio()
   772  	if err != nil {
   773  		return nil, err
   774  	}
   775  
   776  	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)
   777  
   778  	// Convert io.ReadClosers to io.Readers
   779  	if stdout != nil {
   780  		dio.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
   781  	}
   782  	if stderr != nil {
   783  		dio.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
   784  	}
   785  	return dio, nil
   786  }
   787  
    788  // Exec adds a process to a running container
   789  func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   790  	ctr := c.getContainer(containerID)
   791  	switch {
   792  	case ctr == nil:
   793  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   794  	case ctr.hcsContainer == nil:
   795  		return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
   796  	case ctr.execs != nil && ctr.execs[processID] != nil:
   797  		return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   798  	}
   799  	logger := c.logger.WithFields(logrus.Fields{
   800  		"container": containerID,
   801  		"exec":      processID,
   802  	})
   803  
   804  	// Note we always tell HCS to
   805  	// create stdout as it's required regardless of '-i' or '-t' options, so that
   806  	// docker can always grab the output through logs. We also tell HCS to always
   807  	// create stdin, even if it's not used - it will be closed shortly. Stderr
    808  	// is only created if we're not running with '-t'.
   809  	createProcessParms := &hcsshim.ProcessConfig{
   810  		CreateStdInPipe:  true,
   811  		CreateStdOutPipe: true,
   812  		CreateStdErrPipe: !spec.Terminal,
   813  	}
   814  	if spec.Terminal {
   815  		createProcessParms.EmulateConsole = true
   816  		if spec.ConsoleSize != nil {
   817  			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
   818  			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
   819  		}
   820  	}
   821  
    822  	// Take the working directory from the process spec if it is defined,
    823  	// otherwise fall back to the container's init process.
   824  	if spec.Cwd != "" {
   825  		createProcessParms.WorkingDirectory = spec.Cwd
   826  	} else {
   827  		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
   828  	}
   829  
   830  	// Configure the environment for the process
   831  	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
   832  
   833  	// Configure the CommandLine/CommandArgs
   834  	setCommandLineAndArgs(ctr.isWindows, spec, createProcessParms)
   835  	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)
   836  
   837  	createProcessParms.User = spec.User.Username
   838  
   839  	// Start the command running in the container.
   840  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   841  	if err != nil {
   842  		logger.WithError(err).Errorf("exec's CreateProcess() failed")
   843  		return -1, err
   844  	}
   845  	pid := newProcess.Pid()
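         	// If any of the remaining setup below fails, kill the exec'd process and
         	// release its resources so that it is not leaked.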
   846  	defer func() {
   847  		if err != nil {
   848  			if err := newProcess.Kill(); err != nil {
   849  				logger.WithError(err).Error("failed to kill process")
   850  			}
   851  			go func() {
   852  				if err := newProcess.Wait(); err != nil {
   853  					logger.WithError(err).Error("failed to wait for process")
   854  				}
   855  				if err := newProcess.Close(); err != nil {
   856  					logger.WithError(err).Error("failed to clean process resources")
   857  				}
   858  			}()
   859  		}
   860  	}()
   861  
   862  	dio, err := newIOFromProcess(newProcess, spec.Terminal)
   863  	if err != nil {
   864  		logger.WithError(err).Error("failed to get stdio pipes")
   865  		return -1, err
   866  	}
   867  	// Tell the engine to attach streams back to the client
   868  	_, err = attachStdio(dio)
   869  	if err != nil {
   870  		return -1, err
   871  	}
   872  
   873  	p := &process{
   874  		id:         processID,
   875  		pid:        pid,
   876  		hcsProcess: newProcess,
   877  	}
   878  
   879  	// Add the process to the container's list of processes
   880  	ctr.Lock()
   881  	ctr.execs[processID] = p
   882  	ctr.Unlock()
   883  
    884  	// Spin up a goroutine waiting for exit to handle cleanup
   885  	go c.reapProcess(ctr, p)
   886  
   887  	c.eventQ.Append(ctr.id, func() {
   888  		ei := libcontainerdtypes.EventInfo{
   889  			ContainerID: ctr.id,
   890  			ProcessID:   p.id,
   891  			Pid:         uint32(p.pid),
   892  		}
   893  		c.logger.WithFields(logrus.Fields{
   894  			"container":  ctr.id,
   895  			"event":      libcontainerdtypes.EventExecAdded,
   896  			"event-info": ei,
   897  		}).Info("sending event")
   898  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecAdded, ei)
   899  		if err != nil {
   900  			c.logger.WithError(err).WithFields(logrus.Fields{
   901  				"container":  ctr.id,
   902  				"event":      libcontainerdtypes.EventExecAdded,
   903  				"event-info": ei,
   904  			}).Error("failed to process event")
   905  		}
   906  		err = c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecStarted, ei)
   907  		if err != nil {
   908  			c.logger.WithError(err).WithFields(logrus.Fields{
   909  				"container":  ctr.id,
   910  				"event":      libcontainerdtypes.EventExecStarted,
   911  				"event-info": ei,
   912  			}).Error("failed to process event")
   913  		}
   914  	})
   915  
   916  	return pid, nil
   917  }
   918  
    919  // SignalProcess handles `docker stop` on Windows. While Linux has support for
   920  // the full range of signals, signals aren't really implemented on Windows.
   921  // We fake supporting regular stop and -9 to force kill.
   922  func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
   923  	ctr, p, err := c.getProcess(containerID, processID)
   924  	if err != nil {
   925  		return err
   926  	}
   927  
   928  	logger := c.logger.WithFields(logrus.Fields{
   929  		"container": containerID,
   930  		"process":   processID,
   931  		"pid":       p.pid,
   932  		"signal":    signal,
   933  	})
   934  	logger.Debug("Signal()")
   935  
   936  	if processID == libcontainerdtypes.InitProcessName {
   937  		if syscall.Signal(signal) == syscall.SIGKILL {
   938  			// Terminate the compute system
   939  			ctr.Lock()
   940  			ctr.terminateInvoked = true
   941  			if err := ctr.hcsContainer.Terminate(); err != nil {
   942  				if !hcsshim.IsPending(err) {
    943  					logger.WithError(err).Error("failed to terminate hcsshim container")
   944  				}
   945  			}
   946  			ctr.Unlock()
   947  		} else {
   948  			// Shut down the container
   949  			if err := ctr.hcsContainer.Shutdown(); err != nil {
   950  				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
    951  					// the error is logged, but not returned
    952  					logger.WithError(err).Error("failed to shutdown hcsshim container")
   953  				}
   954  			}
   955  		}
   956  	} else {
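         		// For exec'd (non-init) processes, killing the process is the only
         		// supported form of signal delivery.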
   957  		return p.hcsProcess.Kill()
   958  	}
   959  
   960  	return nil
   961  }
   962  
    963  // ResizeTerminal handles a CLI event to resize an interactive docker run or docker
   964  // exec window.
   965  func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
   966  	_, p, err := c.getProcess(containerID, processID)
   967  	if err != nil {
   968  		return err
   969  	}
   970  
   971  	c.logger.WithFields(logrus.Fields{
   972  		"container": containerID,
   973  		"process":   processID,
   974  		"height":    height,
   975  		"width":     width,
   976  		"pid":       p.pid,
   977  	}).Debug("resizing")
   978  	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
   979  }
   980  
   981  func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
   982  	_, p, err := c.getProcess(containerID, processID)
   983  	if err != nil {
   984  		return err
   985  	}
   986  
   987  	return p.hcsProcess.CloseStdin()
   988  }
   989  
   990  // Pause handles pause requests for containers
   991  func (c *client) Pause(_ context.Context, containerID string) error {
   992  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   993  	if err != nil {
   994  		return err
   995  	}
   996  
   997  	if ctr.ociSpec.Windows.HyperV == nil {
   998  		return errors.New("cannot pause Windows Server Containers")
   999  	}
  1000  
  1001  	ctr.Lock()
  1002  	defer ctr.Unlock()
  1003  
  1004  	if err = ctr.hcsContainer.Pause(); err != nil {
  1005  		return err
  1006  	}
  1007  
  1008  	ctr.status = containerd.Paused
  1009  
  1010  	c.eventQ.Append(containerID, func() {
  1011  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
  1012  			ContainerID: containerID,
  1013  			ProcessID:   libcontainerdtypes.InitProcessName,
  1014  		})
  1015  		c.logger.WithFields(logrus.Fields{
  1016  			"container": ctr.id,
  1017  			"event":     libcontainerdtypes.EventPaused,
  1018  		}).Info("sending event")
  1019  		if err != nil {
  1020  			c.logger.WithError(err).WithFields(logrus.Fields{
  1021  				"container": containerID,
  1022  				"event":     libcontainerdtypes.EventPaused,
  1023  			}).Error("failed to process event")
  1024  		}
  1025  	})
  1026  
  1027  	return nil
  1028  }
  1029  
  1030  // Resume handles resume requests for containers
  1031  func (c *client) Resume(_ context.Context, containerID string) error {
  1032  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
  1033  	if err != nil {
  1034  		return err
  1035  	}
  1036  
  1037  	if ctr.ociSpec.Windows.HyperV == nil {
  1038  		return errors.New("cannot resume Windows Server Containers")
  1039  	}
  1040  
  1041  	ctr.Lock()
  1042  	defer ctr.Unlock()
  1043  
  1044  	if err = ctr.hcsContainer.Resume(); err != nil {
  1045  		return err
  1046  	}
  1047  
  1048  	ctr.status = containerd.Running
  1049  
  1050  	c.eventQ.Append(containerID, func() {
  1051  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
  1052  			ContainerID: containerID,
  1053  			ProcessID:   libcontainerdtypes.InitProcessName,
  1054  		})
  1055  		c.logger.WithFields(logrus.Fields{
  1056  			"container": ctr.id,
  1057  			"event":     libcontainerdtypes.EventResumed,
  1058  		}).Info("sending event")
  1059  		if err != nil {
  1060  			c.logger.WithError(err).WithFields(logrus.Fields{
  1061  				"container": containerID,
  1062  				"event":     libcontainerdtypes.EventResumed,
  1063  			}).Error("failed to process event")
  1064  		}
  1065  	})
  1066  
  1067  	return nil
  1068  }
  1069  
  1070  // Stats handles stats requests for containers
  1071  func (c *client) Stats(_ context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
  1072  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
  1073  	if err != nil {
  1074  		return nil, err
  1075  	}
  1076  
  1077  	readAt := time.Now()
  1078  	s, err := ctr.hcsContainer.Statistics()
  1079  	if err != nil {
  1080  		return nil, err
  1081  	}
  1082  	return &libcontainerdtypes.Stats{
  1083  		Read:     readAt,
  1084  		HCSStats: &s,
  1085  	}, nil
  1086  }
  1087  
  1088  // Restore is the handler for restoring a container
  1089  func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (bool, int, libcontainerdtypes.Process, error) {
  1090  	c.logger.WithField("container", id).Debug("restore()")
  1091  
  1092  	// TODO Windows: On RS1, a re-attach isn't possible.
  1093  	// However, there is a scenario in which there is an issue.
  1094  	// Consider a background container. The daemon dies unexpectedly.
  1095  	// HCS will still have the compute service alive and running.
   1096  	// For consistency, we call in to shoot it regardless of whether HCS knows about it.
   1097  	// If the terminate fails we log it and return the error.
  1098  	// Then we tell the backend the container exited.
  1099  	if hc, err := hcsshim.OpenContainer(id); err == nil {
  1100  		const terminateTimeout = time.Minute * 2
  1101  		err := hc.Terminate()
  1102  
  1103  		if hcsshim.IsPending(err) {
  1104  			err = hc.WaitTimeout(terminateTimeout)
  1105  		} else if hcsshim.IsAlreadyStopped(err) {
  1106  			err = nil
  1107  		}
  1108  
  1109  		if err != nil {
  1110  			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
  1111  			return false, -1, nil, err
  1112  		}
  1113  	}
  1114  	return false, -1, &restoredProcess{
  1115  		c:  c,
  1116  		id: id,
  1117  	}, nil
  1118  }
  1119  
   1120  // ListPids returns a list of process IDs running in a container.
  1121  // Not used on Windows.
  1122  func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
  1123  	return nil, errors.New("not implemented on Windows")
  1124  }
  1125  
  1126  // Summary returns a summary of the processes running in a container.
   1127  // This is present in Windows to support docker top. On Linux, the
  1128  // engine shells out to ps to get process information. On Windows, as
  1129  // the containers could be Hyper-V containers, they would not be
  1130  // visible on the container host. However, libcontainerd does have
  1131  // that information.
  1132  func (c *client) Summary(_ context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
  1133  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
  1134  	if err != nil {
  1135  		return nil, err
  1136  	}
  1137  
  1138  	p, err := ctr.hcsContainer.ProcessList()
  1139  	if err != nil {
  1140  		return nil, err
  1141  	}
  1142  
  1143  	pl := make([]libcontainerdtypes.Summary, len(p))
  1144  	for i := range p {
  1145  		pl[i] = libcontainerdtypes.Summary{
  1146  			ImageName:                    p[i].ImageName,
  1147  			CreatedAt:                    p[i].CreateTimestamp,
  1148  			KernelTime_100Ns:             p[i].KernelTime100ns,
  1149  			MemoryCommitBytes:            p[i].MemoryCommitBytes,
  1150  			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
  1151  			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
  1152  			ProcessID:                    p[i].ProcessId,
  1153  			UserTime_100Ns:               p[i].UserTime100ns,
  1154  			ExecID:                       "",
  1155  		}
  1156  	}
  1157  	return pl, nil
  1158  }
  1159  
  1160  type restoredProcess struct {
  1161  	id string
  1162  	c  *client
  1163  }
  1164  
  1165  func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
  1166  	return p.c.DeleteTask(ctx, p.id)
  1167  }
  1168  
  1169  func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
  1170  	ec := -1
  1171  	ctr := c.getContainer(containerID)
  1172  	if ctr == nil {
  1173  		return uint32(ec), time.Now(), errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1174  	}
  1175  
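         	// waitCh is closed by reapContainer once the init process has exited,
         	// so a non-blocking receive tells us whether the container has stopped.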
  1176  	select {
  1177  	case <-ctx.Done():
  1178  		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
  1179  	case <-ctr.waitCh:
  1180  	default:
  1181  		return uint32(ec), time.Now(), errors.New("container is not stopped")
  1182  	}
  1183  
  1184  	ctr.Lock()
  1185  	defer ctr.Unlock()
  1186  	return ctr.exitCode, ctr.exitedAt, nil
  1187  }
  1188  
  1189  func (c *client) Delete(_ context.Context, containerID string) error {
  1190  	c.Lock()
  1191  	defer c.Unlock()
  1192  	ctr := c.containers[containerID]
  1193  	if ctr == nil {
  1194  		return errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1195  	}
  1196  
  1197  	ctr.Lock()
  1198  	defer ctr.Unlock()
  1199  
  1200  	switch ctr.status {
  1201  	case containerd.Created:
  1202  		if err := c.shutdownContainer(ctr); err != nil {
  1203  			return err
  1204  		}
  1205  		fallthrough
  1206  	case containerd.Stopped:
  1207  		delete(c.containers, containerID)
  1208  		return nil
  1209  	}
  1210  
  1211  	return errors.WithStack(errdefs.InvalidParameter(errors.New("container is not stopped")))
  1212  }
  1213  
  1214  func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
  1215  	c.Lock()
  1216  	defer c.Unlock()
  1217  	ctr := c.containers[containerID]
  1218  	if ctr == nil {
  1219  		return containerd.Unknown, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1220  	}
  1221  
  1222  	ctr.Lock()
  1223  	defer ctr.Unlock()
  1224  	return ctr.status, nil
  1225  }
  1226  
  1227  func (c *client) UpdateResources(ctx context.Context, containerID string, resources *libcontainerdtypes.Resources) error {
   1228  	// Updating resources isn't supported on Windows,
   1229  	// but we return nil so that the update code path does not fail.
  1230  	return nil
  1231  }
  1232  
  1233  func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
  1234  	return errors.New("Windows: Containers do not support checkpoints")
  1235  }
  1236  
  1237  func (c *client) getContainer(id string) *container {
  1238  	c.Lock()
  1239  	ctr := c.containers[id]
  1240  	c.Unlock()
  1241  
  1242  	return ctr
  1243  }
  1244  
  1245  func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
  1246  	ctr := c.getContainer(containerID)
  1247  	switch {
  1248  	case ctr == nil:
  1249  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1250  	case ctr.init == nil:
  1251  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
  1252  	case processID == libcontainerdtypes.InitProcessName:
  1253  		return ctr, ctr.init, nil
  1254  	default:
  1255  		ctr.Lock()
  1256  		defer ctr.Unlock()
  1257  		if ctr.execs == nil {
  1258  			return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no execs")))
  1259  		}
  1260  	}
  1261  
  1262  	p := ctr.execs[processID]
  1263  	if p == nil {
  1264  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
  1265  	}
  1266  
  1267  	return ctr, p, nil
  1268  }
  1269  
  1270  // ctr mutex must be held when calling this function.
  1271  func (c *client) shutdownContainer(ctr *container) error {
  1272  	var err error
  1273  	const waitTimeout = time.Minute * 5
  1274  
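         	// If Terminate() was already invoked (for example via SignalProcess with
         	// SIGKILL), skip Shutdown() and just wait for the termination to complete.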
  1275  	if !ctr.terminateInvoked {
  1276  		err = ctr.hcsContainer.Shutdown()
  1277  	}
  1278  
  1279  	if hcsshim.IsPending(err) || ctr.terminateInvoked {
  1280  		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
  1281  	} else if hcsshim.IsAlreadyStopped(err) {
  1282  		err = nil
  1283  	}
  1284  
  1285  	if err != nil {
  1286  		c.logger.WithError(err).WithField("container", ctr.id).
  1287  			Debug("failed to shutdown container, terminating it")
  1288  		terminateErr := c.terminateContainer(ctr)
  1289  		if terminateErr != nil {
  1290  			c.logger.WithError(terminateErr).WithField("container", ctr.id).
  1291  				Error("failed to shutdown container, and subsequent terminate also failed")
  1292  			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
  1293  		}
  1294  		return err
  1295  	}
  1296  
  1297  	return nil
  1298  }
  1299  
  1300  // ctr mutex must be held when calling this function.
  1301  func (c *client) terminateContainer(ctr *container) error {
  1302  	const terminateTimeout = time.Minute * 5
  1303  	ctr.terminateInvoked = true
  1304  	err := ctr.hcsContainer.Terminate()
  1305  
  1306  	if hcsshim.IsPending(err) {
  1307  		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
  1308  	} else if hcsshim.IsAlreadyStopped(err) {
  1309  		err = nil
  1310  	}
  1311  
  1312  	if err != nil {
  1313  		c.logger.WithError(err).WithField("container", ctr.id).
  1314  			Debug("failed to terminate container")
  1315  		return err
  1316  	}
  1317  
  1318  	return nil
  1319  }
  1320  
  1321  func (c *client) reapProcess(ctr *container, p *process) int {
  1322  	logger := c.logger.WithFields(logrus.Fields{
  1323  		"container": ctr.id,
  1324  		"process":   p.id,
  1325  	})
  1326  
  1327  	var eventErr error
  1328  
  1329  	// Block indefinitely for the process to exit.
  1330  	if err := p.hcsProcess.Wait(); err != nil {
  1331  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1332  			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
  1333  		}
  1334  		// Fall through here, do not return. This ensures we attempt to
  1335  		// continue the shutdown in HCS and tell the docker engine that the
  1336  		// process/container has exited to avoid a container being dropped on
  1337  		// the floor.
  1338  	}
  1339  	exitedAt := time.Now()
  1340  
  1341  	exitCode, err := p.hcsProcess.ExitCode()
  1342  	if err != nil {
  1343  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1344  			logger.WithError(err).Warnf("unable to get exit code for process")
  1345  		}
  1346  		// Since we got an error retrieving the exit code, make sure that the
  1347  		// code we return doesn't incorrectly indicate success.
  1348  		exitCode = -1
  1349  
  1350  		// Fall through here, do not return. This ensures we attempt to
  1351  		// continue the shutdown in HCS and tell the docker engine that the
  1352  		// process/container has exited to avoid a container being dropped on
  1353  		// the floor.
  1354  	}
  1355  
  1356  	if err := p.hcsProcess.Close(); err != nil {
  1357  		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
  1358  		exitCode = -1
  1359  		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
  1360  	}
  1361  
  1362  	if p.id == libcontainerdtypes.InitProcessName {
  1363  		exitCode, eventErr = c.reapContainer(ctr, p, exitCode, exitedAt, eventErr, logger)
  1364  	}
  1365  
  1366  	c.eventQ.Append(ctr.id, func() {
  1367  		ei := libcontainerdtypes.EventInfo{
  1368  			ContainerID: ctr.id,
  1369  			ProcessID:   p.id,
  1370  			Pid:         uint32(p.pid),
  1371  			ExitCode:    uint32(exitCode),
  1372  			ExitedAt:    exitedAt,
  1373  			Error:       eventErr,
  1374  		}
  1375  		c.logger.WithFields(logrus.Fields{
  1376  			"container":  ctr.id,
  1377  			"event":      libcontainerdtypes.EventExit,
  1378  			"event-info": ei,
  1379  		}).Info("sending event")
  1380  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExit, ei)
  1381  		if err != nil {
  1382  			c.logger.WithError(err).WithFields(logrus.Fields{
  1383  				"container":  ctr.id,
  1384  				"event":      libcontainerdtypes.EventExit,
  1385  				"event-info": ei,
  1386  			}).Error("failed to process event")
  1387  		}
  1388  		if p.id != libcontainerdtypes.InitProcessName {
  1389  			ctr.Lock()
  1390  			delete(ctr.execs, p.id)
  1391  			ctr.Unlock()
  1392  		}
  1393  	})
  1394  
  1395  	return exitCode
  1396  }
  1397  
  1398  // reapContainer shuts down the container and releases associated resources. It returns
  1399  // the error to be logged in the eventInfo sent back to the monitor.
  1400  func (c *client) reapContainer(ctr *container, p *process, exitCode int, exitedAt time.Time, eventErr error, logger *logrus.Entry) (int, error) {
  1401  	// Update container status
  1402  	ctr.Lock()
  1403  	ctr.status = containerd.Stopped
  1404  	ctr.exitedAt = exitedAt
  1405  	ctr.exitCode = uint32(exitCode)
  1406  	close(ctr.waitCh)
  1407  
  1408  	if err := c.shutdownContainer(ctr); err != nil {
  1409  		exitCode = -1
  1410  		logger.WithError(err).Warn("failed to shutdown container")
  1411  		thisErr := errors.Wrap(err, "failed to shutdown container")
  1412  		if eventErr != nil {
  1413  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1414  		} else {
  1415  			eventErr = thisErr
  1416  		}
  1417  	} else {
  1418  		logger.Debug("completed container shutdown")
  1419  	}
  1420  	ctr.Unlock()
  1421  
  1422  	if err := ctr.hcsContainer.Close(); err != nil {
  1423  		exitCode = -1
  1424  		logger.WithError(err).Error("failed to clean hcs container resources")
  1425  		thisErr := errors.Wrap(err, "failed to terminate container")
  1426  		if eventErr != nil {
  1427  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1428  		} else {
  1429  			eventErr = thisErr
  1430  		}
  1431  	}
  1432  	return exitCode, eventErr
  1433  }