github.com/ssdev-go/moby@v17.12.1-ce-rc2+incompatible/libcontainerd/client_local_windows.go

     1  package libcontainerd
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"fmt"
     7  	"io"
     8  	"io/ioutil"
     9  	"os"
    10  	"path"
    11  	"path/filepath"
    12  	"regexp"
    13  	"strings"
    14  	"sync"
    15  	"syscall"
    16  	"time"
    17  
    18  	"github.com/Microsoft/hcsshim"
    19  	opengcs "github.com/Microsoft/opengcs/client"
    20  	"github.com/containerd/containerd"
    21  	"github.com/docker/docker/pkg/sysinfo"
    22  	"github.com/docker/docker/pkg/system"
    23  	specs "github.com/opencontainers/runtime-spec/specs-go"
    24  	"github.com/pkg/errors"
    25  	"github.com/sirupsen/logrus"
    26  	"golang.org/x/sys/windows"
    27  )
    28  
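        // InitProcessName is the name under which the container's init (first) process is tracked.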
    29  const InitProcessName = "init"
    30  
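        // process wraps an HCS process handle along with the identifier and PID that libcontainerd tracks for it.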
    31  type process struct {
    32  	id         string
    33  	pid        int
    34  	hcsProcess hcsshim.Process
    35  }
    36  
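        // container holds the state libcontainerd keeps for a single HCS container:
        // its OCI spec, the HCS handle, the init and exec processes, and its lifecycle status.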
    37  type container struct {
    38  	sync.Mutex
    39  
    40  	// The ociSpec is required, as client.Create() needs a spec, but can
    41  	// be called from the RestartManager context which does not otherwise
    42  	// have access to the Spec
    43  	ociSpec *specs.Spec
    44  
    45  	isWindows           bool
    46  	manualStopRequested bool
    47  	hcsContainer        hcsshim.Container
    48  
    49  	id            string
    50  	status        Status
    51  	exitedAt      time.Time
    52  	exitCode      uint32
    53  	waitCh        chan struct{}
    54  	init          *process
    55  	execs         map[string]*process
    56  	updatePending bool
    57  }
    58  
    59  // Win32 error codes that are used for various workarounds
     60  // These really should be ALL_CAPS to match Go's syscall library and standard
    61  // Win32 error conventions, but golint insists on CamelCase.
    62  const (
    63  	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
    64  	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
    65  	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
    66  	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
    67  )
    68  
    69  // defaultOwner is a tag passed to HCS to allow it to differentiate between
     70  // container creator management stacks. We hard-code "docker" in the
     71  // case of the docker engine.
    72  const defaultOwner = "docker"
    73  
    74  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
    75  	return containerd.Version{}, errors.New("not implemented on Windows")
    76  }
    77  
    78  // Create is the entrypoint to create a container from a spec.
     79  // The table below shows the fields required for the HCS JSON calling parameters;
     80  // fields that are not populated are omitted.
    81  // +-----------------+--------------------------------------------+---------------------------------------------------+
    82  // |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
    83  // +-----------------+--------------------------------------------+---------------------------------------------------+
    84  // | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
    85  // | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
    86  // | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
    87  // | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
    88  // +-----------------+--------------------------------------------+---------------------------------------------------+
    89  //
    90  // Isolation=Process example:
    91  //
    92  // {
    93  //	"SystemType": "Container",
    94  //	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
    95  //	"Owner": "docker",
    96  //	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
    97  //	"IgnoreFlushesDuringBoot": true,
    98  //	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
    99  //	"Layers": [{
   100  //		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   101  //		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   102  //	}],
   103  //	"HostName": "5e0055c814a6",
   104  //	"MappedDirectories": [],
   105  //	"HvPartition": false,
   106  //	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
   107  //	"Servicing": false
   108  //}
   109  //
   110  // Isolation=Hyper-V example:
   111  //
   112  //{
   113  //	"SystemType": "Container",
   114  //	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
   115  //	"Owner": "docker",
   116  //	"IgnoreFlushesDuringBoot": true,
   117  //	"Layers": [{
   118  //		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   119  //		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   120  //	}],
   121  //	"HostName": "475c2c58933b",
   122  //	"MappedDirectories": [],
   123  //	"HvPartition": true,
   124  //	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
   125  //	"DNSSearchList": "a.com,b.com,c.com",
   126  //	"HvRuntime": {
   127  //		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
   128  //	},
   129  //	"Servicing": false
   130  //}
   131  func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}) error {
   132  	if ctr := c.getContainer(id); ctr != nil {
   133  		return errors.WithStack(newConflictError("id already in use"))
   134  	}
   135  
   136  	// spec.Linux must be nil for Windows containers, but spec.Windows
   137  	// will be filled in regardless of container platform.  This is a
   138  	// temporary workaround due to LCOW requiring layer folder paths,
   139  	// which are stored under spec.Windows.
   140  	//
   141  	// TODO: @darrenstahlmsft fix this once the OCI spec is updated to
   142  	// support layer folder paths for LCOW
   143  	if spec.Linux == nil {
   144  		return c.createWindows(id, spec, runtimeOptions)
   145  	}
   146  	return c.createLinux(id, spec, runtimeOptions)
   147  }
   148  
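        // createWindows builds an hcsshim.ContainerConfig from the OCI spec (resources,
        // networking, credentials, layers and mounts), creates the HCS container and
        // starts it. For servicing containers the Start() call blocks until servicing
        // has completed.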
   149  func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
   150  	logger := c.logger.WithField("container", id)
   151  	configuration := &hcsshim.ContainerConfig{
   152  		SystemType: "Container",
   153  		Name:       id,
   154  		Owner:      defaultOwner,
   155  		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
   156  		HostName:                spec.Hostname,
   157  		HvPartition:             false,
   158  		Servicing:               spec.Windows.Servicing,
   159  	}
   160  
   161  	if spec.Windows.Resources != nil {
   162  		if spec.Windows.Resources.CPU != nil {
   163  			if spec.Windows.Resources.CPU.Count != nil {
   164  				// This check is being done here rather than in adaptContainerSettings
   165  				// because we don't want to update the HostConfig in case this container
   166  				// is moved to a host with more CPUs than this one.
   167  				cpuCount := *spec.Windows.Resources.CPU.Count
   168  				hostCPUCount := uint64(sysinfo.NumCPU())
   169  				if cpuCount > hostCPUCount {
   170  					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
   171  					cpuCount = hostCPUCount
   172  				}
   173  				configuration.ProcessorCount = uint32(cpuCount)
   174  			}
   175  			if spec.Windows.Resources.CPU.Shares != nil {
   176  				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
   177  			}
   178  			if spec.Windows.Resources.CPU.Maximum != nil {
   179  				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
   180  			}
   181  		}
   182  		if spec.Windows.Resources.Memory != nil {
   183  			if spec.Windows.Resources.Memory.Limit != nil {
   184  				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
   185  			}
   186  		}
   187  		if spec.Windows.Resources.Storage != nil {
   188  			if spec.Windows.Resources.Storage.Bps != nil {
   189  				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
   190  			}
   191  			if spec.Windows.Resources.Storage.Iops != nil {
   192  				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
   193  			}
   194  		}
   195  	}
   196  
   197  	if spec.Windows.HyperV != nil {
   198  		configuration.HvPartition = true
   199  	}
   200  
   201  	if spec.Windows.Network != nil {
   202  		configuration.EndpointList = spec.Windows.Network.EndpointList
   203  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   204  		if spec.Windows.Network.DNSSearchList != nil {
   205  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   206  		}
   207  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   208  	}
   209  
   210  	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
   211  		configuration.Credentials = cs
   212  	}
   213  
    214  	// We must have at least two layers in the spec, the bottom one being a
   215  	// base image, the top one being the RW layer.
   216  	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
   217  		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
   218  	}
   219  
   220  	// Strip off the top-most layer as that's passed in separately to HCS
   221  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   222  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   223  
   224  	if configuration.HvPartition {
   225  		// We don't currently support setting the utility VM image explicitly.
   226  		// TODO @swernli/jhowardmsft circa RS3/4, this may be re-locatable.
   227  		if spec.Windows.HyperV.UtilityVMPath != "" {
   228  			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
   229  		}
   230  
   231  		// Find the upper-most utility VM image.
   232  		var uvmImagePath string
   233  		for _, path := range layerFolders {
   234  			fullPath := filepath.Join(path, "UtilityVM")
   235  			_, err := os.Stat(fullPath)
   236  			if err == nil {
   237  				uvmImagePath = fullPath
   238  				break
   239  			}
   240  			if !os.IsNotExist(err) {
   241  				return err
   242  			}
   243  		}
   244  		if uvmImagePath == "" {
   245  			return errors.New("utility VM image could not be found")
   246  		}
   247  		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
   248  
   249  		if spec.Root.Path != "" {
   250  			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
   251  		}
   252  	} else {
   253  		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
    254  		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
   255  			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
   256  		}
   257  		// HCS API requires the trailing backslash to be removed
   258  		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
   259  	}
   260  
   261  	if spec.Root.Readonly {
   262  		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
   263  	}
   264  
   265  	for _, layerPath := range layerFolders {
   266  		_, filename := filepath.Split(layerPath)
   267  		g, err := hcsshim.NameToGuid(filename)
   268  		if err != nil {
   269  			return err
   270  		}
   271  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   272  			ID:   g.ToString(),
   273  			Path: layerPath,
   274  		})
   275  	}
   276  
   277  	// Add the mounts (volumes, bind mounts etc) to the structure
   278  	var mds []hcsshim.MappedDir
   279  	var mps []hcsshim.MappedPipe
   280  	for _, mount := range spec.Mounts {
   281  		const pipePrefix = `\\.\pipe\`
   282  		if mount.Type != "" {
   283  			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
   284  		}
   285  		if strings.HasPrefix(mount.Destination, pipePrefix) {
   286  			mp := hcsshim.MappedPipe{
   287  				HostPath:          mount.Source,
   288  				ContainerPipeName: mount.Destination[len(pipePrefix):],
   289  			}
   290  			mps = append(mps, mp)
   291  		} else {
   292  			md := hcsshim.MappedDir{
   293  				HostPath:      mount.Source,
   294  				ContainerPath: mount.Destination,
   295  				ReadOnly:      false,
   296  			}
   297  			for _, o := range mount.Options {
   298  				if strings.ToLower(o) == "ro" {
   299  					md.ReadOnly = true
   300  				}
   301  			}
   302  			mds = append(mds, md)
   303  		}
   304  	}
   305  	configuration.MappedDirectories = mds
   306  	if len(mps) > 0 && system.GetOSVersion().Build < 16210 { // replace with Win10 RS3 build number at RTM
   307  		return errors.New("named pipe mounts are not supported on this version of Windows")
   308  	}
   309  	configuration.MappedPipes = mps
   310  
   311  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   312  	if err != nil {
   313  		return err
   314  	}
   315  
   316  	// Construct a container object for calling start on it.
   317  	ctr := &container{
   318  		id:           id,
   319  		execs:        make(map[string]*process),
   320  		isWindows:    true,
   321  		ociSpec:      spec,
   322  		hcsContainer: hcsContainer,
   323  		status:       StatusCreated,
   324  		waitCh:       make(chan struct{}),
   325  	}
   326  
   327  	// Start the container. If this is a servicing container, this call
   328  	// will block until the container is done with the servicing
   329  	// execution.
   330  	logger.Debug("starting container")
   331  	if err = hcsContainer.Start(); err != nil {
   332  		c.logger.WithError(err).Error("failed to start container")
   333  		ctr.debugGCS()
   334  		if err := c.terminateContainer(ctr); err != nil {
   335  			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
   336  		} else {
   337  			c.logger.Debug("cleaned up after failed Start by calling Terminate")
   338  		}
   339  		return err
   340  	}
   341  	ctr.debugGCS()
   342  
   343  	c.Lock()
   344  	c.containers[id] = ctr
   345  	c.Unlock()
   346  
   347  	logger.Debug("createWindows() completed successfully")
   348  	return nil
   349  
   350  }
   351  
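        // createLinux creates an LCOW container: a Hyper-V partition booted either from
        // the opengcs kernel/initrd or from a VHDX, with the layer VHDs and bind mounts
        // from the OCI spec mapped into the utility VM.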
   352  func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interface{}) error {
   353  	logrus.Debugf("libcontainerd: createLinux(): containerId %s ", id)
   354  	logger := c.logger.WithField("container", id)
   355  
   356  	if runtimeOptions == nil {
   357  		return fmt.Errorf("lcow option must be supplied to the runtime")
   358  	}
   359  	lcowConfig, ok := runtimeOptions.(*opengcs.Config)
   360  	if !ok {
   361  		return fmt.Errorf("lcow option must be supplied to the runtime")
   362  	}
   363  
   364  	configuration := &hcsshim.ContainerConfig{
   365  		HvPartition:   true,
   366  		Name:          id,
   367  		SystemType:    "container",
   368  		ContainerType: "linux",
   369  		Owner:         defaultOwner,
   370  		TerminateOnLastHandleClosed: true,
   371  	}
   372  
   373  	if lcowConfig.ActualMode == opengcs.ModeActualVhdx {
   374  		configuration.HvRuntime = &hcsshim.HvRuntime{
   375  			ImagePath:          lcowConfig.Vhdx,
   376  			BootSource:         "Vhd",
   377  			WritableBootSource: false,
   378  		}
   379  	} else {
   380  		configuration.HvRuntime = &hcsshim.HvRuntime{
   381  			ImagePath:           lcowConfig.KirdPath,
   382  			LinuxKernelFile:     lcowConfig.KernelFile,
   383  			LinuxInitrdFile:     lcowConfig.InitrdFile,
   384  			LinuxBootParameters: lcowConfig.BootParameters,
   385  		}
   386  	}
   387  
   388  	if spec.Windows == nil {
   389  		return fmt.Errorf("spec.Windows must not be nil for LCOW containers")
   390  	}
   391  
    392  	// We must have at least one layer in the spec
   393  	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) == 0 {
   394  		return fmt.Errorf("OCI spec is invalid - at least one LayerFolders must be supplied to the runtime")
   395  	}
   396  
   397  	// Strip off the top-most layer as that's passed in separately to HCS
   398  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   399  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   400  
   401  	for _, layerPath := range layerFolders {
   402  		_, filename := filepath.Split(layerPath)
   403  		g, err := hcsshim.NameToGuid(filename)
   404  		if err != nil {
   405  			return err
   406  		}
   407  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   408  			ID:   g.ToString(),
   409  			Path: filepath.Join(layerPath, "layer.vhd"),
   410  		})
   411  	}
   412  
   413  	if spec.Windows.Network != nil {
   414  		configuration.EndpointList = spec.Windows.Network.EndpointList
   415  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   416  		if spec.Windows.Network.DNSSearchList != nil {
   417  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   418  		}
   419  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   420  	}
   421  
   422  	// Add the mounts (volumes, bind mounts etc) to the structure. We have to do
   423  	// some translation for both the mapped directories passed into HCS and in
   424  	// the spec.
   425  	//
   426  	// For HCS, we only pass in the mounts from the spec which are type "bind".
    427  	// Further, the "ContainerPath" field (which is a little misleadingly
   428  	// named when it applies to the utility VM rather than the container in the
   429  	// utility VM) is moved to under /tmp/gcs/<ID>/binds, where this is passed
   430  	// by the caller through a 'uvmpath' option.
   431  	//
   432  	// We do similar translation for the mounts in the spec by stripping out
   433  	// the uvmpath option, and translating the Source path to the location in the
   434  	// utility VM calculated above.
   435  	//
   436  	// From inside the utility VM, you would see a 9p mount such as in the following
   437  	// where a host folder has been mapped to /target. The line with /tmp/gcs/<ID>/binds
   438  	// specifically:
   439  	//
   440  	//	/ # mount
   441  	//	rootfs on / type rootfs (rw,size=463736k,nr_inodes=115934)
   442  	//	proc on /proc type proc (rw,relatime)
   443  	//	sysfs on /sys type sysfs (rw,relatime)
   444  	//	udev on /dev type devtmpfs (rw,relatime,size=498100k,nr_inodes=124525,mode=755)
   445  	//	tmpfs on /run type tmpfs (rw,relatime)
   446  	//	cgroup on /sys/fs/cgroup type cgroup (rw,relatime,cpuset,cpu,cpuacct,blkio,memory,devices,freezer,net_cls,perf_event,net_prio,hugetlb,pids,rdma)
   447  	//	mqueue on /dev/mqueue type mqueue (rw,relatime)
   448  	//	devpts on /dev/pts type devpts (rw,relatime,mode=600,ptmxmode=000)
   449  	//	/binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target on /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target type 9p (rw,sync,dirsync,relatime,trans=fd,rfdno=6,wfdno=6)
   450  	//	/dev/pmem0 on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
   451  	//	/dev/sda on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
   452  	//	overlay on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/rootfs type overlay (rw,relatime,lowerdir=/tmp/base/:/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0,upperdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/upper,workdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/work)
   453  	//
   454  	//  /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l
   455  	//	total 16
   456  	//	drwx------    3 0        0               60 Sep  7 18:54 binds
   457  	//	-rw-r--r--    1 0        0             3345 Sep  7 18:54 config.json
   458  	//	drwxr-xr-x   10 0        0             4096 Sep  6 17:26 layer0
   459  	//	drwxr-xr-x    1 0        0             4096 Sep  7 18:54 rootfs
   460  	//	drwxr-xr-x    5 0        0             4096 Sep  7 18:54 scratch
   461  	//
   462  	//	/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l binds
   463  	//	total 0
   464  	//	drwxrwxrwt    2 0        0             4096 Sep  7 16:51 target
   465  
   466  	mds := []hcsshim.MappedDir{}
   467  	specMounts := []specs.Mount{}
   468  	for _, mount := range spec.Mounts {
   469  		specMount := mount
   470  		if mount.Type == "bind" {
   471  			// Strip out the uvmpath from the options
   472  			updatedOptions := []string{}
   473  			uvmPath := ""
   474  			readonly := false
   475  			for _, opt := range mount.Options {
   476  				dropOption := false
   477  				elements := strings.SplitN(opt, "=", 2)
   478  				switch elements[0] {
   479  				case "uvmpath":
   480  					uvmPath = elements[1]
   481  					dropOption = true
   482  				case "rw":
   483  				case "ro":
   484  					readonly = true
   485  				case "rbind":
   486  				default:
   487  					return fmt.Errorf("unsupported option %q", opt)
   488  				}
   489  				if !dropOption {
   490  					updatedOptions = append(updatedOptions, opt)
   491  				}
   492  			}
   493  			mount.Options = updatedOptions
   494  			if uvmPath == "" {
   495  				return fmt.Errorf("no uvmpath for bind mount %+v", mount)
   496  			}
   497  			md := hcsshim.MappedDir{
   498  				HostPath:          mount.Source,
   499  				ContainerPath:     path.Join(uvmPath, mount.Destination),
   500  				CreateInUtilityVM: true,
   501  				ReadOnly:          readonly,
   502  			}
   503  			mds = append(mds, md)
   504  			specMount.Source = path.Join(uvmPath, mount.Destination)
   505  		}
   506  		specMounts = append(specMounts, specMount)
   507  	}
   508  	configuration.MappedDirectories = mds
   509  
   510  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   511  	if err != nil {
   512  		return err
   513  	}
   514  
   515  	spec.Mounts = specMounts
   516  
   517  	// Construct a container object for calling start on it.
   518  	ctr := &container{
   519  		id:           id,
   520  		execs:        make(map[string]*process),
   521  		isWindows:    false,
   522  		ociSpec:      spec,
   523  		hcsContainer: hcsContainer,
   524  		status:       StatusCreated,
   525  		waitCh:       make(chan struct{}),
   526  	}
   527  
   528  	// Start the container. If this is a servicing container, this call
   529  	// will block until the container is done with the servicing
   530  	// execution.
   531  	logger.Debug("starting container")
   532  	if err = hcsContainer.Start(); err != nil {
   533  		c.logger.WithError(err).Error("failed to start container")
   534  		ctr.debugGCS()
   535  		if err := c.terminateContainer(ctr); err != nil {
   536  			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
   537  		} else {
   538  			c.logger.Debug("cleaned up after failed Start by calling Terminate")
   539  		}
   540  		return err
   541  	}
   542  	ctr.debugGCS()
   543  
   544  	c.Lock()
   545  	c.containers[id] = ctr
   546  	c.Unlock()
   547  
   548  	c.eventQ.append(id, func() {
   549  		ei := EventInfo{
   550  			ContainerID: id,
   551  		}
   552  		c.logger.WithFields(logrus.Fields{
   553  			"container": ctr.id,
   554  			"event":     EventCreate,
   555  		}).Info("sending event")
   556  		err := c.backend.ProcessEvent(id, EventCreate, ei)
   557  		if err != nil {
   558  			c.logger.WithError(err).WithFields(logrus.Fields{
   559  				"container": id,
   560  				"event":     EventCreate,
   561  			}).Error("failed to process event")
   562  		}
   563  	})
   564  
   565  	logger.Debug("createLinux() completed successfully")
   566  	return nil
   567  }
   568  
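        // Start launches the init process of a previously created container, wiring up
        // its stdio through attachStdio and returning its PID. Servicing containers are
        // reaped synchronously instead of being left running.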
   569  func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio StdioCallback) (int, error) {
   570  	ctr := c.getContainer(id)
   571  	switch {
   572  	case ctr == nil:
   573  		return -1, errors.WithStack(newNotFoundError("no such container"))
   574  	case ctr.init != nil:
   575  		return -1, errors.WithStack(newConflictError("container already started"))
   576  	}
   577  
   578  	logger := c.logger.WithField("container", id)
   579  
   580  	// Note we always tell HCS to create stdout as it's required
   581  	// regardless of '-i' or '-t' options, so that docker can always grab
   582  	// the output through logs. We also tell HCS to always create stdin,
   583  	// even if it's not used - it will be closed shortly. Stderr is only
    584  	// created if we're not running with -t.
   585  	var (
   586  		emulateConsole   bool
   587  		createStdErrPipe bool
   588  	)
   589  	if ctr.ociSpec.Process != nil {
   590  		emulateConsole = ctr.ociSpec.Process.Terminal
   591  		createStdErrPipe = !ctr.ociSpec.Process.Terminal && !ctr.ociSpec.Windows.Servicing
   592  	}
   593  
   594  	createProcessParms := &hcsshim.ProcessConfig{
   595  		EmulateConsole:   emulateConsole,
   596  		WorkingDirectory: ctr.ociSpec.Process.Cwd,
   597  		CreateStdInPipe:  !ctr.ociSpec.Windows.Servicing,
   598  		CreateStdOutPipe: !ctr.ociSpec.Windows.Servicing,
   599  		CreateStdErrPipe: createStdErrPipe,
   600  	}
   601  
   602  	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
   603  		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
   604  		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
   605  	}
   606  
   607  	// Configure the environment for the process
   608  	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
   609  	if ctr.isWindows {
   610  		createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ")
   611  	} else {
   612  		createProcessParms.CommandArgs = ctr.ociSpec.Process.Args
   613  	}
   614  	createProcessParms.User = ctr.ociSpec.Process.User.Username
   615  
   616  	// LCOW requires the raw OCI spec passed through HCS and onwards to
   617  	// GCS for the utility VM.
   618  	if !ctr.isWindows {
   619  		ociBuf, err := json.Marshal(ctr.ociSpec)
   620  		if err != nil {
   621  			return -1, err
   622  		}
   623  		ociRaw := json.RawMessage(ociBuf)
   624  		createProcessParms.OCISpecification = &ociRaw
   625  	}
   626  
   627  	ctr.Lock()
   628  	defer ctr.Unlock()
   629  
   630  	// Start the command running in the container.
   631  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   632  	if err != nil {
   633  		logger.WithError(err).Error("CreateProcess() failed")
   634  		return -1, err
   635  	}
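        	// If anything below fails, make sure the new process is killed and its resources are released.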
   636  	defer func() {
   637  		if err != nil {
   638  			if err := newProcess.Kill(); err != nil {
   639  				logger.WithError(err).Error("failed to kill process")
   640  			}
   641  			go func() {
   642  				if err := newProcess.Wait(); err != nil {
   643  					logger.WithError(err).Error("failed to wait for process")
   644  				}
   645  				if err := newProcess.Close(); err != nil {
   646  					logger.WithError(err).Error("failed to clean process resources")
   647  				}
   648  			}()
   649  		}
   650  	}()
   651  	p := &process{
   652  		hcsProcess: newProcess,
   653  		id:         InitProcessName,
   654  		pid:        newProcess.Pid(),
   655  	}
   656  	logger.WithField("pid", p.pid).Debug("init process started")
   657  
   658  	// If this is a servicing container, wait on the process synchronously here and
    659  	// if it succeeds, wait for it to cleanly shut down and merge into the parent container.
   660  	if ctr.ociSpec.Windows.Servicing {
   661  		// reapProcess takes the lock
   662  		ctr.Unlock()
   663  		defer ctr.Lock()
   664  		exitCode := c.reapProcess(ctr, p)
   665  
   666  		if exitCode != 0 {
   667  			return -1, errors.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.id, exitCode)
   668  		}
   669  
   670  		return p.pid, nil
   671  	}
   672  
   673  	var (
   674  		stdout, stderr io.ReadCloser
   675  		stdin          io.WriteCloser
   676  	)
   677  	stdin, stdout, stderr, err = newProcess.Stdio()
   678  	if err != nil {
   679  		logger.WithError(err).Error("failed to get stdio pipes")
   680  		return -1, err
   681  	}
   682  
   683  	iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal}
   684  	iopipe.Stdin = createStdInCloser(stdin, newProcess)
   685  
   686  	// Convert io.ReadClosers to io.Readers
   687  	if stdout != nil {
   688  		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
   689  	}
   690  	if stderr != nil {
   691  		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
   692  	}
   693  
   694  	_, err = attachStdio(iopipe)
   695  	if err != nil {
   696  		logger.WithError(err).Error("failed to attache stdio")
   697  		return -1, err
   698  	}
   699  	ctr.status = StatusRunning
   700  	ctr.init = p
   701  
   702  	// Spin up a go routine waiting for exit to handle cleanup
   703  	go c.reapProcess(ctr, p)
   704  
   705  	// Generate the associated event
   706  	c.eventQ.append(id, func() {
   707  		ei := EventInfo{
   708  			ContainerID: id,
   709  			ProcessID:   InitProcessName,
   710  			Pid:         uint32(p.pid),
   711  		}
   712  		c.logger.WithFields(logrus.Fields{
   713  			"container":  ctr.id,
   714  			"event":      EventStart,
   715  			"event-info": ei,
   716  		}).Info("sending event")
   717  		err := c.backend.ProcessEvent(ei.ContainerID, EventStart, ei)
   718  		if err != nil {
   719  			c.logger.WithError(err).WithFields(logrus.Fields{
   720  				"container":  id,
   721  				"event":      EventStart,
   722  				"event-info": ei,
   723  			}).Error("failed to process event")
   724  		}
   725  	})
   726  	logger.Debug("start() completed")
   727  	return p.pid, nil
   728  }
   729  
    730  // Exec adds a process to a running container
   731  func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) {
   732  	ctr := c.getContainer(containerID)
   733  	switch {
   734  	case ctr == nil:
   735  		return -1, errors.WithStack(newNotFoundError("no such container"))
   736  	case ctr.hcsContainer == nil:
   737  		return -1, errors.WithStack(newInvalidParameterError("container is not running"))
   738  	case ctr.execs != nil && ctr.execs[processID] != nil:
   739  		return -1, errors.WithStack(newConflictError("id already in use"))
   740  	}
   741  	logger := c.logger.WithFields(logrus.Fields{
   742  		"container": containerID,
   743  		"exec":      processID,
   744  	})
   745  
   746  	// Note we always tell HCS to
   747  	// create stdout as it's required regardless of '-i' or '-t' options, so that
   748  	// docker can always grab the output through logs. We also tell HCS to always
   749  	// create stdin, even if it's not used - it will be closed shortly. Stderr
    750  	// is only created if we're not running with -t.
   751  	createProcessParms := hcsshim.ProcessConfig{
   752  		CreateStdInPipe:  true,
   753  		CreateStdOutPipe: true,
   754  		CreateStdErrPipe: !spec.Terminal,
   755  	}
   756  	if spec.Terminal {
   757  		createProcessParms.EmulateConsole = true
   758  		if spec.ConsoleSize != nil {
   759  			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
   760  			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
   761  		}
   762  	}
   763  
    764  	// Take the working directory from the process being added if it is defined,
    765  	// otherwise fall back to that of the container's first (init) process.
   766  	if spec.Cwd != "" {
   767  		createProcessParms.WorkingDirectory = spec.Cwd
   768  	} else {
   769  		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
   770  	}
   771  
   772  	// Configure the environment for the process
   773  	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
   774  	if ctr.isWindows {
   775  		createProcessParms.CommandLine = strings.Join(spec.Args, " ")
   776  	} else {
   777  		createProcessParms.CommandArgs = spec.Args
   778  	}
   779  	createProcessParms.User = spec.User.Username
   780  
   781  	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)
   782  
   783  	// Start the command running in the container.
   784  	var (
   785  		stdout, stderr io.ReadCloser
   786  		stdin          io.WriteCloser
   787  	)
   788  	newProcess, err := ctr.hcsContainer.CreateProcess(&createProcessParms)
   789  	if err != nil {
   790  		logger.WithError(err).Errorf("exec's CreateProcess() failed")
   791  		return -1, err
   792  	}
   793  	pid := newProcess.Pid()
   794  	defer func() {
   795  		if err != nil {
   796  			if err := newProcess.Kill(); err != nil {
   797  				logger.WithError(err).Error("failed to kill process")
   798  			}
   799  			go func() {
   800  				if err := newProcess.Wait(); err != nil {
   801  					logger.WithError(err).Error("failed to wait for process")
   802  				}
   803  				if err := newProcess.Close(); err != nil {
   804  					logger.WithError(err).Error("failed to clean process resources")
   805  				}
   806  			}()
   807  		}
   808  	}()
   809  
   810  	stdin, stdout, stderr, err = newProcess.Stdio()
   811  	if err != nil {
   812  		logger.WithError(err).Error("getting std pipes failed")
   813  		return -1, err
   814  	}
   815  
   816  	iopipe := &IOPipe{Terminal: spec.Terminal}
   817  	iopipe.Stdin = createStdInCloser(stdin, newProcess)
   818  
   819  	// Convert io.ReadClosers to io.Readers
   820  	if stdout != nil {
   821  		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
   822  	}
   823  	if stderr != nil {
   824  		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
   825  	}
   826  
   827  	// Tell the engine to attach streams back to the client
   828  	_, err = attachStdio(iopipe)
   829  	if err != nil {
   830  		return -1, err
   831  	}
   832  
   833  	p := &process{
   834  		id:         processID,
   835  		pid:        pid,
   836  		hcsProcess: newProcess,
   837  	}
   838  
   839  	// Add the process to the container's list of processes
   840  	ctr.Lock()
   841  	ctr.execs[processID] = p
   842  	ctr.Unlock()
   843  
   844  	// Spin up a go routine waiting for exit to handle cleanup
   845  	go c.reapProcess(ctr, p)
   846  
   847  	c.eventQ.append(ctr.id, func() {
   848  		ei := EventInfo{
   849  			ContainerID: ctr.id,
   850  			ProcessID:   p.id,
   851  			Pid:         uint32(p.pid),
   852  		}
   853  		c.logger.WithFields(logrus.Fields{
   854  			"container":  ctr.id,
   855  			"event":      EventExecAdded,
   856  			"event-info": ei,
   857  		}).Info("sending event")
   858  		err := c.backend.ProcessEvent(ctr.id, EventExecAdded, ei)
   859  		if err != nil {
   860  			c.logger.WithError(err).WithFields(logrus.Fields{
   861  				"container":  ctr.id,
   862  				"event":      EventExecAdded,
   863  				"event-info": ei,
   864  			}).Error("failed to process event")
   865  		}
   866  		err = c.backend.ProcessEvent(ctr.id, EventExecStarted, ei)
   867  		if err != nil {
   868  			c.logger.WithError(err).WithFields(logrus.Fields{
   869  				"container":  ctr.id,
   870  				"event":      EventExecStarted,
   871  				"event-info": ei,
   872  			}).Error("failed to process event")
   873  		}
   874  	})
   875  
   876  	return pid, nil
   877  }
   878  
    879  // SignalProcess handles `docker stop` on Windows. While Linux has support for
   880  // the full range of signals, signals aren't really implemented on Windows.
   881  // We fake supporting regular stop and -9 to force kill.
   882  func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
   883  	ctr, p, err := c.getProcess(containerID, processID)
   884  	if err != nil {
   885  		return err
   886  	}
   887  
   888  	ctr.manualStopRequested = true
   889  
   890  	logger := c.logger.WithFields(logrus.Fields{
   891  		"container": containerID,
   892  		"process":   processID,
   893  		"pid":       p.pid,
   894  		"signal":    signal,
   895  	})
   896  	logger.Debug("Signal()")
   897  
   898  	if processID == InitProcessName {
   899  		if syscall.Signal(signal) == syscall.SIGKILL {
   900  			// Terminate the compute system
   901  			if err := ctr.hcsContainer.Terminate(); err != nil {
   902  				if !hcsshim.IsPending(err) {
   903  					logger.WithError(err).Error("failed to terminate hccshim container")
   904  				}
   905  			}
   906  		} else {
   907  			// Shut down the container
   908  			if err := ctr.hcsContainer.Shutdown(); err != nil {
   909  				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
   910  					// ignore errors
   911  					logger.WithError(err).Error("failed to shutdown hccshim container")
   912  				}
   913  			}
   914  		}
   915  	} else {
   916  		return p.hcsProcess.Kill()
   917  	}
   918  
   919  	return nil
   920  }
   921  
    922  // ResizeTerminal handles a CLI event to resize an interactive docker run or docker
   923  // exec window.
   924  func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
   925  	_, p, err := c.getProcess(containerID, processID)
   926  	if err != nil {
   927  		return err
   928  	}
   929  
   930  	c.logger.WithFields(logrus.Fields{
   931  		"container": containerID,
   932  		"process":   processID,
   933  		"height":    height,
   934  		"width":     width,
   935  		"pid":       p.pid,
   936  	}).Debug("resizing")
   937  	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
   938  }
   939  
   940  func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
   941  	_, p, err := c.getProcess(containerID, processID)
   942  	if err != nil {
   943  		return err
   944  	}
   945  
   946  	return p.hcsProcess.CloseStdin()
   947  }
   948  
   949  // Pause handles pause requests for containers
   950  func (c *client) Pause(_ context.Context, containerID string) error {
   951  	ctr, _, err := c.getProcess(containerID, InitProcessName)
   952  	if err != nil {
   953  		return err
   954  	}
   955  
   956  	if ctr.ociSpec.Windows.HyperV == nil {
   957  		return errors.New("cannot pause Windows Server Containers")
   958  	}
   959  
   960  	ctr.Lock()
   961  	defer ctr.Unlock()
   962  
   963  	if err = ctr.hcsContainer.Pause(); err != nil {
   964  		return err
   965  	}
   966  
   967  	ctr.status = StatusPaused
   968  
   969  	c.eventQ.append(containerID, func() {
   970  		err := c.backend.ProcessEvent(containerID, EventPaused, EventInfo{
   971  			ContainerID: containerID,
   972  			ProcessID:   InitProcessName,
   973  		})
   974  		c.logger.WithFields(logrus.Fields{
   975  			"container": ctr.id,
   976  			"event":     EventPaused,
   977  		}).Info("sending event")
   978  		if err != nil {
   979  			c.logger.WithError(err).WithFields(logrus.Fields{
   980  				"container": containerID,
   981  				"event":     EventPaused,
   982  			}).Error("failed to process event")
   983  		}
   984  	})
   985  
   986  	return nil
   987  }
   988  
   989  // Resume handles resume requests for containers
   990  func (c *client) Resume(_ context.Context, containerID string) error {
   991  	ctr, _, err := c.getProcess(containerID, InitProcessName)
   992  	if err != nil {
   993  		return err
   994  	}
   995  
   996  	if ctr.ociSpec.Windows.HyperV == nil {
   997  		return errors.New("cannot resume Windows Server Containers")
   998  	}
   999  
  1000  	ctr.Lock()
  1001  	defer ctr.Unlock()
  1002  
  1003  	if err = ctr.hcsContainer.Resume(); err != nil {
  1004  		return err
  1005  	}
  1006  
  1007  	ctr.status = StatusRunning
  1008  
  1009  	c.eventQ.append(containerID, func() {
  1010  		err := c.backend.ProcessEvent(containerID, EventResumed, EventInfo{
  1011  			ContainerID: containerID,
  1012  			ProcessID:   InitProcessName,
  1013  		})
  1014  		c.logger.WithFields(logrus.Fields{
  1015  			"container": ctr.id,
  1016  			"event":     EventResumed,
  1017  		}).Info("sending event")
  1018  		if err != nil {
  1019  			c.logger.WithError(err).WithFields(logrus.Fields{
  1020  				"container": containerID,
  1021  				"event":     EventResumed,
  1022  			}).Error("failed to process event")
  1023  		}
  1024  	})
  1025  
  1026  	return nil
  1027  }
  1028  
  1029  // Stats handles stats requests for containers
  1030  func (c *client) Stats(_ context.Context, containerID string) (*Stats, error) {
  1031  	ctr, _, err := c.getProcess(containerID, InitProcessName)
  1032  	if err != nil {
  1033  		return nil, err
  1034  	}
  1035  
  1036  	readAt := time.Now()
  1037  	s, err := ctr.hcsContainer.Statistics()
  1038  	if err != nil {
  1039  		return nil, err
  1040  	}
  1041  	return &Stats{
  1042  		Read:     readAt,
  1043  		HCSStats: &s,
  1044  	}, nil
  1045  }
  1046  
  1047  // Restore is the handler for restoring a container
  1048  func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (bool, int, error) {
  1049  	c.logger.WithField("container", id).Debug("restore()")
  1050  
  1051  	// TODO Windows: On RS1, a re-attach isn't possible.
  1052  	// However, there is a scenario in which there is an issue.
  1053  	// Consider a background container. The daemon dies unexpectedly.
  1054  	// HCS will still have the compute service alive and running.
   1055  	// For consistency, we call in to shoot it regardless of whether HCS knows about it.
  1056  	// We explicitly just log a warning if the terminate fails.
  1057  	// Then we tell the backend the container exited.
  1058  	if hc, err := hcsshim.OpenContainer(id); err == nil {
  1059  		const terminateTimeout = time.Minute * 2
  1060  		err := hc.Terminate()
  1061  
  1062  		if hcsshim.IsPending(err) {
  1063  			err = hc.WaitTimeout(terminateTimeout)
  1064  		} else if hcsshim.IsAlreadyStopped(err) {
  1065  			err = nil
  1066  		}
  1067  
  1068  		if err != nil {
  1069  			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
  1070  			return false, -1, err
  1071  		}
  1072  	}
  1073  	return false, -1, nil
  1074  }
  1075  
   1076  // ListPids returns a list of process IDs running in a container.
  1077  // Not used on Windows.
  1078  func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
  1079  	return nil, errors.New("not implemented on Windows")
  1080  }
  1081  
  1082  // Summary returns a summary of the processes running in a container.
   1083  // This is present on Windows to support docker top. On Linux, the
  1084  // engine shells out to ps to get process information. On Windows, as
  1085  // the containers could be Hyper-V containers, they would not be
  1086  // visible on the container host. However, libcontainerd does have
  1087  // that information.
  1088  func (c *client) Summary(_ context.Context, containerID string) ([]Summary, error) {
  1089  	ctr, _, err := c.getProcess(containerID, InitProcessName)
  1090  	if err != nil {
  1091  		return nil, err
  1092  	}
  1093  
  1094  	p, err := ctr.hcsContainer.ProcessList()
  1095  	if err != nil {
  1096  		return nil, err
  1097  	}
  1098  
  1099  	pl := make([]Summary, len(p))
  1100  	for i := range p {
  1101  		pl[i] = Summary(p[i])
  1102  	}
  1103  	return pl, nil
  1104  }
  1105  
  1106  func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
  1107  	ec := -1
  1108  	ctr := c.getContainer(containerID)
  1109  	if ctr == nil {
  1110  		return uint32(ec), time.Now(), errors.WithStack(newNotFoundError("no such container"))
  1111  	}
  1112  
  1113  	select {
  1114  	case <-ctx.Done():
  1115  		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
  1116  	case <-ctr.waitCh:
  1117  	default:
  1118  		return uint32(ec), time.Now(), errors.New("container is not stopped")
  1119  	}
  1120  
  1121  	ctr.Lock()
  1122  	defer ctr.Unlock()
  1123  	return ctr.exitCode, ctr.exitedAt, nil
  1124  }
  1125  
  1126  func (c *client) Delete(_ context.Context, containerID string) error {
  1127  	c.Lock()
  1128  	defer c.Unlock()
  1129  	ctr := c.containers[containerID]
  1130  	if ctr == nil {
  1131  		return errors.WithStack(newNotFoundError("no such container"))
  1132  	}
  1133  
  1134  	ctr.Lock()
  1135  	defer ctr.Unlock()
  1136  
  1137  	switch ctr.status {
  1138  	case StatusCreated:
  1139  		if err := c.shutdownContainer(ctr); err != nil {
  1140  			return err
  1141  		}
  1142  		fallthrough
  1143  	case StatusStopped:
  1144  		delete(c.containers, containerID)
  1145  		return nil
  1146  	}
  1147  
  1148  	return errors.WithStack(newInvalidParameterError("container is not stopped"))
  1149  }
  1150  
  1151  func (c *client) Status(ctx context.Context, containerID string) (Status, error) {
  1152  	c.Lock()
  1153  	defer c.Unlock()
  1154  	ctr := c.containers[containerID]
  1155  	if ctr == nil {
  1156  		return StatusUnknown, errors.WithStack(newNotFoundError("no such container"))
  1157  	}
  1158  
  1159  	ctr.Lock()
  1160  	defer ctr.Unlock()
  1161  	return ctr.status, nil
  1162  }
  1163  
  1164  func (c *client) UpdateResources(ctx context.Context, containerID string, resources *Resources) error {
   1165  	// Updating resources isn't supported on Windows,
   1166  	// but we return nil so that the container update path can proceed.
  1167  	return nil
  1168  }
  1169  
  1170  func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
  1171  	return errors.New("Windows: Containers do not support checkpoints")
  1172  }
  1173  
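        // getContainer returns the tracked container for id, or nil if it is not known to this client.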
  1174  func (c *client) getContainer(id string) *container {
  1175  	c.Lock()
  1176  	ctr := c.containers[id]
  1177  	c.Unlock()
  1178  
  1179  	return ctr
  1180  }
  1181  
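        // getProcess looks up a container and one of its processes. processID may be
        // InitProcessName for the init process or the ID of a previously added exec.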
  1182  func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
  1183  	ctr := c.getContainer(containerID)
  1184  	switch {
  1185  	case ctr == nil:
  1186  		return nil, nil, errors.WithStack(newNotFoundError("no such container"))
  1187  	case ctr.init == nil:
  1188  		return nil, nil, errors.WithStack(newNotFoundError("container is not running"))
  1189  	case processID == InitProcessName:
  1190  		return ctr, ctr.init, nil
  1191  	default:
  1192  		ctr.Lock()
  1193  		defer ctr.Unlock()
  1194  		if ctr.execs == nil {
  1195  			return nil, nil, errors.WithStack(newNotFoundError("no execs"))
  1196  		}
  1197  	}
  1198  
  1199  	p := ctr.execs[processID]
  1200  	if p == nil {
  1201  		return nil, nil, errors.WithStack(newNotFoundError("no such exec"))
  1202  	}
  1203  
  1204  	return ctr, p, nil
  1205  }
  1206  
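        // shutdownContainer asks HCS to shut the container down cleanly, waiting up to
        // shutdownTimeout for a pending shutdown, and terminates it if that fails.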
  1207  func (c *client) shutdownContainer(ctr *container) error {
  1208  	const shutdownTimeout = time.Minute * 5
  1209  	err := ctr.hcsContainer.Shutdown()
  1210  
  1211  	if hcsshim.IsPending(err) {
  1212  		err = ctr.hcsContainer.WaitTimeout(shutdownTimeout)
  1213  	} else if hcsshim.IsAlreadyStopped(err) {
  1214  		err = nil
  1215  	}
  1216  
  1217  	if err != nil {
  1218  		c.logger.WithError(err).WithField("container", ctr.id).
  1219  			Debug("failed to shutdown container, terminating it")
  1220  		return c.terminateContainer(ctr)
  1221  	}
  1222  
  1223  	return nil
  1224  }
  1225  
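        // terminateContainer forcibly terminates the HCS container, waiting up to
        // terminateTimeout if the termination is reported as pending.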
  1226  func (c *client) terminateContainer(ctr *container) error {
  1227  	const terminateTimeout = time.Minute * 5
  1228  	err := ctr.hcsContainer.Terminate()
  1229  
  1230  	if hcsshim.IsPending(err) {
  1231  		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
  1232  	} else if hcsshim.IsAlreadyStopped(err) {
  1233  		err = nil
  1234  	}
  1235  
  1236  	if err != nil {
  1237  		c.logger.WithError(err).WithField("container", ctr.id).
  1238  			Debug("failed to terminate container")
  1239  		return err
  1240  	}
  1241  
  1242  	return nil
  1243  }
  1244  
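        // reapProcess blocks until the given process exits, collects its exit code and
        // releases its HCS resources. For the init process it also records the exit in
        // the container, shuts the container down and emits the exit event. It returns
        // the process exit code.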
  1245  func (c *client) reapProcess(ctr *container, p *process) int {
  1246  	logger := c.logger.WithFields(logrus.Fields{
  1247  		"container": ctr.id,
  1248  		"process":   p.id,
  1249  	})
  1250  
  1251  	// Block indefinitely for the process to exit.
  1252  	if err := p.hcsProcess.Wait(); err != nil {
  1253  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1254  			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
  1255  		}
  1256  		// Fall through here, do not return. This ensures we attempt to
  1257  		// continue the shutdown in HCS and tell the docker engine that the
  1258  		// process/container has exited to avoid a container being dropped on
  1259  		// the floor.
  1260  	}
  1261  	exitedAt := time.Now()
  1262  
  1263  	exitCode, err := p.hcsProcess.ExitCode()
  1264  	if err != nil {
  1265  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1266  			logger.WithError(err).Warnf("unable to get exit code for process")
  1267  		}
  1268  		// Since we got an error retrieving the exit code, make sure that the
  1269  		// code we return doesn't incorrectly indicate success.
  1270  		exitCode = -1
  1271  
  1272  		// Fall through here, do not return. This ensures we attempt to
  1273  		// continue the shutdown in HCS and tell the docker engine that the
  1274  		// process/container has exited to avoid a container being dropped on
  1275  		// the floor.
  1276  	}
  1277  
  1278  	if err := p.hcsProcess.Close(); err != nil {
  1279  		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
  1280  	}
  1281  
  1282  	var pendingUpdates bool
  1283  	if p.id == InitProcessName {
  1284  		// Update container status
  1285  		ctr.Lock()
  1286  		ctr.status = StatusStopped
  1287  		ctr.exitedAt = exitedAt
  1288  		ctr.exitCode = uint32(exitCode)
  1289  		close(ctr.waitCh)
  1290  		ctr.Unlock()
  1291  
  1292  		// Handle any servicing
  1293  		if exitCode == 0 && ctr.isWindows && !ctr.ociSpec.Windows.Servicing {
  1294  			pendingUpdates, err = ctr.hcsContainer.HasPendingUpdates()
  1295  			logger.Infof("Pending updates: %v", pendingUpdates)
  1296  			if err != nil {
  1297  				logger.WithError(err).
  1298  					Warnf("failed to check for pending updates (container may have been killed)")
  1299  			}
  1300  		}
  1301  
  1302  		if err := c.shutdownContainer(ctr); err != nil {
  1303  			logger.WithError(err).Warn("failed to shutdown container")
  1304  		} else {
  1305  			logger.Debug("completed container shutdown")
  1306  		}
  1307  
  1308  		if err := ctr.hcsContainer.Close(); err != nil {
  1309  			logger.WithError(err).Error("failed to clean hcs container resources")
  1310  		}
  1311  	}
  1312  
  1313  	if !(ctr.isWindows && ctr.ociSpec.Windows.Servicing) {
  1314  		c.eventQ.append(ctr.id, func() {
  1315  			ei := EventInfo{
  1316  				ContainerID:   ctr.id,
  1317  				ProcessID:     p.id,
  1318  				Pid:           uint32(p.pid),
  1319  				ExitCode:      uint32(exitCode),
  1320  				ExitedAt:      exitedAt,
  1321  				UpdatePending: pendingUpdates,
  1322  			}
  1323  			c.logger.WithFields(logrus.Fields{
  1324  				"container":  ctr.id,
  1325  				"event":      EventExit,
  1326  				"event-info": ei,
  1327  			}).Info("sending event")
  1328  			err := c.backend.ProcessEvent(ctr.id, EventExit, ei)
  1329  			if err != nil {
  1330  				c.logger.WithError(err).WithFields(logrus.Fields{
  1331  					"container":  ctr.id,
  1332  					"event":      EventExit,
  1333  					"event-info": ei,
  1334  				}).Error("failed to process event")
  1335  			}
  1336  			if p.id != InitProcessName {
  1337  				ctr.Lock()
  1338  				delete(ctr.execs, p.id)
  1339  				ctr.Unlock()
  1340  			}
  1341  		})
  1342  	}
  1343  
  1344  	return exitCode
  1345  }