github.com/rawahars/moby@v24.0.4+incompatible/libcontainerd/local/local_windows.go

     1  package local // import "github.com/docker/docker/libcontainerd/local"
     2  
     3  // This package contains the legacy in-proc calls in HCS using the v1 schema
     4  // for Windows runtime purposes.
     5  
     6  import (
     7  	"context"
     8  	"fmt"
     9  	"io"
    10  	"os"
    11  	"path/filepath"
    12  	"regexp"
    13  	"strings"
    14  	"sync"
    15  	"syscall"
    16  	"time"
    17  
    18  	"github.com/Microsoft/hcsshim"
    19  	"github.com/containerd/containerd"
    20  	"github.com/containerd/containerd/cio"
    21  	cerrdefs "github.com/containerd/containerd/errdefs"
    22  	"github.com/docker/docker/errdefs"
    23  	"github.com/docker/docker/libcontainerd/queue"
    24  	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
    25  	"github.com/docker/docker/pkg/sysinfo"
    26  	"github.com/docker/docker/pkg/system"
    27  	specs "github.com/opencontainers/runtime-spec/specs-go"
    28  	"github.com/pkg/errors"
    29  	"github.com/sirupsen/logrus"
    30  	"golang.org/x/sys/windows"
    31  )
    32  
    33  type process struct {
    34  	// mu guards the mutable fields of this struct.
    35  	//
    36  	// Always lock mu before ctr's mutex to prevent deadlocks.
    37  	mu         sync.Mutex
    38  	id         string                 // Invariants: immutable
    39  	ctr        *container             // Invariants: immutable, ctr != nil
    40  	hcsProcess hcsshim.Process        // Is set to nil on process exit
    41  	exited     *containerd.ExitStatus // Valid iff waitCh is closed
    42  	waitCh     chan struct{}
    43  }
    44  
    45  type task struct {
    46  	process
    47  }
    48  
    49  type container struct {
    50  	mu sync.Mutex
    51  
    52  	// The ociSpec is required, as client.Create() needs a spec, but can
    53  	// be called from the RestartManager context which does not otherwise
    54  	// have access to the Spec
    55  	//
    56  	// A container value with ociSpec == nil represents a container which
    57  	// has been loaded with (*client).LoadContainer, and is ineligible to
    58  	// be Start()ed.
    59  	ociSpec *specs.Spec
    60  
    61  	hcsContainer hcsshim.Container // Is set to nil on container delete
    62  	isPaused     bool
    63  
    64  	client           *client
    65  	id               string
    66  	terminateInvoked bool
    67  
    68  	// task is a reference to the current task for the container. As a
    69  	// corollary, when task == nil the container has no current task: the
    70  	// container was never Start()ed or the task was Delete()d.
    71  	task *task
    72  }
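
         // A hedged sketch of the lock-ordering rule documented on process.mu,
         // inside a hypothetical method with receiver p *process that needs both
         // mutexes:
         //
         //	p.mu.Lock() // always the process mutex first...
         //	defer p.mu.Unlock()
         //	p.ctr.mu.Lock() // ...then the container mutex, never the other way around
         //	defer p.ctr.mu.Unlock()
         //	// ... inspect or update p.hcsProcess and p.ctr.hcsContainer ...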
    73  
    74  // defaultOwner is a tag passed to HCS to allow it to differentiate between
     75  // container creator management stacks. We hard-code "docker" for the
     76  // docker daemon.
    77  const defaultOwner = "docker"
    78  
    79  type client struct {
    80  	stateDir string
    81  	backend  libcontainerdtypes.Backend
    82  	logger   *logrus.Entry
    83  	eventQ   queue.Queue
    84  }
    85  
     86  // NewClient creates a new local executor for Windows.
    87  func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
    88  	c := &client{
    89  		stateDir: stateDir,
    90  		backend:  b,
    91  		logger:   logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
    92  	}
    93  
    94  	return c, nil
    95  }
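
         // A hedged construction sketch with hypothetical stateDir and namespace
         // values, given some libcontainerdtypes.Backend implementation b; note
         // that the containerd client argument is unused by this local,
         // HCS-backed client:
         //
         //	cli, err := local.NewClient(ctx, nil, `C:\ProgramData\docker\containerd`, "moby", b)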
    96  
    97  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
    98  	return containerd.Version{}, errors.New("not implemented on Windows")
    99  }
   100  
   101  // NewContainer is the entrypoint to create a container from a spec.
    102  // The table below shows the fields required for the HCS JSON calling parameters;
    103  // fields that are not populated are omitted.
   104  // +-----------------+--------------------------------------------+---------------------------------------------------+
   105  // |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
   106  // +-----------------+--------------------------------------------+---------------------------------------------------+
   107  // | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
   108  // | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
   109  // | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
   110  // | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
   111  // +-----------------+--------------------------------------------+---------------------------------------------------+
   112  //
   113  // Isolation=Process example:
   114  //
   115  //	{
   116  //		"SystemType": "Container",
   117  //		"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   118  //		"Owner": "docker",
   119  //		"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
   120  //		"IgnoreFlushesDuringBoot": true,
   121  //		"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   122  //		"Layers": [{
   123  //			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   124  //			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   125  //		}],
   126  //		"HostName": "5e0055c814a6",
   127  //		"MappedDirectories": [],
   128  //		"HvPartition": false,
   129  //		"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
   130  //	}
   131  //
   132  // Isolation=Hyper-V example:
   133  //
   134  //	{
   135  //		"SystemType": "Container",
   136  //		"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
   137  //		"Owner": "docker",
   138  //		"IgnoreFlushesDuringBoot": true,
   139  //		"Layers": [{
   140  //			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   141  //			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   142  //		}],
   143  //		"HostName": "475c2c58933b",
   144  //		"MappedDirectories": [],
   145  //		"HvPartition": true,
   146  //		"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
   147  //		"DNSSearchList": "a.com,b.com,c.com",
   148  //		"HvRuntime": {
   149  //			"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
   150  //		},
   151  //	}
   152  func (c *client) NewContainer(_ context.Context, id string, spec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
   153  	var err error
   154  	if spec.Linux != nil {
   155  		return nil, errors.New("linux containers are not supported on this platform")
   156  	}
   157  	ctr, err := c.createWindows(id, spec, runtimeOptions)
   158  
   159  	if err == nil {
   160  		c.eventQ.Append(id, func() {
   161  			ei := libcontainerdtypes.EventInfo{
   162  				ContainerID: id,
   163  			}
   164  			c.logger.WithFields(logrus.Fields{
   165  				"container": id,
   166  				"event":     libcontainerdtypes.EventCreate,
   167  			}).Info("sending event")
   168  			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
   169  			if err != nil {
   170  				c.logger.WithError(err).WithFields(logrus.Fields{
   171  					"container": id,
   172  					"event":     libcontainerdtypes.EventCreate,
   173  				}).Error("failed to process event")
   174  			}
   175  		})
   176  	}
   177  	return ctr, err
   178  }
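
         // A hedged usage sketch for the call above: a minimal process-isolated
         // spec of the shape described in the table on NewContainer. All
         // identifiers, paths and GUIDs below are hypothetical placeholders.
         //
         //	spec := &specs.Spec{
         //		Hostname: "example",
         //		Root:     &specs.Root{Path: `\\?\Volume{aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee}\`},
         //		Process:  &specs.Process{Cwd: `C:\`, Args: []string{"cmd", "/C", "echo hello"}},
         //		Windows: &specs.Windows{
         //			LayerFolders: []string{
         //				`C:\ProgramData\docker\windowsfilter\<base-layer>`, // read-only base layer
         //				`C:\ProgramData\docker\windowsfilter\<container>`,  // top-most RW layer
         //			},
         //		},
         //	}
         //	ctr, err := c.NewContainer(ctx, "<container-id>", spec, "", nil)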
   179  
   180  func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) (*container, error) {
   181  	logger := c.logger.WithField("container", id)
   182  	configuration := &hcsshim.ContainerConfig{
   183  		SystemType:              "Container",
   184  		Name:                    id,
   185  		Owner:                   defaultOwner,
   186  		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
   187  		HostName:                spec.Hostname,
   188  		HvPartition:             false,
   189  	}
   190  
   191  	c.extractResourcesFromSpec(spec, configuration)
   192  
   193  	if spec.Windows.Resources != nil {
   194  		if spec.Windows.Resources.Storage != nil {
   195  			if spec.Windows.Resources.Storage.Bps != nil {
   196  				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
   197  			}
   198  			if spec.Windows.Resources.Storage.Iops != nil {
   199  				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
   200  			}
   201  		}
   202  	}
   203  
   204  	if spec.Windows.HyperV != nil {
   205  		configuration.HvPartition = true
   206  	}
   207  
   208  	if spec.Windows.Network != nil {
   209  		configuration.EndpointList = spec.Windows.Network.EndpointList
   210  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   211  		if spec.Windows.Network.DNSSearchList != nil {
   212  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   213  		}
   214  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   215  	}
   216  
   217  	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
   218  		configuration.Credentials = cs
   219  	}
   220  
   221  	// We must have least two layers in the spec, the bottom one being a
   222  	// base image, the top one being the RW layer.
    223  	// We must have at least two layers in the spec, the bottom one being a
   224  		return nil, fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
   225  	}
   226  
   227  	// Strip off the top-most layer as that's passed in separately to HCS
   228  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   229  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
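         	// For example (hypothetical paths), with
         	//   LayerFolders = [C:\...\base, C:\...\mid, C:\...\scratch]
         	// the scratch (top-most, RW) folder becomes LayerFolderPath and only the
         	// read-only layers [base, mid] are added to configuration.Layers below.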
   230  
   231  	if configuration.HvPartition {
   232  		// We don't currently support setting the utility VM image explicitly.
   233  		// TODO circa RS5, this may be re-locatable.
   234  		if spec.Windows.HyperV.UtilityVMPath != "" {
   235  			return nil, errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
   236  		}
   237  
   238  		// Find the upper-most utility VM image.
   239  		var uvmImagePath string
   240  		for _, path := range layerFolders {
   241  			fullPath := filepath.Join(path, "UtilityVM")
   242  			_, err := os.Stat(fullPath)
   243  			if err == nil {
   244  				uvmImagePath = fullPath
   245  				break
   246  			}
   247  			if !os.IsNotExist(err) {
   248  				return nil, err
   249  			}
   250  		}
   251  		if uvmImagePath == "" {
   252  			return nil, errors.New("utility VM image could not be found")
   253  		}
   254  		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
   255  
   256  		if spec.Root.Path != "" {
   257  			return nil, errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
   258  		}
   259  	} else {
   260  		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
    261  		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
   262  			return nil, fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
   263  		}
   264  		// HCS API requires the trailing backslash to be removed
   265  		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
   266  	}
   267  
   268  	if spec.Root.Readonly {
   269  		return nil, errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
   270  	}
   271  
   272  	for _, layerPath := range layerFolders {
   273  		_, filename := filepath.Split(layerPath)
   274  		g, err := hcsshim.NameToGuid(filename)
   275  		if err != nil {
   276  			return nil, err
   277  		}
   278  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   279  			ID:   g.ToString(),
   280  			Path: layerPath,
   281  		})
   282  	}
   283  
   284  	// Add the mounts (volumes, bind mounts etc) to the structure
   285  	var mds []hcsshim.MappedDir
   286  	var mps []hcsshim.MappedPipe
   287  	for _, mount := range spec.Mounts {
   288  		const pipePrefix = `\\.\pipe\`
   289  		if mount.Type != "" {
   290  			return nil, fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
   291  		}
   292  		if strings.HasPrefix(mount.Destination, pipePrefix) {
   293  			mp := hcsshim.MappedPipe{
   294  				HostPath:          mount.Source,
   295  				ContainerPipeName: mount.Destination[len(pipePrefix):],
   296  			}
   297  			mps = append(mps, mp)
   298  		} else {
   299  			md := hcsshim.MappedDir{
   300  				HostPath:      mount.Source,
   301  				ContainerPath: mount.Destination,
   302  				ReadOnly:      false,
   303  			}
   304  			for _, o := range mount.Options {
   305  				if strings.ToLower(o) == "ro" {
   306  					md.ReadOnly = true
   307  				}
   308  			}
   309  			mds = append(mds, md)
   310  		}
   311  	}
   312  	configuration.MappedDirectories = mds
   313  	configuration.MappedPipes = mps
   314  
   315  	if len(spec.Windows.Devices) > 0 {
   316  		// Add any device assignments
   317  		if configuration.HvPartition {
   318  			return nil, errors.New("device assignment is not supported for HyperV containers")
   319  		}
   320  		for _, d := range spec.Windows.Devices {
   321  			// Per https://github.com/microsoft/hcsshim/blob/v0.9.2/internal/uvm/virtual_device.go#L17-L18,
   322  			// these represent an Interface Class GUID.
   323  			if d.IDType != "class" && d.IDType != "vpci-class-guid" {
   324  				return nil, errors.Errorf("device assignment of type '%s' is not supported", d.IDType)
   325  			}
   326  			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
   327  		}
   328  	}
   329  
   330  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   331  	if err != nil {
   332  		return nil, err
   333  	}
   334  
   335  	// Construct a container object for calling start on it.
   336  	ctr := &container{
   337  		client:       c,
   338  		id:           id,
   339  		ociSpec:      spec,
   340  		hcsContainer: hcsContainer,
   341  	}
   342  
   343  	logger.Debug("starting container")
   344  	if err := ctr.hcsContainer.Start(); err != nil {
   345  		logger.WithError(err).Error("failed to start container")
   346  		ctr.mu.Lock()
   347  		if err := ctr.terminateContainer(); err != nil {
   348  			logger.WithError(err).Error("failed to cleanup after a failed Start")
   349  		} else {
   350  			logger.Debug("cleaned up after failed Start by calling Terminate")
   351  		}
   352  		ctr.mu.Unlock()
   353  		return nil, err
   354  	}
   355  
   356  	logger.Debug("createWindows() completed successfully")
   357  	return ctr, nil
   358  
   359  }
   360  
   361  func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
   362  	if spec.Windows.Resources != nil {
   363  		if spec.Windows.Resources.CPU != nil {
   364  			if spec.Windows.Resources.CPU.Count != nil {
   365  				// This check is being done here rather than in adaptContainerSettings
   366  				// because we don't want to update the HostConfig in case this container
   367  				// is moved to a host with more CPUs than this one.
   368  				cpuCount := *spec.Windows.Resources.CPU.Count
   369  				hostCPUCount := uint64(sysinfo.NumCPU())
   370  				if cpuCount > hostCPUCount {
   371  					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
   372  					cpuCount = hostCPUCount
   373  				}
   374  				configuration.ProcessorCount = uint32(cpuCount)
   375  			}
   376  			if spec.Windows.Resources.CPU.Shares != nil {
   377  				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
   378  			}
   379  			if spec.Windows.Resources.CPU.Maximum != nil {
   380  				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
   381  			}
   382  		}
   383  		if spec.Windows.Resources.Memory != nil {
   384  			if spec.Windows.Resources.Memory.Limit != nil {
   385  				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
   386  			}
   387  		}
   388  	}
   389  }
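
         // A hedged worked example of the CPU handling above, with hypothetical
         // values: a spec requesting CPU.Count = 8 on a host where sysinfo.NumCPU()
         // reports 4 is clamped to ProcessorCount = 4 and a warning is logged;
         // Shares and Maximum are copied through unchanged.
         //
         //	count, shares := uint64(8), uint16(512)
         //	spec.Windows.Resources = &specs.WindowsResources{
         //		CPU: &specs.WindowsCPUResources{Count: &count, Shares: &shares},
         //	}
         //	c.extractResourcesFromSpec(spec, configuration)
         //	// configuration.ProcessorCount == 4, configuration.ProcessorWeight == 512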
   390  
   391  func (ctr *container) Start(_ context.Context, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
   392  	ctr.mu.Lock()
   393  	defer ctr.mu.Unlock()
   394  
   395  	switch {
   396  	case ctr.ociSpec == nil:
   397  		return nil, errors.WithStack(errdefs.NotImplemented(errors.New("a restored container cannot be started")))
   398  	case ctr.task != nil:
   399  		return nil, errors.WithStack(errdefs.NotModified(cerrdefs.ErrAlreadyExists))
   400  	}
   401  
   402  	logger := ctr.client.logger.WithField("container", ctr.id)
   403  
   404  	// Note we always tell HCS to create stdout as it's required
   405  	// regardless of '-i' or '-t' options, so that docker can always grab
   406  	// the output through logs. We also tell HCS to always create stdin,
   407  	// even if it's not used - it will be closed shortly. Stderr is only
    408  	// created if we're not running with -t.
   409  	var (
   410  		emulateConsole   bool
   411  		createStdErrPipe bool
   412  	)
   413  	if ctr.ociSpec.Process != nil {
   414  		emulateConsole = ctr.ociSpec.Process.Terminal
   415  		createStdErrPipe = !ctr.ociSpec.Process.Terminal
   416  	}
   417  
   418  	createProcessParms := &hcsshim.ProcessConfig{
   419  		EmulateConsole:   emulateConsole,
   420  		WorkingDirectory: ctr.ociSpec.Process.Cwd,
   421  		CreateStdInPipe:  true,
   422  		CreateStdOutPipe: true,
   423  		CreateStdErrPipe: createStdErrPipe,
   424  	}
   425  
   426  	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
   427  		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
   428  		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
   429  	}
   430  
   431  	// Configure the environment for the process
   432  	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
   433  
   434  	// Configure the CommandLine/CommandArgs
   435  	setCommandLineAndArgs(ctr.ociSpec.Process, createProcessParms)
   436  	logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)
   437  
   438  	createProcessParms.User = ctr.ociSpec.Process.User.Username
   439  
   440  	// Start the command running in the container.
   441  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   442  	if err != nil {
   443  		logger.WithError(err).Error("CreateProcess() failed")
   444  		return nil, err
   445  	}
   446  
   447  	defer func() {
   448  		if err != nil {
   449  			if err := newProcess.Kill(); err != nil {
   450  				logger.WithError(err).Error("failed to kill process")
   451  			}
   452  			go func() {
   453  				if err := newProcess.Wait(); err != nil {
   454  					logger.WithError(err).Error("failed to wait for process")
   455  				}
   456  				if err := newProcess.Close(); err != nil {
   457  					logger.WithError(err).Error("failed to clean process resources")
   458  				}
   459  			}()
   460  		}
   461  	}()
   462  	t := &task{process: process{
   463  		id:         ctr.id,
   464  		ctr:        ctr,
   465  		hcsProcess: newProcess,
   466  		waitCh:     make(chan struct{}),
   467  	}}
   468  	pid := t.Pid()
   469  	logger.WithField("pid", pid).Debug("init process started")
   470  
   471  	// Spin up a goroutine to notify the backend and clean up resources when
   472  	// the task exits. Defer until after the start event is sent so that the
   473  	// exit event is not sent out-of-order.
   474  	defer func() { go t.reap() }()
   475  
   476  	// Don't shadow err here due to our deferred clean-up.
   477  	var dio *cio.DirectIO
   478  	dio, err = newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
   479  	if err != nil {
   480  		logger.WithError(err).Error("failed to get stdio pipes")
   481  		return nil, err
   482  	}
   483  	_, err = attachStdio(dio)
   484  	if err != nil {
   485  		logger.WithError(err).Error("failed to attach stdio")
   486  		return nil, err
   487  	}
   488  
   489  	// All fallible operations have succeeded so it is now safe to set the
   490  	// container's current task.
   491  	ctr.task = t
   492  
   493  	// Generate the associated event
   494  	ctr.client.eventQ.Append(ctr.id, func() {
   495  		ei := libcontainerdtypes.EventInfo{
   496  			ContainerID: ctr.id,
   497  			ProcessID:   t.id,
   498  			Pid:         pid,
   499  		}
   500  		ctr.client.logger.WithFields(logrus.Fields{
   501  			"container":  ctr.id,
   502  			"event":      libcontainerdtypes.EventStart,
   503  			"event-info": ei,
   504  		}).Info("sending event")
   505  		err := ctr.client.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
   506  		if err != nil {
   507  			ctr.client.logger.WithError(err).WithFields(logrus.Fields{
   508  				"container":  ei.ContainerID,
   509  				"event":      libcontainerdtypes.EventStart,
   510  				"event-info": ei,
   511  			}).Error("failed to process event")
   512  		}
   513  	})
   514  	logger.Debug("start() completed")
   515  	return t, nil
   516  }
   517  
   518  func (ctr *container) Task(context.Context) (libcontainerdtypes.Task, error) {
   519  	ctr.mu.Lock()
   520  	defer ctr.mu.Unlock()
   521  	if ctr.task == nil {
   522  		return nil, errdefs.NotFound(cerrdefs.ErrNotFound)
   523  	}
   524  	return ctr.task, nil
   525  }
   526  
   527  // setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
   528  func setCommandLineAndArgs(process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
   529  	if process.CommandLine != "" {
   530  		createProcessParms.CommandLine = process.CommandLine
   531  	} else {
   532  		createProcessParms.CommandLine = system.EscapeArgs(process.Args)
   533  	}
   534  }
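
         // A hedged example of the helper above with hypothetical arguments: when
         // the OCI process carries Args rather than an explicit CommandLine, the
         // arguments are escaped and joined into a single Windows command line.
         //
         //	parms := &hcsshim.ProcessConfig{}
         //	setCommandLineAndArgs(&specs.Process{Args: []string{"cmd", "/C", "echo", "hello world"}}, parms)
         //	// parms.CommandLine is now an escaped string along the lines of: cmd /C echo "hello world"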
   535  
   536  func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
   537  	stdin, stdout, stderr, err := newProcess.Stdio()
   538  	if err != nil {
   539  		return nil, err
   540  	}
   541  
   542  	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)
   543  
   544  	// Convert io.ReadClosers to io.Readers
   545  	if stdout != nil {
   546  		dio.Stdout = io.NopCloser(&autoClosingReader{ReadCloser: stdout})
   547  	}
   548  	if stderr != nil {
   549  		dio.Stderr = io.NopCloser(&autoClosingReader{ReadCloser: stderr})
   550  	}
   551  	return dio, nil
   552  }
   553  
   554  // Exec launches a process in a running container.
   555  //
   556  // The processID argument is entirely informational. As there is no mechanism
   557  // (exposed through the libcontainerd interfaces) to enumerate or reference an
   558  // exec'd process by ID, uniqueness is not currently enforced.
   559  func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
   560  	hcsContainer, err := t.getHCSContainer()
   561  	if err != nil {
   562  		return nil, err
   563  	}
   564  	logger := t.ctr.client.logger.WithFields(logrus.Fields{
   565  		"container": t.ctr.id,
   566  		"exec":      processID,
   567  	})
   568  
   569  	// Note we always tell HCS to
   570  	// create stdout as it's required regardless of '-i' or '-t' options, so that
   571  	// docker can always grab the output through logs. We also tell HCS to always
   572  	// create stdin, even if it's not used - it will be closed shortly. Stderr
    573  	// is only created if we're not running with -t.
   574  	createProcessParms := &hcsshim.ProcessConfig{
   575  		CreateStdInPipe:  true,
   576  		CreateStdOutPipe: true,
   577  		CreateStdErrPipe: !spec.Terminal,
   578  	}
   579  	if spec.Terminal {
   580  		createProcessParms.EmulateConsole = true
   581  		if spec.ConsoleSize != nil {
   582  			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
   583  			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
   584  		}
   585  	}
   586  
    587  	// Take the working directory from the exec process spec if it is defined,
    588  	// otherwise fall back to the container's init process.
   589  	if spec.Cwd != "" {
   590  		createProcessParms.WorkingDirectory = spec.Cwd
   591  	} else {
   592  		createProcessParms.WorkingDirectory = t.ctr.ociSpec.Process.Cwd
   593  	}
   594  
   595  	// Configure the environment for the process
   596  	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
   597  
   598  	// Configure the CommandLine/CommandArgs
   599  	setCommandLineAndArgs(spec, createProcessParms)
   600  	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)
   601  
   602  	createProcessParms.User = spec.User.Username
   603  
   604  	// Start the command running in the container.
   605  	newProcess, err := hcsContainer.CreateProcess(createProcessParms)
   606  	if err != nil {
   607  		logger.WithError(err).Errorf("exec's CreateProcess() failed")
   608  		return nil, err
   609  	}
   610  	pid := newProcess.Pid()
   611  	defer func() {
   612  		if err != nil {
   613  			if err := newProcess.Kill(); err != nil {
   614  				logger.WithError(err).Error("failed to kill process")
   615  			}
   616  			go func() {
   617  				if err := newProcess.Wait(); err != nil {
   618  					logger.WithError(err).Error("failed to wait for process")
   619  				}
   620  				if err := newProcess.Close(); err != nil {
   621  					logger.WithError(err).Error("failed to clean process resources")
   622  				}
   623  			}()
   624  		}
   625  	}()
   626  
   627  	dio, err := newIOFromProcess(newProcess, spec.Terminal)
   628  	if err != nil {
   629  		logger.WithError(err).Error("failed to get stdio pipes")
   630  		return nil, err
   631  	}
   632  	// Tell the engine to attach streams back to the client
   633  	_, err = attachStdio(dio)
   634  	if err != nil {
   635  		return nil, err
   636  	}
   637  
   638  	p := &process{
   639  		id:         processID,
   640  		ctr:        t.ctr,
   641  		hcsProcess: newProcess,
   642  		waitCh:     make(chan struct{}),
   643  	}
   644  
   645  	// Spin up a goroutine to notify the backend and clean up resources when
   646  	// the process exits. Defer until after the start event is sent so that
   647  	// the exit event is not sent out-of-order.
   648  	defer func() { go p.reap() }()
   649  
   650  	t.ctr.client.eventQ.Append(t.ctr.id, func() {
   651  		ei := libcontainerdtypes.EventInfo{
   652  			ContainerID: t.ctr.id,
   653  			ProcessID:   p.id,
   654  			Pid:         uint32(pid),
   655  		}
   656  		t.ctr.client.logger.WithFields(logrus.Fields{
   657  			"container":  t.ctr.id,
   658  			"event":      libcontainerdtypes.EventExecAdded,
   659  			"event-info": ei,
   660  		}).Info("sending event")
   661  		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecAdded, ei)
   662  		if err != nil {
   663  			t.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
   664  				"container":  t.ctr.id,
   665  				"event":      libcontainerdtypes.EventExecAdded,
   666  				"event-info": ei,
   667  			}).Error("failed to process event")
   668  		}
   669  		err = t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecStarted, ei)
   670  		if err != nil {
   671  			t.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
   672  				"container":  t.ctr.id,
   673  				"event":      libcontainerdtypes.EventExecStarted,
   674  				"event-info": ei,
   675  			}).Error("failed to process event")
   676  		}
   677  	})
   678  
   679  	return p, nil
   680  }
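
         // A hedged usage sketch for Exec with hypothetical values; attachStdio is
         // the daemon-provided stdio callback, and processID is informational only,
         // as noted above:
         //
         //	execSpec := &specs.Process{
         //		Cwd:  `C:\`,
         //		Args: []string{"cmd", "/C", "dir"},
         //	}
         //	p, err := t.Exec(ctx, "exec-1", execSpec, false, attachStdio)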
   681  
   682  func (p *process) Pid() uint32 {
   683  	p.mu.Lock()
   684  	hcsProcess := p.hcsProcess
   685  	p.mu.Unlock()
   686  	if hcsProcess == nil {
   687  		return 0
   688  	}
   689  	return uint32(hcsProcess.Pid())
   690  }
   691  
   692  func (p *process) Kill(_ context.Context, signal syscall.Signal) error {
   693  	p.mu.Lock()
   694  	hcsProcess := p.hcsProcess
   695  	p.mu.Unlock()
   696  	if hcsProcess == nil {
   697  		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
   698  	}
   699  	return hcsProcess.Kill()
   700  }
   701  
   702  // Kill handles `docker stop` on Windows. While Linux has support for
   703  // the full range of signals, signals aren't really implemented on Windows.
   704  // We fake supporting regular stop and -9 to force kill.
   705  func (t *task) Kill(_ context.Context, signal syscall.Signal) error {
   706  	hcsContainer, err := t.getHCSContainer()
   707  	if err != nil {
   708  		return err
   709  	}
   710  
   711  	logger := t.ctr.client.logger.WithFields(logrus.Fields{
   712  		"container": t.ctr.id,
   713  		"process":   t.id,
   714  		"pid":       t.Pid(),
   715  		"signal":    signal,
   716  	})
   717  	logger.Debug("Signal()")
   718  
   719  	var op string
   720  	if signal == syscall.SIGKILL {
   721  		// Terminate the compute system
   722  		t.ctr.mu.Lock()
   723  		t.ctr.terminateInvoked = true
   724  		t.ctr.mu.Unlock()
   725  		op, err = "terminate", hcsContainer.Terminate()
   726  	} else {
   727  		// Shut down the container
   728  		op, err = "shutdown", hcsContainer.Shutdown()
   729  	}
   730  	if err != nil {
   731  		if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
    732  			// Best effort: log the failure but do not propagate it.
    733  			logger.WithError(err).Errorf("failed to %s hcsshim container", op)
   734  		}
   735  	}
   736  
   737  	return nil
   738  }
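
         // A hedged sketch of how the daemon's stop/kill paths map onto the logic
         // above (the exact call sites live in the daemon, not in this package):
         //
         //	t.Kill(ctx, syscall.SIGTERM) // graceful: hcsContainer.Shutdown()
         //	t.Kill(ctx, syscall.SIGKILL) // force:    hcsContainer.Terminate(), terminateInvoked = true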
   739  
   740  // Resize handles a CLI event to resize an interactive docker run or docker
   741  // exec window.
   742  func (p *process) Resize(_ context.Context, width, height uint32) error {
   743  	p.mu.Lock()
   744  	hcsProcess := p.hcsProcess
   745  	p.mu.Unlock()
   746  	if hcsProcess == nil {
   747  		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
   748  	}
   749  
   750  	p.ctr.client.logger.WithFields(logrus.Fields{
   751  		"container": p.ctr.id,
   752  		"process":   p.id,
   753  		"height":    height,
   754  		"width":     width,
   755  		"pid":       hcsProcess.Pid(),
   756  	}).Debug("resizing")
   757  	return hcsProcess.ResizeConsole(uint16(width), uint16(height))
   758  }
   759  
   760  func (p *process) CloseStdin(context.Context) error {
   761  	p.mu.Lock()
   762  	hcsProcess := p.hcsProcess
   763  	p.mu.Unlock()
   764  	if hcsProcess == nil {
   765  		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
   766  	}
   767  
   768  	return hcsProcess.CloseStdin()
   769  }
   770  
   771  // Pause handles pause requests for containers
   772  func (t *task) Pause(_ context.Context) error {
   773  	if t.ctr.ociSpec.Windows.HyperV == nil {
   774  		return cerrdefs.ErrNotImplemented
   775  	}
   776  
   777  	t.ctr.mu.Lock()
   778  	defer t.ctr.mu.Unlock()
   779  
   780  	if err := t.assertIsCurrentTask(); err != nil {
   781  		return err
   782  	}
   783  	if t.ctr.hcsContainer == nil {
   784  		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
   785  	}
   786  	if err := t.ctr.hcsContainer.Pause(); err != nil {
   787  		return err
   788  	}
   789  
   790  	t.ctr.isPaused = true
   791  
   792  	t.ctr.client.eventQ.Append(t.ctr.id, func() {
   793  		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
   794  			ContainerID: t.ctr.id,
   795  			ProcessID:   t.id,
   796  		})
   797  		t.ctr.client.logger.WithFields(logrus.Fields{
   798  			"container": t.ctr.id,
   799  			"event":     libcontainerdtypes.EventPaused,
   800  		}).Info("sending event")
   801  		if err != nil {
   802  			t.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
   803  				"container": t.ctr.id,
   804  				"event":     libcontainerdtypes.EventPaused,
   805  			}).Error("failed to process event")
   806  		}
   807  	})
   808  
   809  	return nil
   810  }
   811  
   812  // Resume handles resume requests for containers
   813  func (t *task) Resume(ctx context.Context) error {
   814  	if t.ctr.ociSpec.Windows.HyperV == nil {
   815  		return errors.New("cannot resume Windows Server Containers")
   816  	}
   817  
   818  	t.ctr.mu.Lock()
   819  	defer t.ctr.mu.Unlock()
   820  
   821  	if err := t.assertIsCurrentTask(); err != nil {
   822  		return err
   823  	}
   824  	if t.ctr.hcsContainer == nil {
   825  		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
   826  	}
   827  	if err := t.ctr.hcsContainer.Resume(); err != nil {
   828  		return err
   829  	}
   830  
   831  	t.ctr.isPaused = false
   832  
   833  	t.ctr.client.eventQ.Append(t.ctr.id, func() {
   834  		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
   835  			ContainerID: t.ctr.id,
   836  			ProcessID:   t.id,
   837  		})
   838  		t.ctr.client.logger.WithFields(logrus.Fields{
   839  			"container": t.ctr.id,
   840  			"event":     libcontainerdtypes.EventResumed,
   841  		}).Info("sending event")
   842  		if err != nil {
   843  			t.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
   844  				"container": t.ctr.id,
   845  				"event":     libcontainerdtypes.EventResumed,
   846  			}).Error("failed to process event")
   847  		}
   848  	})
   849  
   850  	return nil
   851  }
   852  
   853  // Stats handles stats requests for containers
   854  func (t *task) Stats(_ context.Context) (*libcontainerdtypes.Stats, error) {
   855  	hc, err := t.getHCSContainer()
   856  	if err != nil {
   857  		return nil, err
   858  	}
   859  
   860  	readAt := time.Now()
   861  	s, err := hc.Statistics()
   862  	if err != nil {
   863  		return nil, err
   864  	}
   865  	return &libcontainerdtypes.Stats{
   866  		Read:     readAt,
   867  		HCSStats: &s,
   868  	}, nil
   869  }
   870  
   871  // LoadContainer is the handler for restoring a container
   872  func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
   873  	c.logger.WithField("container", id).Debug("LoadContainer()")
   874  
   875  	// TODO Windows: On RS1, a re-attach isn't possible.
    876  	// However, there is a scenario in which this becomes an issue.
   877  	// Consider a background container. The daemon dies unexpectedly.
   878  	// HCS will still have the compute service alive and running.
    879  	// For consistency, we call in to terminate it regardless of whether HCS knows about it.
   880  	// We explicitly just log a warning if the terminate fails.
   881  	// Then we tell the backend the container exited.
   882  	hc, err := hcsshim.OpenContainer(id)
   883  	if err != nil {
   884  		return nil, errdefs.NotFound(errors.New("container not found"))
   885  	}
   886  	const terminateTimeout = time.Minute * 2
   887  	err = hc.Terminate()
   888  
   889  	if hcsshim.IsPending(err) {
   890  		err = hc.WaitTimeout(terminateTimeout)
   891  	} else if hcsshim.IsAlreadyStopped(err) {
   892  		err = nil
   893  	}
   894  
   895  	if err != nil {
   896  		c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
   897  		return nil, err
   898  	}
   899  	return &container{
   900  		client:       c,
   901  		hcsContainer: hc,
   902  		id:           id,
   903  	}, nil
   904  }
   905  
   906  // AttachTask is only called by the daemon when restoring containers. As
   907  // re-attach isn't possible (see LoadContainer), a NotFound error is
   908  // unconditionally returned to allow restore to make progress.
   909  func (*container) AttachTask(context.Context, libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
   910  	return nil, errdefs.NotFound(cerrdefs.ErrNotImplemented)
   911  }
   912  
   913  // Pids returns a list of process IDs running in a container. It is not
   914  // implemented on Windows.
   915  func (t *task) Pids(context.Context) ([]containerd.ProcessInfo, error) {
   916  	return nil, errors.New("not implemented on Windows")
   917  }
   918  
   919  // Summary returns a summary of the processes running in a container.
    920  // This is present on Windows to support docker top. On Linux, the
   921  // engine shells out to ps to get process information. On Windows, as
   922  // the containers could be Hyper-V containers, they would not be
   923  // visible on the container host. However, libcontainerd does have
   924  // that information.
   925  func (t *task) Summary(_ context.Context) ([]libcontainerdtypes.Summary, error) {
   926  	hc, err := t.getHCSContainer()
   927  	if err != nil {
   928  		return nil, err
   929  	}
   930  
   931  	p, err := hc.ProcessList()
   932  	if err != nil {
   933  		return nil, err
   934  	}
   935  
   936  	pl := make([]libcontainerdtypes.Summary, len(p))
   937  	for i := range p {
   938  		pl[i] = libcontainerdtypes.Summary{
   939  			ImageName:                    p[i].ImageName,
   940  			CreatedAt:                    p[i].CreateTimestamp,
   941  			KernelTime_100Ns:             p[i].KernelTime100ns,
   942  			MemoryCommitBytes:            p[i].MemoryCommitBytes,
   943  			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
   944  			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
   945  			ProcessID:                    p[i].ProcessId,
   946  			UserTime_100Ns:               p[i].UserTime100ns,
   947  			ExecID:                       "",
   948  		}
   949  	}
   950  	return pl, nil
   951  }
   952  
   953  func (p *process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
   954  	select {
   955  	case <-ctx.Done():
   956  		return nil, errors.WithStack(ctx.Err())
   957  	case <-p.waitCh:
   958  	default:
   959  		return nil, errdefs.Conflict(errors.New("process is running"))
   960  	}
   961  	return p.exited, nil
   962  }
   963  
   964  func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
   965  	select {
   966  	case <-ctx.Done():
   967  		return nil, errors.WithStack(ctx.Err())
   968  	case <-t.waitCh:
   969  	default:
   970  		return nil, errdefs.Conflict(errors.New("container is not stopped"))
   971  	}
   972  
   973  	t.ctr.mu.Lock()
   974  	defer t.ctr.mu.Unlock()
   975  	if err := t.assertIsCurrentTask(); err != nil {
   976  		return nil, err
   977  	}
   978  	t.ctr.task = nil
   979  	return t.exited, nil
   980  }
   981  
   982  func (t *task) ForceDelete(ctx context.Context) error {
   983  	select {
   984  	case <-t.waitCh: // Task is already stopped.
   985  		_, err := t.Delete(ctx)
   986  		return err
   987  	default:
   988  	}
   989  
   990  	if err := t.Kill(ctx, syscall.SIGKILL); err != nil {
   991  		return errors.Wrap(err, "could not force-kill task")
   992  	}
   993  
   994  	select {
   995  	case <-ctx.Done():
   996  		return ctx.Err()
   997  	case <-t.waitCh:
   998  		_, err := t.Delete(ctx)
   999  		return err
  1000  	}
  1001  }
  1002  
  1003  func (t *task) Status(ctx context.Context) (containerd.Status, error) {
  1004  	select {
  1005  	case <-t.waitCh:
  1006  		return containerd.Status{
  1007  			Status:     containerd.Stopped,
  1008  			ExitStatus: t.exited.ExitCode(),
  1009  			ExitTime:   t.exited.ExitTime(),
  1010  		}, nil
  1011  	default:
  1012  	}
  1013  
  1014  	t.ctr.mu.Lock()
  1015  	defer t.ctr.mu.Unlock()
  1016  	s := containerd.Running
  1017  	if t.ctr.isPaused {
  1018  		s = containerd.Paused
  1019  	}
  1020  	return containerd.Status{Status: s}, nil
  1021  }
  1022  
  1023  func (*task) UpdateResources(ctx context.Context, resources *libcontainerdtypes.Resources) error {
   1024  	// Updating resources isn't supported on Windows,
   1025  	// but we return nil so that updating a container does not fail.
  1026  	return nil
  1027  }
  1028  
  1029  func (*task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
  1030  	return errors.New("Windows: Containers do not support checkpoints")
  1031  }
  1032  
  1033  // assertIsCurrentTask returns a non-nil error if the task has been deleted.
  1034  func (t *task) assertIsCurrentTask() error {
  1035  	if t.ctr.task != t {
  1036  		return errors.WithStack(errdefs.NotFound(fmt.Errorf("task %q not found", t.id)))
  1037  	}
  1038  	return nil
  1039  }
  1040  
  1041  // getHCSContainer returns a reference to the hcsshim Container for the task's
  1042  // container if neither the task nor container have been deleted.
  1043  //
  1044  // t.ctr.mu must not be locked by the calling goroutine when calling this
  1045  // function.
  1046  func (t *task) getHCSContainer() (hcsshim.Container, error) {
  1047  	t.ctr.mu.Lock()
  1048  	defer t.ctr.mu.Unlock()
  1049  	if err := t.assertIsCurrentTask(); err != nil {
  1050  		return nil, err
  1051  	}
  1052  	hc := t.ctr.hcsContainer
  1053  	if hc == nil {
  1054  		return nil, errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", t.ctr.id)))
  1055  	}
  1056  	return hc, nil
  1057  }
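
         // A hedged sketch of the pattern used by callers such as Stats and
         // Summary: getHCSContainer takes ctr.mu itself, so it must be called
         // without the container mutex held.
         //
         //	hc, err := t.getHCSContainer() // ctr.mu must NOT already be held here
         //	if err != nil {
         //		return nil, err
         //	}
         //	s, err := hc.Statistics()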
  1058  
  1059  // ctr mutex must be held when calling this function.
  1060  func (ctr *container) shutdownContainer() error {
  1061  	var err error
  1062  	const waitTimeout = time.Minute * 5
  1063  
  1064  	if !ctr.terminateInvoked {
  1065  		err = ctr.hcsContainer.Shutdown()
  1066  	}
  1067  
  1068  	if hcsshim.IsPending(err) || ctr.terminateInvoked {
  1069  		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
  1070  	} else if hcsshim.IsAlreadyStopped(err) {
  1071  		err = nil
  1072  	}
  1073  
  1074  	if err != nil {
  1075  		ctr.client.logger.WithError(err).WithField("container", ctr.id).
  1076  			Debug("failed to shutdown container, terminating it")
  1077  		terminateErr := ctr.terminateContainer()
  1078  		if terminateErr != nil {
  1079  			ctr.client.logger.WithError(terminateErr).WithField("container", ctr.id).
  1080  				Error("failed to shutdown container, and subsequent terminate also failed")
  1081  			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
  1082  		}
  1083  		return err
  1084  	}
  1085  
  1086  	return nil
  1087  }
  1088  
  1089  // ctr mutex must be held when calling this function.
  1090  func (ctr *container) terminateContainer() error {
  1091  	const terminateTimeout = time.Minute * 5
  1092  	ctr.terminateInvoked = true
  1093  	err := ctr.hcsContainer.Terminate()
  1094  
  1095  	if hcsshim.IsPending(err) {
  1096  		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
  1097  	} else if hcsshim.IsAlreadyStopped(err) {
  1098  		err = nil
  1099  	}
  1100  
  1101  	if err != nil {
  1102  		ctr.client.logger.WithError(err).WithField("container", ctr.id).
  1103  			Debug("failed to terminate container")
  1104  		return err
  1105  	}
  1106  
  1107  	return nil
  1108  }
  1109  
  1110  func (p *process) reap() {
  1111  	logger := p.ctr.client.logger.WithFields(logrus.Fields{
  1112  		"container": p.ctr.id,
  1113  		"process":   p.id,
  1114  	})
  1115  
  1116  	var eventErr error
  1117  
  1118  	// Block indefinitely for the process to exit.
  1119  	if err := p.hcsProcess.Wait(); err != nil {
  1120  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1121  			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
  1122  		}
  1123  		// Fall through here, do not return. This ensures we tell the
  1124  		// docker engine that the process/container has exited to avoid
  1125  		// a container being dropped on the floor.
  1126  	}
  1127  	exitedAt := time.Now()
  1128  
  1129  	exitCode, err := p.hcsProcess.ExitCode()
  1130  	if err != nil {
  1131  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1132  			logger.WithError(err).Warnf("unable to get exit code for process")
  1133  		}
  1134  		// Since we got an error retrieving the exit code, make sure that the
  1135  		// code we return doesn't incorrectly indicate success.
  1136  		exitCode = -1
  1137  
  1138  		// Fall through here, do not return. This ensures we tell the
  1139  		// docker engine that the process/container has exited to avoid
  1140  		// a container being dropped on the floor.
  1141  	}
  1142  
  1143  	p.mu.Lock()
  1144  	hcsProcess := p.hcsProcess
  1145  	p.hcsProcess = nil
  1146  	p.mu.Unlock()
  1147  
  1148  	if err := hcsProcess.Close(); err != nil {
  1149  		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
  1150  		exitCode = -1
  1151  		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
  1152  	}
  1153  
  1154  	// Explicit locking is not required as reads from exited are
  1155  	// synchronized using waitCh.
  1156  	p.exited = containerd.NewExitStatus(uint32(exitCode), exitedAt, nil)
  1157  	close(p.waitCh)
  1158  
  1159  	p.ctr.client.eventQ.Append(p.ctr.id, func() {
  1160  		ei := libcontainerdtypes.EventInfo{
  1161  			ContainerID: p.ctr.id,
  1162  			ProcessID:   p.id,
  1163  			Pid:         uint32(hcsProcess.Pid()),
  1164  			ExitCode:    uint32(exitCode),
  1165  			ExitedAt:    exitedAt,
  1166  			Error:       eventErr,
  1167  		}
  1168  		p.ctr.client.logger.WithFields(logrus.Fields{
  1169  			"container":  p.ctr.id,
  1170  			"event":      libcontainerdtypes.EventExit,
  1171  			"event-info": ei,
  1172  		}).Info("sending event")
  1173  		err := p.ctr.client.backend.ProcessEvent(p.ctr.id, libcontainerdtypes.EventExit, ei)
  1174  		if err != nil {
  1175  			p.ctr.client.logger.WithError(err).WithFields(logrus.Fields{
  1176  				"container":  p.ctr.id,
  1177  				"event":      libcontainerdtypes.EventExit,
  1178  				"event-info": ei,
  1179  			}).Error("failed to process event")
  1180  		}
  1181  	})
  1182  }
  1183  
  1184  func (ctr *container) Delete(context.Context) error {
  1185  	ctr.mu.Lock()
  1186  	defer ctr.mu.Unlock()
  1187  
  1188  	if ctr.hcsContainer == nil {
  1189  		return errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", ctr.id)))
  1190  	}
  1191  
  1192  	// Check that there is no task currently running.
  1193  	if ctr.task != nil {
  1194  		select {
  1195  		case <-ctr.task.waitCh:
  1196  		default:
  1197  			return errors.WithStack(errdefs.Conflict(errors.New("container is not stopped")))
  1198  		}
  1199  	}
  1200  
  1201  	var (
  1202  		logger = ctr.client.logger.WithFields(logrus.Fields{
  1203  			"container": ctr.id,
  1204  		})
  1205  		thisErr error
  1206  	)
  1207  
  1208  	if err := ctr.shutdownContainer(); err != nil {
  1209  		logger.WithError(err).Warn("failed to shutdown container")
  1210  		thisErr = errors.Wrap(err, "failed to shutdown container")
  1211  	} else {
  1212  		logger.Debug("completed container shutdown")
  1213  	}
  1214  
  1215  	if err := ctr.hcsContainer.Close(); err != nil {
  1216  		logger.WithError(err).Error("failed to clean hcs container resources")
  1217  		thisErr = errors.Wrap(err, "failed to terminate container")
  1218  	}
  1219  
  1220  	ctr.hcsContainer = nil
  1221  	return thisErr
  1222  }