github.com/jfrazelle/docker@v1.1.2-0.20210712172922-bf78e25fe508/libcontainerd/local/local_windows.go

     1  package local // import "github.com/docker/docker/libcontainerd/local"
     2  
     3  // This package contains the legacy in-proc calls in HCS using the v1 schema
     4  // for Windows runtime purposes.
     5  
     6  import (
     7  	"context"
     8  	"encoding/json"
     9  	"fmt"
    10  	"io/ioutil"
    11  	"os"
    12  	"path/filepath"
    13  	"regexp"
    14  	"strings"
    15  	"sync"
    16  	"syscall"
    17  	"time"
    18  
    19  	"github.com/Microsoft/hcsshim"
    20  	"github.com/Microsoft/hcsshim/osversion"
    21  	"github.com/containerd/containerd"
    22  	"github.com/containerd/containerd/cio"
    23  	containerderrdefs "github.com/containerd/containerd/errdefs"
    24  
    25  	"github.com/docker/docker/errdefs"
    26  	"github.com/docker/docker/libcontainerd/queue"
    27  	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
    28  	"github.com/docker/docker/pkg/sysinfo"
    29  	"github.com/docker/docker/pkg/system"
    30  	specs "github.com/opencontainers/runtime-spec/specs-go"
    31  	"github.com/pkg/errors"
    32  	"github.com/sirupsen/logrus"
    33  	"golang.org/x/sys/windows"
    34  )
    35  
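        // process tracks a single process running inside an HCS container:
        // its libcontainerd process ID, OS pid, and HCS process handle.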
    36  type process struct {
    37  	id         string
    38  	pid        int
    39  	hcsProcess hcsshim.Process
    40  }
    41  
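        // container holds the state the local client keeps for an HCS
        // container: the OCI spec it was created from, the HCS handle, its
        // lifecycle status, and the init and exec processes running in it.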
    42  type container struct {
    43  	sync.Mutex
    44  
    45  	// The ociSpec is required, as client.Create() needs a spec, but can
    46  	// be called from the RestartManager context which does not otherwise
    47  	// have access to the Spec
    48  	ociSpec *specs.Spec
    49  
    50  	isWindows    bool
    51  	hcsContainer hcsshim.Container
    52  
    53  	id               string
    54  	status           containerd.ProcessStatus
    55  	exitedAt         time.Time
    56  	exitCode         uint32
    57  	waitCh           chan struct{}
    58  	init             *process
    59  	execs            map[string]*process
    60  	terminateInvoked bool
    61  }
    62  
    63  // defaultOwner is a tag passed to HCS to allow it to differentiate between
    64  // container creator management stacks. We hard code "docker" in the case
    65  // of docker.
    66  const defaultOwner = "docker"
    67  
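        // client is the in-proc, HCS-backed implementation of
        // libcontainerdtypes.Client used for the legacy v1 HCS Windows
        // runtime path.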
    68  type client struct {
    69  	sync.Mutex
    70  
    71  	stateDir   string
    72  	backend    libcontainerdtypes.Backend
    73  	logger     *logrus.Entry
    74  	eventQ     queue.Queue
    75  	containers map[string]*container
    76  }
    77  
     78  // NewClient creates a new local executor for Windows.
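        //
        // A minimal usage sketch (illustrative only: the nil containerd client,
        // the "moby" namespace, and the backend/stateDir values are assumptions;
        // the *containerd.Client argument is not used by this constructor):
        //
        //	cli, err := NewClient(context.Background(), nil, stateDir, "moby", backend)
        //	if err != nil {
        //		return err
        //	}
        //	err = cli.Create(context.Background(), containerID, ociSpec, "", nil)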
    79  func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
    80  	c := &client{
    81  		stateDir:   stateDir,
    82  		backend:    b,
    83  	logger:     logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
    84  		containers: make(map[string]*container),
    85  	}
    86  
    87  	return c, nil
    88  }
    89  
    90  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
    91  	return containerd.Version{}, errors.New("not implemented on Windows")
    92  }
    93  
    94  // Create is the entrypoint to create a container from a spec.
    95  // The table below shows the fields required for the HCS JSON calling
    96  // parameters; fields that are not populated are omitted.
    97  // +-----------------+--------------------------------------------+---------------------------------------------------+
    98  // |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
    99  // +-----------------+--------------------------------------------+---------------------------------------------------+
   100  // | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
   101  // | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
   102  // | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
   103  // | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
   104  // +-----------------+--------------------------------------------+---------------------------------------------------+
   105  //
   106  // Isolation=Process example:
   107  //
   108  // {
   109  // 	"SystemType": "Container",
   110  // 	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   111  // 	"Owner": "docker",
   112  // 	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
   113  // 	"IgnoreFlushesDuringBoot": true,
   114  // 	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   115  // 	"Layers": [{
   116  // 		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   117  // 		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   118  // 	}],
   119  // 	"HostName": "5e0055c814a6",
   120  // 	"MappedDirectories": [],
   121  // 	"HvPartition": false,
   122  // 	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
   123  // }
   124  //
   125  // Isolation=Hyper-V example:
   126  //
   127  // {
   128  // 	"SystemType": "Container",
   129  // 	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
   130  // 	"Owner": "docker",
   131  // 	"IgnoreFlushesDuringBoot": true,
   132  // 	"Layers": [{
   133  // 		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   134  // 		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   135  // 	}],
   136  // 	"HostName": "475c2c58933b",
   137  // 	"MappedDirectories": [],
   138  // 	"HvPartition": true,
   139  // 	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
   140  // 	"DNSSearchList": "a.com,b.com,c.com",
   141  // 	"HvRuntime": {
   142  // 		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
   143  // 	},
   144  // }
   145  func (c *client) Create(_ context.Context, id string, spec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
   146  	if ctr := c.getContainer(id); ctr != nil {
   147  		return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   148  	}
   149  
   150  	var err error
   151  	if spec.Linux != nil {
   152  		return errors.New("linux containers are not supported on this platform")
   153  	}
   154  	err = c.createWindows(id, spec, runtimeOptions)
   155  
   156  	if err == nil {
   157  		c.eventQ.Append(id, func() {
   158  			ei := libcontainerdtypes.EventInfo{
   159  				ContainerID: id,
   160  			}
   161  			c.logger.WithFields(logrus.Fields{
   162  				"container": id,
   163  				"event":     libcontainerdtypes.EventCreate,
   164  			}).Info("sending event")
   165  			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
   166  			if err != nil {
   167  				c.logger.WithError(err).WithFields(logrus.Fields{
   168  					"container": id,
   169  					"event":     libcontainerdtypes.EventCreate,
   170  				}).Error("failed to process event")
   171  			}
   172  		})
   173  	}
   174  	return err
   175  }
   176  
   177  func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
   178  	logger := c.logger.WithField("container", id)
   179  	configuration := &hcsshim.ContainerConfig{
   180  		SystemType:              "Container",
   181  		Name:                    id,
   182  		Owner:                   defaultOwner,
   183  		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
   184  		HostName:                spec.Hostname,
   185  		HvPartition:             false,
   186  	}
   187  
   188  	c.extractResourcesFromSpec(spec, configuration)
   189  
   190  	if spec.Windows.Resources != nil {
   191  		if spec.Windows.Resources.Storage != nil {
   192  			if spec.Windows.Resources.Storage.Bps != nil {
   193  				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
   194  			}
   195  			if spec.Windows.Resources.Storage.Iops != nil {
   196  				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
   197  			}
   198  		}
   199  	}
   200  
   201  	if spec.Windows.HyperV != nil {
   202  		configuration.HvPartition = true
   203  	}
   204  
   205  	if spec.Windows.Network != nil {
   206  		configuration.EndpointList = spec.Windows.Network.EndpointList
   207  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   208  		if spec.Windows.Network.DNSSearchList != nil {
   209  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   210  		}
   211  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   212  	}
   213  
   214  	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
   215  		configuration.Credentials = cs
   216  	}
   217  
   218  	// We must have at least two layers in the spec, the bottom one being a
   219  	// base image, the top one being the RW layer.
   220  	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
   221  		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
   222  	}
   223  
   224  	// Strip off the top-most layer as that's passed in separately to HCS
   225  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   226  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   227  
   228  	if configuration.HvPartition {
   229  		// We don't currently support setting the utility VM image explicitly.
   230  		// TODO circa RS5, this may be re-locatable.
   231  		if spec.Windows.HyperV.UtilityVMPath != "" {
   232  			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
   233  		}
   234  
   235  		// Find the upper-most utility VM image.
   236  		var uvmImagePath string
   237  		for _, path := range layerFolders {
   238  			fullPath := filepath.Join(path, "UtilityVM")
   239  			_, err := os.Stat(fullPath)
   240  			if err == nil {
   241  				uvmImagePath = fullPath
   242  				break
   243  			}
   244  			if !os.IsNotExist(err) {
   245  				return err
   246  			}
   247  		}
   248  		if uvmImagePath == "" {
   249  			return errors.New("utility VM image could not be found")
   250  		}
   251  		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
   252  
   253  		if spec.Root.Path != "" {
   254  			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
   255  		}
   256  	} else {
   257  		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
   258  		if _, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil {
   259  			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
   260  		}
   261  		// HCS API requires the trailing backslash to be removed
   262  		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
   263  	}
   264  
   265  	if spec.Root.Readonly {
   266  		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
   267  	}
   268  
   269  	for _, layerPath := range layerFolders {
   270  		_, filename := filepath.Split(layerPath)
   271  		g, err := hcsshim.NameToGuid(filename)
   272  		if err != nil {
   273  			return err
   274  		}
   275  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   276  			ID:   g.ToString(),
   277  			Path: layerPath,
   278  		})
   279  	}
   280  
   281  	// Add the mounts (volumes, bind mounts etc) to the structure
   282  	var mds []hcsshim.MappedDir
   283  	var mps []hcsshim.MappedPipe
   284  	for _, mount := range spec.Mounts {
   285  		const pipePrefix = `\\.\pipe\`
   286  		if mount.Type != "" {
   287  			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
   288  		}
   289  		if strings.HasPrefix(mount.Destination, pipePrefix) {
   290  			mp := hcsshim.MappedPipe{
   291  				HostPath:          mount.Source,
   292  				ContainerPipeName: mount.Destination[len(pipePrefix):],
   293  			}
   294  			mps = append(mps, mp)
   295  		} else {
   296  			md := hcsshim.MappedDir{
   297  				HostPath:      mount.Source,
   298  				ContainerPath: mount.Destination,
   299  				ReadOnly:      false,
   300  			}
   301  			for _, o := range mount.Options {
   302  				if strings.ToLower(o) == "ro" {
   303  					md.ReadOnly = true
   304  				}
   305  			}
   306  			mds = append(mds, md)
   307  		}
   308  	}
   309  	configuration.MappedDirectories = mds
   310  	if len(mps) > 0 && osversion.Build() < osversion.RS3 {
   311  		return errors.New("named pipe mounts are not supported on this version of Windows")
   312  	}
   313  	configuration.MappedPipes = mps
   314  
   315  	if len(spec.Windows.Devices) > 0 {
   316  		// Add any device assignments
   317  		if configuration.HvPartition {
   318  			return errors.New("device assignment is not supported for HyperV containers")
   319  		}
   320  		if osversion.Build() < osversion.RS5 {
   321  			return errors.New("device assignment requires Windows builds RS5 (17763+) or later")
   322  		}
   323  		for _, d := range spec.Windows.Devices {
   324  			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
   325  		}
   326  	}
   327  
   328  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   329  	if err != nil {
   330  		return err
   331  	}
   332  
   333  	// Construct a container object for calling start on it.
   334  	ctr := &container{
   335  		id:           id,
   336  		execs:        make(map[string]*process),
   337  		isWindows:    true,
   338  		ociSpec:      spec,
   339  		hcsContainer: hcsContainer,
   340  		status:       containerd.Created,
   341  		waitCh:       make(chan struct{}),
   342  	}
   343  
   344  	logger.Debug("starting container")
   345  	if err = hcsContainer.Start(); err != nil {
   346  		c.logger.WithError(err).Error("failed to start container")
   347  		ctr.Lock()
   348  		if err := c.terminateContainer(ctr); err != nil {
   349  			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
   350  		} else {
   351  			c.logger.Debug("cleaned up after failed Start by calling Terminate")
   352  		}
   353  		ctr.Unlock()
   354  		return err
   355  	}
   356  
   357  	c.Lock()
   358  	c.containers[id] = ctr
   359  	c.Unlock()
   360  
   361  	logger.Debug("createWindows() completed successfully")
   362  	return nil
   363  
   364  }
   365  
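        // extractResourcesFromSpec copies the CPU and memory limits from the OCI
        // spec into the HCS container configuration, capping the requested CPU
        // count at the number of processors available on the host.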
   366  func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
   367  	if spec.Windows.Resources != nil {
   368  		if spec.Windows.Resources.CPU != nil {
   369  			if spec.Windows.Resources.CPU.Count != nil {
   370  				// This check is being done here rather than in adaptContainerSettings
   371  				// because we don't want to update the HostConfig in case this container
   372  				// is moved to a host with more CPUs than this one.
   373  				cpuCount := *spec.Windows.Resources.CPU.Count
   374  				hostCPUCount := uint64(sysinfo.NumCPU())
   375  				if cpuCount > hostCPUCount {
   376  					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
   377  					cpuCount = hostCPUCount
   378  				}
   379  				configuration.ProcessorCount = uint32(cpuCount)
   380  			}
   381  			if spec.Windows.Resources.CPU.Shares != nil {
   382  				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
   383  			}
   384  			if spec.Windows.Resources.CPU.Maximum != nil {
   385  				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
   386  			}
   387  		}
   388  		if spec.Windows.Resources.Memory != nil {
   389  			if spec.Windows.Resources.Memory.Limit != nil {
   390  				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
   391  			}
   392  		}
   393  	}
   394  }
   395  
   396  func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   397  	ctr := c.getContainer(id)
   398  	switch {
   399  	case ctr == nil:
   400  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   401  	case ctr.init != nil:
   402  		return -1, errors.WithStack(errdefs.NotModified(errors.New("container already started")))
   403  	}
   404  
   405  	logger := c.logger.WithField("container", id)
   406  
   407  	// Note we always tell HCS to create stdout as it's required
   408  	// regardless of '-i' or '-t' options, so that docker can always grab
   409  	// the output through logs. We also tell HCS to always create stdin,
   410  	// even if it's not used - it will be closed shortly. Stderr is only
   411  	// created if we're not running with '-t'.
   412  	var (
   413  		emulateConsole   bool
   414  		createStdErrPipe bool
   415  	)
   416  	if ctr.ociSpec.Process != nil {
   417  		emulateConsole = ctr.ociSpec.Process.Terminal
   418  		createStdErrPipe = !ctr.ociSpec.Process.Terminal
   419  	}
   420  
   421  	createProcessParms := &hcsshim.ProcessConfig{
   422  		EmulateConsole:   emulateConsole,
   423  		WorkingDirectory: ctr.ociSpec.Process.Cwd,
   424  		CreateStdInPipe:  true,
   425  		CreateStdOutPipe: true,
   426  		CreateStdErrPipe: createStdErrPipe,
   427  	}
   428  
   429  	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
   430  		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
   431  		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
   432  	}
   433  
   434  	// Configure the environment for the process
   435  	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
   436  
   437  	// Configure the CommandLine/CommandArgs
   438  	setCommandLineAndArgs(ctr.isWindows, ctr.ociSpec.Process, createProcessParms)
   439  	if ctr.isWindows {
   440  		logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)
   441  	}
   442  
   443  	createProcessParms.User = ctr.ociSpec.Process.User.Username
   444  
   445  	// LCOW requires the raw OCI spec passed through HCS and onwards to
   446  	// GCS for the utility VM.
   447  	if !ctr.isWindows {
   448  		ociBuf, err := json.Marshal(ctr.ociSpec)
   449  		if err != nil {
   450  			return -1, err
   451  		}
   452  		ociRaw := json.RawMessage(ociBuf)
   453  		createProcessParms.OCISpecification = &ociRaw
   454  	}
   455  
   456  	ctr.Lock()
   457  
   458  	// Start the command running in the container.
   459  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   460  	if err != nil {
   461  		logger.WithError(err).Error("CreateProcess() failed")
   462  		// Fix for https://github.com/moby/moby/issues/38719.
   463  		// If the init process failed to launch, we still need to reap the
   464  		// container to avoid leaking it.
   465  		//
   466  		// Note we use the explicit exit code of 127 which is the
   467  		// Linux shell equivalent of "command not found". Windows cannot
   468  		// know ahead of time whether or not the command exists, especially
   469  		// in the case of Hyper-V containers.
   470  		ctr.Unlock()
   471  		exitedAt := time.Now()
   472  		p := &process{
   473  			id:  libcontainerdtypes.InitProcessName,
   474  			pid: 0,
   475  		}
   476  		c.reapContainer(ctr, p, 127, exitedAt, nil, logger)
   477  		return -1, err
   478  	}
   479  
   480  	defer ctr.Unlock()
   481  
   482  	defer func() {
   483  		if err != nil {
   484  			if err := newProcess.Kill(); err != nil {
   485  				logger.WithError(err).Error("failed to kill process")
   486  			}
   487  			go func() {
   488  				if err := newProcess.Wait(); err != nil {
   489  					logger.WithError(err).Error("failed to wait for process")
   490  				}
   491  				if err := newProcess.Close(); err != nil {
   492  					logger.WithError(err).Error("failed to clean process resources")
   493  				}
   494  			}()
   495  		}
   496  	}()
   497  	p := &process{
   498  		hcsProcess: newProcess,
   499  		id:         libcontainerdtypes.InitProcessName,
   500  		pid:        newProcess.Pid(),
   501  	}
   502  	logger.WithField("pid", p.pid).Debug("init process started")
   503  
   504  	ctr.status = containerd.Running
   505  	ctr.init = p
   506  
   507  	// Spin up a go routine waiting for exit to handle cleanup
   508  	go c.reapProcess(ctr, p)
   509  
   510  	// Don't shadow err here due to our deferred clean-up.
   511  	var dio *cio.DirectIO
   512  	dio, err = newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
   513  	if err != nil {
   514  		logger.WithError(err).Error("failed to get stdio pipes")
   515  		return -1, err
   516  	}
   517  	_, err = attachStdio(dio)
   518  	if err != nil {
   519  		logger.WithError(err).Error("failed to attach stdio")
   520  		return -1, err
   521  	}
   522  
   523  	// Generate the associated event
   524  	c.eventQ.Append(id, func() {
   525  		ei := libcontainerdtypes.EventInfo{
   526  			ContainerID: id,
   527  			ProcessID:   libcontainerdtypes.InitProcessName,
   528  			Pid:         uint32(p.pid),
   529  		}
   530  		c.logger.WithFields(logrus.Fields{
   531  			"container":  ctr.id,
   532  			"event":      libcontainerdtypes.EventStart,
   533  			"event-info": ei,
   534  		}).Info("sending event")
   535  		err := c.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
   536  		if err != nil {
   537  			c.logger.WithError(err).WithFields(logrus.Fields{
   538  				"container":  id,
   539  				"event":      libcontainerdtypes.EventStart,
   540  				"event-info": ei,
   541  			}).Error("failed to process event")
   542  		}
   543  	})
   544  	logger.Debug("start() completed")
   545  	return p.pid, nil
   546  }
   547  
   548  // setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
   549  func setCommandLineAndArgs(isWindows bool, process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
   550  	if isWindows {
   551  		if process.CommandLine != "" {
   552  			createProcessParms.CommandLine = process.CommandLine
   553  		} else {
   554  			createProcessParms.CommandLine = system.EscapeArgs(process.Args)
   555  		}
   556  	} else {
   557  		createProcessParms.CommandArgs = process.Args
   558  	}
   559  }
   560  
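        // newIOFromProcess wraps the stdio pipes of an HCS process in a
        // containerd DirectIO so the daemon's stdio callback can attach to them;
        // stdout and stderr are wrapped so the underlying pipes are closed
        // automatically once the streams end.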
   561  func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
   562  	stdin, stdout, stderr, err := newProcess.Stdio()
   563  	if err != nil {
   564  		return nil, err
   565  	}
   566  
   567  	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)
   568  
   569  	// Convert io.ReadClosers to io.Readers
   570  	if stdout != nil {
   571  		dio.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
   572  	}
   573  	if stderr != nil {
   574  		dio.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
   575  	}
   576  	return dio, nil
   577  }
   578  
   579  // Exec adds a process to a running container
   580  func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   581  	ctr := c.getContainer(containerID)
   582  	switch {
   583  	case ctr == nil:
   584  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   585  	case ctr.hcsContainer == nil:
   586  		return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
   587  	case ctr.execs != nil && ctr.execs[processID] != nil:
   588  		return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   589  	}
   590  	logger := c.logger.WithFields(logrus.Fields{
   591  		"container": containerID,
   592  		"exec":      processID,
   593  	})
   594  
   595  	// Note we always tell HCS to
   596  	// create stdout as it's required regardless of '-i' or '-t' options, so that
   597  	// docker can always grab the output through logs. We also tell HCS to always
   598  	// create stdin, even if it's not used - it will be closed shortly. Stderr
   599  	// is only created if we're not running with '-t'.
   600  	createProcessParms := &hcsshim.ProcessConfig{
   601  		CreateStdInPipe:  true,
   602  		CreateStdOutPipe: true,
   603  		CreateStdErrPipe: !spec.Terminal,
   604  	}
   605  	if spec.Terminal {
   606  		createProcessParms.EmulateConsole = true
   607  		if spec.ConsoleSize != nil {
   608  			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
   609  			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
   610  		}
   611  	}
   612  
   613  	// Take the working directory from the exec process spec if it is
   614  	// defined, otherwise fall back to the container's init process.
   615  	if spec.Cwd != "" {
   616  		createProcessParms.WorkingDirectory = spec.Cwd
   617  	} else {
   618  		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
   619  	}
   620  
   621  	// Configure the environment for the process
   622  	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
   623  
   624  	// Configure the CommandLine/CommandArgs
   625  	setCommandLineAndArgs(ctr.isWindows, spec, createProcessParms)
   626  	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)
   627  
   628  	createProcessParms.User = spec.User.Username
   629  
   630  	// Start the command running in the container.
   631  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   632  	if err != nil {
   633  		logger.WithError(err).Errorf("exec's CreateProcess() failed")
   634  		return -1, err
   635  	}
   636  	pid := newProcess.Pid()
   637  	defer func() {
   638  		if err != nil {
   639  			if err := newProcess.Kill(); err != nil {
   640  				logger.WithError(err).Error("failed to kill process")
   641  			}
   642  			go func() {
   643  				if err := newProcess.Wait(); err != nil {
   644  					logger.WithError(err).Error("failed to wait for process")
   645  				}
   646  				if err := newProcess.Close(); err != nil {
   647  					logger.WithError(err).Error("failed to clean process resources")
   648  				}
   649  			}()
   650  		}
   651  	}()
   652  
   653  	dio, err := newIOFromProcess(newProcess, spec.Terminal)
   654  	if err != nil {
   655  		logger.WithError(err).Error("failed to get stdio pipes")
   656  		return -1, err
   657  	}
   658  	// Tell the engine to attach streams back to the client
   659  	_, err = attachStdio(dio)
   660  	if err != nil {
   661  		return -1, err
   662  	}
   663  
   664  	p := &process{
   665  		id:         processID,
   666  		pid:        pid,
   667  		hcsProcess: newProcess,
   668  	}
   669  
   670  	// Add the process to the container's list of processes
   671  	ctr.Lock()
   672  	ctr.execs[processID] = p
   673  	ctr.Unlock()
   674  
   675  	// Spin up a go routine waiting for exit to handle cleanup
   676  	go c.reapProcess(ctr, p)
   677  
   678  	c.eventQ.Append(ctr.id, func() {
   679  		ei := libcontainerdtypes.EventInfo{
   680  			ContainerID: ctr.id,
   681  			ProcessID:   p.id,
   682  			Pid:         uint32(p.pid),
   683  		}
   684  		c.logger.WithFields(logrus.Fields{
   685  			"container":  ctr.id,
   686  			"event":      libcontainerdtypes.EventExecAdded,
   687  			"event-info": ei,
   688  		}).Info("sending event")
   689  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecAdded, ei)
   690  		if err != nil {
   691  			c.logger.WithError(err).WithFields(logrus.Fields{
   692  				"container":  ctr.id,
   693  				"event":      libcontainerdtypes.EventExecAdded,
   694  				"event-info": ei,
   695  			}).Error("failed to process event")
   696  		}
   697  		err = c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecStarted, ei)
   698  		if err != nil {
   699  			c.logger.WithError(err).WithFields(logrus.Fields{
   700  				"container":  ctr.id,
   701  				"event":      libcontainerdtypes.EventExecStarted,
   702  				"event-info": ei,
   703  			}).Error("failed to process event")
   704  		}
   705  	})
   706  
   707  	return pid, nil
   708  }
   709  
   710  // Signal handles `docker stop` on Windows. While Linux has support for
   711  // the full range of signals, signals aren't really implemented on Windows.
   712  // We fake supporting regular stop and -9 to force kill.
   713  func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
   714  	ctr, p, err := c.getProcess(containerID, processID)
   715  	if err != nil {
   716  		return err
   717  	}
   718  
   719  	logger := c.logger.WithFields(logrus.Fields{
   720  		"container": containerID,
   721  		"process":   processID,
   722  		"pid":       p.pid,
   723  		"signal":    signal,
   724  	})
   725  	logger.Debug("Signal()")
   726  
   727  	if processID == libcontainerdtypes.InitProcessName {
   728  		if syscall.Signal(signal) == syscall.SIGKILL {
   729  			// Terminate the compute system
   730  			ctr.Lock()
   731  			ctr.terminateInvoked = true
   732  			if err := ctr.hcsContainer.Terminate(); err != nil {
   733  				if !hcsshim.IsPending(err) {
   734  					logger.WithError(err).Error("failed to terminate hcsshim container")
   735  				}
   736  			}
   737  			ctr.Unlock()
   738  		} else {
   739  			// Shut down the container
   740  			if err := ctr.hcsContainer.Shutdown(); err != nil {
   741  				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
   742  					// The error is logged but intentionally not returned.
   743  					logger.WithError(err).Error("failed to shutdown hcsshim container")
   744  				}
   745  			}
   746  		}
   747  	} else {
   748  		return p.hcsProcess.Kill()
   749  	}
   750  
   751  	return nil
   752  }
   753  
   754  // Resize handles a CLI event to resize an interactive docker run or docker
   755  // exec window.
   756  func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
   757  	_, p, err := c.getProcess(containerID, processID)
   758  	if err != nil {
   759  		return err
   760  	}
   761  
   762  	c.logger.WithFields(logrus.Fields{
   763  		"container": containerID,
   764  		"process":   processID,
   765  		"height":    height,
   766  		"width":     width,
   767  		"pid":       p.pid,
   768  	}).Debug("resizing")
   769  	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
   770  }
   771  
   772  func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
   773  	_, p, err := c.getProcess(containerID, processID)
   774  	if err != nil {
   775  		return err
   776  	}
   777  
   778  	return p.hcsProcess.CloseStdin()
   779  }
   780  
   781  // Pause handles pause requests for containers
   782  func (c *client) Pause(_ context.Context, containerID string) error {
   783  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   784  	if err != nil {
   785  		return err
   786  	}
   787  
   788  	if ctr.ociSpec.Windows.HyperV == nil {
   789  		return containerderrdefs.ErrNotImplemented
   790  	}
   791  
   792  	ctr.Lock()
   793  	defer ctr.Unlock()
   794  
   795  	if err = ctr.hcsContainer.Pause(); err != nil {
   796  		return err
   797  	}
   798  
   799  	ctr.status = containerd.Paused
   800  
   801  	c.eventQ.Append(containerID, func() {
   802  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
   803  			ContainerID: containerID,
   804  			ProcessID:   libcontainerdtypes.InitProcessName,
   805  		})
   806  		c.logger.WithFields(logrus.Fields{
   807  			"container": ctr.id,
   808  			"event":     libcontainerdtypes.EventPaused,
   809  		}).Info("sending event")
   810  		if err != nil {
   811  			c.logger.WithError(err).WithFields(logrus.Fields{
   812  				"container": containerID,
   813  				"event":     libcontainerdtypes.EventPaused,
   814  			}).Error("failed to process event")
   815  		}
   816  	})
   817  
   818  	return nil
   819  }
   820  
   821  // Resume handles resume requests for containers
   822  func (c *client) Resume(_ context.Context, containerID string) error {
   823  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   824  	if err != nil {
   825  		return err
   826  	}
   827  
   828  	if ctr.ociSpec.Windows.HyperV == nil {
   829  		return errors.New("cannot resume Windows Server Containers")
   830  	}
   831  
   832  	ctr.Lock()
   833  	defer ctr.Unlock()
   834  
   835  	if err = ctr.hcsContainer.Resume(); err != nil {
   836  		return err
   837  	}
   838  
   839  	ctr.status = containerd.Running
   840  
   841  	c.eventQ.Append(containerID, func() {
   842  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
   843  			ContainerID: containerID,
   844  			ProcessID:   libcontainerdtypes.InitProcessName,
   845  		})
   846  		c.logger.WithFields(logrus.Fields{
   847  			"container": ctr.id,
   848  			"event":     libcontainerdtypes.EventResumed,
   849  		}).Info("sending event")
   850  		if err != nil {
   851  			c.logger.WithError(err).WithFields(logrus.Fields{
   852  				"container": containerID,
   853  				"event":     libcontainerdtypes.EventResumed,
   854  			}).Error("failed to process event")
   855  		}
   856  	})
   857  
   858  	return nil
   859  }
   860  
   861  // Stats handles stats requests for containers
   862  func (c *client) Stats(_ context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
   863  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   864  	if err != nil {
   865  		return nil, err
   866  	}
   867  
   868  	readAt := time.Now()
   869  	s, err := ctr.hcsContainer.Statistics()
   870  	if err != nil {
   871  		return nil, err
   872  	}
   873  	return &libcontainerdtypes.Stats{
   874  		Read:     readAt,
   875  		HCSStats: &s,
   876  	}, nil
   877  }
   878  
   879  // Restore is the handler for restoring a container
   880  func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (bool, int, libcontainerdtypes.Process, error) {
   881  	c.logger.WithField("container", id).Debug("restore()")
   882  
   883  	// TODO Windows: On RS1, a re-attach isn't possible.
   884  	// However, there is a scenario in which there is an issue.
   885  	// Consider a background container. The daemon dies unexpectedly.
   886  	// HCS will still have the compute service alive and running.
   887  	// For consistency, we call in to shoot it regardless of whether HCS knows about it.
   888  	// We explicitly just log a warning if the terminate fails.
   889  	// Then we tell the backend the container exited.
   890  	if hc, err := hcsshim.OpenContainer(id); err == nil {
   891  		const terminateTimeout = time.Minute * 2
   892  		err := hc.Terminate()
   893  
   894  		if hcsshim.IsPending(err) {
   895  			err = hc.WaitTimeout(terminateTimeout)
   896  		} else if hcsshim.IsAlreadyStopped(err) {
   897  			err = nil
   898  		}
   899  
   900  		if err != nil {
   901  			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
   902  			return false, -1, nil, err
   903  		}
   904  	}
   905  	return false, -1, &restoredProcess{
   906  		c:  c,
   907  		id: id,
   908  	}, nil
   909  }
   910  
   911  // ListPids returns a list of process IDs running in a container.
   912  // It is not implemented on Windows.
   913  func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
   914  	return nil, errors.New("not implemented on Windows")
   915  }
   916  
   917  // Summary returns a summary of the processes running in a container.
   918  // This is present on Windows to support docker top. On Linux, the
   919  // engine shells out to ps to get process information. On Windows, as
   920  // the containers could be Hyper-V containers, they would not be
   921  // visible on the container host. However, libcontainerd does have
   922  // that information.
   923  func (c *client) Summary(_ context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
   924  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   925  	if err != nil {
   926  		return nil, err
   927  	}
   928  
   929  	p, err := ctr.hcsContainer.ProcessList()
   930  	if err != nil {
   931  		return nil, err
   932  	}
   933  
   934  	pl := make([]libcontainerdtypes.Summary, len(p))
   935  	for i := range p {
   936  		pl[i] = libcontainerdtypes.Summary{
   937  			ImageName:                    p[i].ImageName,
   938  			CreatedAt:                    p[i].CreateTimestamp,
   939  			KernelTime_100Ns:             p[i].KernelTime100ns,
   940  			MemoryCommitBytes:            p[i].MemoryCommitBytes,
   941  			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
   942  			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
   943  			ProcessID:                    p[i].ProcessId,
   944  			UserTime_100Ns:               p[i].UserTime100ns,
   945  			ExecID:                       "",
   946  		}
   947  	}
   948  	return pl, nil
   949  }
   950  
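        // restoredProcess is the minimal libcontainerdtypes.Process returned by
        // Restore; its Delete defers to the client's DeleteTask.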
   951  type restoredProcess struct {
   952  	id string
   953  	c  *client
   954  }
   955  
   956  func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
   957  	return p.c.DeleteTask(ctx, p.id)
   958  }
   959  
   960  func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
   961  	ec := -1
   962  	ctr := c.getContainer(containerID)
   963  	if ctr == nil {
   964  		return uint32(ec), time.Now(), errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   965  	}
   966  
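        	// waitCh is closed by reapContainer once the init process has exited,
        	// so hitting the default case means the container has not stopped yet.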
   967  	select {
   968  	case <-ctx.Done():
   969  		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
   970  	case <-ctr.waitCh:
   971  	default:
   972  		return uint32(ec), time.Now(), errors.New("container is not stopped")
   973  	}
   974  
   975  	ctr.Lock()
   976  	defer ctr.Unlock()
   977  	return ctr.exitCode, ctr.exitedAt, nil
   978  }
   979  
   980  func (c *client) Delete(_ context.Context, containerID string) error {
   981  	c.Lock()
   982  	defer c.Unlock()
   983  	ctr := c.containers[containerID]
   984  	if ctr == nil {
   985  		return errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   986  	}
   987  
   988  	ctr.Lock()
   989  	defer ctr.Unlock()
   990  
   991  	switch ctr.status {
   992  	case containerd.Created:
   993  		if err := c.shutdownContainer(ctr); err != nil {
   994  			return err
   995  		}
   996  		fallthrough
   997  	case containerd.Stopped:
   998  		delete(c.containers, containerID)
   999  		return nil
  1000  	}
  1001  
  1002  	return errors.WithStack(errdefs.InvalidParameter(errors.New("container is not stopped")))
  1003  }
  1004  
  1005  func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
  1006  	c.Lock()
  1007  	defer c.Unlock()
  1008  	ctr := c.containers[containerID]
  1009  	if ctr == nil {
  1010  		return containerd.Unknown, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1011  	}
  1012  
  1013  	ctr.Lock()
  1014  	defer ctr.Unlock()
  1015  	return ctr.status, nil
  1016  }
  1017  
  1018  func (c *client) UpdateResources(ctx context.Context, containerID string, resources *libcontainerdtypes.Resources) error {
  1019  	// Updating resources isn't supported on Windows, but we return nil
  1020  	// so that the container-update code path can still proceed.
  1021  	return nil
  1022  }
  1023  
  1024  func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
  1025  	return errors.New("Windows: Containers do not support checkpoints")
  1026  }
  1027  
  1028  func (c *client) getContainer(id string) *container {
  1029  	c.Lock()
  1030  	ctr := c.containers[id]
  1031  	c.Unlock()
  1032  
  1033  	return ctr
  1034  }
  1035  
  1036  func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
  1037  	ctr := c.getContainer(containerID)
  1038  	switch {
  1039  	case ctr == nil:
  1040  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1041  	case ctr.init == nil:
  1042  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
  1043  	case processID == libcontainerdtypes.InitProcessName:
  1044  		return ctr, ctr.init, nil
  1045  	default:
  1046  		ctr.Lock()
  1047  		defer ctr.Unlock()
  1048  		if ctr.execs == nil {
  1049  			return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no execs")))
  1050  		}
  1051  	}
  1052  
  1053  	p := ctr.execs[processID]
  1054  	if p == nil {
  1055  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
  1056  	}
  1057  
  1058  	return ctr, p, nil
  1059  }
  1060  
  1061  // ctr mutex must be held when calling this function.
  1062  func (c *client) shutdownContainer(ctr *container) error {
  1063  	var err error
  1064  	const waitTimeout = time.Minute * 5
  1065  
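        	// If Terminate() has already been invoked (e.g. via SignalProcess),
        	// skip the graceful Shutdown() and just wait for the container to stop.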
  1066  	if !ctr.terminateInvoked {
  1067  		err = ctr.hcsContainer.Shutdown()
  1068  	}
  1069  
  1070  	if hcsshim.IsPending(err) || ctr.terminateInvoked {
  1071  		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
  1072  	} else if hcsshim.IsAlreadyStopped(err) {
  1073  		err = nil
  1074  	}
  1075  
  1076  	if err != nil {
  1077  		c.logger.WithError(err).WithField("container", ctr.id).
  1078  			Debug("failed to shutdown container, terminating it")
  1079  		terminateErr := c.terminateContainer(ctr)
  1080  		if terminateErr != nil {
  1081  			c.logger.WithError(terminateErr).WithField("container", ctr.id).
  1082  				Error("failed to shutdown container, and subsequent terminate also failed")
  1083  			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
  1084  		}
  1085  		return err
  1086  	}
  1087  
  1088  	return nil
  1089  }
  1090  
  1091  // ctr mutex must be held when calling this function.
  1092  func (c *client) terminateContainer(ctr *container) error {
  1093  	const terminateTimeout = time.Minute * 5
  1094  	ctr.terminateInvoked = true
  1095  	err := ctr.hcsContainer.Terminate()
  1096  
  1097  	if hcsshim.IsPending(err) {
  1098  		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
  1099  	} else if hcsshim.IsAlreadyStopped(err) {
  1100  		err = nil
  1101  	}
  1102  
  1103  	if err != nil {
  1104  		c.logger.WithError(err).WithField("container", ctr.id).
  1105  			Debug("failed to terminate container")
  1106  		return err
  1107  	}
  1108  
  1109  	return nil
  1110  }
  1111  
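        // reapProcess blocks until the given process exits, collects its exit
        // code, releases the HCS process resources, and queues the exit event
        // for the backend. For the init process it also reaps the container
        // itself. It returns the process exit code.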
  1112  func (c *client) reapProcess(ctr *container, p *process) int {
  1113  	logger := c.logger.WithFields(logrus.Fields{
  1114  		"container": ctr.id,
  1115  		"process":   p.id,
  1116  	})
  1117  
  1118  	var eventErr error
  1119  
  1120  	// Block indefinitely for the process to exit.
  1121  	if err := p.hcsProcess.Wait(); err != nil {
  1122  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1123  			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
  1124  		}
  1125  		// Fall through here, do not return. This ensures we attempt to
  1126  		// continue the shutdown in HCS and tell the docker engine that the
  1127  		// process/container has exited to avoid a container being dropped on
  1128  		// the floor.
  1129  	}
  1130  	exitedAt := time.Now()
  1131  
  1132  	exitCode, err := p.hcsProcess.ExitCode()
  1133  	if err != nil {
  1134  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1135  			logger.WithError(err).Warnf("unable to get exit code for process")
  1136  		}
  1137  		// Since we got an error retrieving the exit code, make sure that the
  1138  		// code we return doesn't incorrectly indicate success.
  1139  		exitCode = -1
  1140  
  1141  		// Fall through here, do not return. This ensures we attempt to
  1142  		// continue the shutdown in HCS and tell the docker engine that the
  1143  		// process/container has exited to avoid a container being dropped on
  1144  		// the floor.
  1145  	}
  1146  
  1147  	if err := p.hcsProcess.Close(); err != nil {
  1148  		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
  1149  		exitCode = -1
  1150  		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
  1151  	}
  1152  
  1153  	if p.id == libcontainerdtypes.InitProcessName {
  1154  		exitCode, eventErr = c.reapContainer(ctr, p, exitCode, exitedAt, eventErr, logger)
  1155  	}
  1156  
  1157  	c.eventQ.Append(ctr.id, func() {
  1158  		ei := libcontainerdtypes.EventInfo{
  1159  			ContainerID: ctr.id,
  1160  			ProcessID:   p.id,
  1161  			Pid:         uint32(p.pid),
  1162  			ExitCode:    uint32(exitCode),
  1163  			ExitedAt:    exitedAt,
  1164  			Error:       eventErr,
  1165  		}
  1166  		c.logger.WithFields(logrus.Fields{
  1167  			"container":  ctr.id,
  1168  			"event":      libcontainerdtypes.EventExit,
  1169  			"event-info": ei,
  1170  		}).Info("sending event")
  1171  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExit, ei)
  1172  		if err != nil {
  1173  			c.logger.WithError(err).WithFields(logrus.Fields{
  1174  				"container":  ctr.id,
  1175  				"event":      libcontainerdtypes.EventExit,
  1176  				"event-info": ei,
  1177  			}).Error("failed to process event")
  1178  		}
  1179  		if p.id != libcontainerdtypes.InitProcessName {
  1180  			ctr.Lock()
  1181  			delete(ctr.execs, p.id)
  1182  			ctr.Unlock()
  1183  		}
  1184  	})
  1185  
  1186  	return exitCode
  1187  }
  1188  
  1189  // reapContainer shuts down the container and releases associated resources. It returns
  1190  // the error to be logged in the eventInfo sent back to the monitor.
  1191  func (c *client) reapContainer(ctr *container, p *process, exitCode int, exitedAt time.Time, eventErr error, logger *logrus.Entry) (int, error) {
  1192  	// Update container status
  1193  	ctr.Lock()
  1194  	ctr.status = containerd.Stopped
  1195  	ctr.exitedAt = exitedAt
  1196  	ctr.exitCode = uint32(exitCode)
  1197  	close(ctr.waitCh)
  1198  
  1199  	if err := c.shutdownContainer(ctr); err != nil {
  1200  		exitCode = -1
  1201  		logger.WithError(err).Warn("failed to shutdown container")
  1202  		thisErr := errors.Wrap(err, "failed to shutdown container")
  1203  		if eventErr != nil {
  1204  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1205  		} else {
  1206  			eventErr = thisErr
  1207  		}
  1208  	} else {
  1209  		logger.Debug("completed container shutdown")
  1210  	}
  1211  	ctr.Unlock()
  1212  
  1213  	if err := ctr.hcsContainer.Close(); err != nil {
  1214  		exitCode = -1
  1215  		logger.WithError(err).Error("failed to clean hcs container resources")
  1216  		thisErr := errors.Wrap(err, "failed to terminate container")
  1217  		if eventErr != nil {
  1218  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1219  		} else {
  1220  			eventErr = thisErr
  1221  		}
  1222  	}
  1223  	return exitCode, eventErr
  1224  }