github.com/docker/engine@v22.0.0-20211208180946-d456264580cf+incompatible/libcontainerd/local/local_windows.go

     1  package local // import "github.com/docker/docker/libcontainerd/local"
     2  
     3  // This package contains the legacy in-proc calls in HCS using the v1 schema
     4  // for Windows runtime purposes.
     5  
     6  import (
     7  	"context"
     8  	"fmt"
     9  	"io"
    10  	"os"
    11  	"path/filepath"
    12  	"regexp"
    13  	"strings"
    14  	"sync"
    15  	"syscall"
    16  	"time"
    17  
    18  	"github.com/Microsoft/hcsshim"
    19  	"github.com/Microsoft/hcsshim/osversion"
    20  	"github.com/containerd/containerd"
    21  	"github.com/containerd/containerd/cio"
    22  	containerderrdefs "github.com/containerd/containerd/errdefs"
    23  
    24  	"github.com/docker/docker/errdefs"
    25  	"github.com/docker/docker/libcontainerd/queue"
    26  	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
    27  	"github.com/docker/docker/pkg/sysinfo"
    28  	"github.com/docker/docker/pkg/system"
    29  	specs "github.com/opencontainers/runtime-spec/specs-go"
    30  	"github.com/pkg/errors"
    31  	"github.com/sirupsen/logrus"
    32  	"golang.org/x/sys/windows"
    33  )
    34  
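         // process tracks a single process running inside an HCS container: either the
         // container's init process or an exec, identified by its id and pid.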
    35  type process struct {
    36  	id         string
    37  	pid        int
    38  	hcsProcess hcsshim.Process
    39  }
    40  
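         // container holds the state the local client keeps for a single HCS container:
         // the OCI spec it was created from, the live hcsshim handle, the init and exec
         // processes, and exit bookkeeping. The embedded mutex guards the mutable fields.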
    41  type container struct {
    42  	sync.Mutex
    43  
    44  	// The ociSpec is required, as client.Create() needs a spec, but can
    45  	// be called from the RestartManager context which does not otherwise
    46  	// have access to the Spec
    47  	ociSpec *specs.Spec
    48  
    49  	hcsContainer hcsshim.Container
    50  
    51  	id               string
    52  	status           containerd.ProcessStatus
    53  	exitedAt         time.Time
    54  	exitCode         uint32
    55  	waitCh           chan struct{}
    56  	init             *process
    57  	execs            map[string]*process
    58  	terminateInvoked bool
    59  }
    60  
     61  // defaultOwner is a tag passed to HCS to allow it to differentiate between
     62  // container creator management stacks. In the case of Docker we hard-code
     63  // "docker" as the owner.
    64  const defaultOwner = "docker"
    65  
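         // client is the local, in-process libcontainerd client for Windows. It drives
         // containers directly through HCS (v1 schema) rather than through a containerd
         // shim, and queues events to the backend through eventQ.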
    66  type client struct {
    67  	sync.Mutex
    68  
    69  	stateDir   string
    70  	backend    libcontainerdtypes.Backend
    71  	logger     *logrus.Entry
    72  	eventQ     queue.Queue
    73  	containers map[string]*container
    74  }
    75  
     76  // NewClient creates a new local executor for Windows
    77  func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
    78  	c := &client{
    79  		stateDir:   stateDir,
    80  		backend:    b,
     81  		logger:     logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
    82  		containers: make(map[string]*container),
    83  	}
    84  
    85  	return c, nil
    86  }
    87  
    88  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
    89  	return containerd.Version{}, errors.New("not implemented on Windows")
    90  }
    91  
    92  // Create is the entrypoint to create a container from a spec.
     93  // The table below shows the fields required for the HCS JSON calling parameters;
     94  // fields that are not populated are omitted.
    95  // +-----------------+--------------------------------------------+---------------------------------------------------+
    96  // |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
    97  // +-----------------+--------------------------------------------+---------------------------------------------------+
    98  // | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
    99  // | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
   100  // | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
   101  // | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
   102  // +-----------------+--------------------------------------------+---------------------------------------------------+
   103  //
   104  // Isolation=Process example:
   105  //
   106  // {
   107  // 	"SystemType": "Container",
   108  // 	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   109  // 	"Owner": "docker",
   110  // 	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
   111  // 	"IgnoreFlushesDuringBoot": true,
   112  // 	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   113  // 	"Layers": [{
   114  // 		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   115  // 		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   116  // 	}],
   117  // 	"HostName": "5e0055c814a6",
   118  // 	"MappedDirectories": [],
   119  // 	"HvPartition": false,
   120  // 	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
   121  // }
   122  //
   123  // Isolation=Hyper-V example:
   124  //
   125  // {
   126  // 	"SystemType": "Container",
   127  // 	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
   128  // 	"Owner": "docker",
   129  // 	"IgnoreFlushesDuringBoot": true,
   130  // 	"Layers": [{
   131  // 		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   132  // 		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   133  // 	}],
   134  // 	"HostName": "475c2c58933b",
   135  // 	"MappedDirectories": [],
   136  // 	"HvPartition": true,
   137  // 	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
   138  // 	"DNSSearchList": "a.com,b.com,c.com",
   139  // 	"HvRuntime": {
   140  // 		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
   141  // 	},
   142  // }
   143  func (c *client) Create(_ context.Context, id string, spec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
   144  	if ctr := c.getContainer(id); ctr != nil {
   145  		return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   146  	}
   147  
   148  	var err error
   149  	if spec.Linux != nil {
   150  		return errors.New("linux containers are not supported on this platform")
   151  	}
   152  	err = c.createWindows(id, spec, runtimeOptions)
   153  
   154  	if err == nil {
   155  		c.eventQ.Append(id, func() {
   156  			ei := libcontainerdtypes.EventInfo{
   157  				ContainerID: id,
   158  			}
   159  			c.logger.WithFields(logrus.Fields{
   160  				"container": id,
   161  				"event":     libcontainerdtypes.EventCreate,
   162  			}).Info("sending event")
   163  			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
   164  			if err != nil {
   165  				c.logger.WithError(err).WithFields(logrus.Fields{
   166  					"container": id,
   167  					"event":     libcontainerdtypes.EventCreate,
   168  				}).Error("failed to process event")
   169  			}
   170  		})
   171  	}
   172  	return err
   173  }
   174  
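         // createWindows translates the OCI spec into an hcsshim.ContainerConfig,
         // creates and starts the compute system, and registers the resulting
         // container with the client.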
   175  func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
   176  	logger := c.logger.WithField("container", id)
   177  	configuration := &hcsshim.ContainerConfig{
   178  		SystemType:              "Container",
   179  		Name:                    id,
   180  		Owner:                   defaultOwner,
   181  		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
   182  		HostName:                spec.Hostname,
   183  		HvPartition:             false,
   184  	}
   185  
   186  	c.extractResourcesFromSpec(spec, configuration)
   187  
   188  	if spec.Windows.Resources != nil {
   189  		if spec.Windows.Resources.Storage != nil {
   190  			if spec.Windows.Resources.Storage.Bps != nil {
   191  				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
   192  			}
   193  			if spec.Windows.Resources.Storage.Iops != nil {
   194  				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
   195  			}
   196  		}
   197  	}
   198  
   199  	if spec.Windows.HyperV != nil {
   200  		configuration.HvPartition = true
   201  	}
   202  
   203  	if spec.Windows.Network != nil {
   204  		configuration.EndpointList = spec.Windows.Network.EndpointList
   205  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   206  		if spec.Windows.Network.DNSSearchList != nil {
   207  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   208  		}
   209  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   210  	}
   211  
   212  	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
   213  		configuration.Credentials = cs
   214  	}
   215  
    216  	// We must have at least two layers in the spec: the bottom one being a
    217  	// base image, the top one being the RW layer.
   218  	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
   219  		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
   220  	}
   221  
   222  	// Strip off the top-most layer as that's passed in separately to HCS
   223  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   224  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   225  
   226  	if configuration.HvPartition {
   227  		// We don't currently support setting the utility VM image explicitly.
   228  		// TODO circa RS5, this may be re-locatable.
   229  		if spec.Windows.HyperV.UtilityVMPath != "" {
   230  			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
   231  		}
   232  
   233  		// Find the upper-most utility VM image.
   234  		var uvmImagePath string
   235  		for _, path := range layerFolders {
   236  			fullPath := filepath.Join(path, "UtilityVM")
   237  			_, err := os.Stat(fullPath)
   238  			if err == nil {
   239  				uvmImagePath = fullPath
   240  				break
   241  			}
   242  			if !os.IsNotExist(err) {
   243  				return err
   244  			}
   245  		}
   246  		if uvmImagePath == "" {
   247  			return errors.New("utility VM image could not be found")
   248  		}
   249  		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
   250  
   251  		if spec.Root.Path != "" {
   252  			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
   253  		}
   254  	} else {
   255  		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
   256  		if _, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil {
   257  			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
   258  		}
   259  		// HCS API requires the trailing backslash to be removed
   260  		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
   261  	}
   262  
   263  	if spec.Root.Readonly {
   264  		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
   265  	}
   266  
   267  	for _, layerPath := range layerFolders {
   268  		_, filename := filepath.Split(layerPath)
   269  		g, err := hcsshim.NameToGuid(filename)
   270  		if err != nil {
   271  			return err
   272  		}
   273  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   274  			ID:   g.ToString(),
   275  			Path: layerPath,
   276  		})
   277  	}
   278  
   279  	// Add the mounts (volumes, bind mounts etc) to the structure
   280  	var mds []hcsshim.MappedDir
   281  	var mps []hcsshim.MappedPipe
   282  	for _, mount := range spec.Mounts {
   283  		const pipePrefix = `\\.\pipe\`
   284  		if mount.Type != "" {
   285  			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
   286  		}
   287  		if strings.HasPrefix(mount.Destination, pipePrefix) {
   288  			mp := hcsshim.MappedPipe{
   289  				HostPath:          mount.Source,
   290  				ContainerPipeName: mount.Destination[len(pipePrefix):],
   291  			}
   292  			mps = append(mps, mp)
   293  		} else {
   294  			md := hcsshim.MappedDir{
   295  				HostPath:      mount.Source,
   296  				ContainerPath: mount.Destination,
   297  				ReadOnly:      false,
   298  			}
   299  			for _, o := range mount.Options {
   300  				if strings.ToLower(o) == "ro" {
   301  					md.ReadOnly = true
   302  				}
   303  			}
   304  			mds = append(mds, md)
   305  		}
   306  	}
   307  	configuration.MappedDirectories = mds
   308  	if len(mps) > 0 && osversion.Build() < osversion.RS3 {
   309  		return errors.New("named pipe mounts are not supported on this version of Windows")
   310  	}
   311  	configuration.MappedPipes = mps
   312  
   313  	if len(spec.Windows.Devices) > 0 {
   314  		// Add any device assignments
   315  		if configuration.HvPartition {
   316  			return errors.New("device assignment is not supported for HyperV containers")
   317  		}
   318  		if osversion.Build() < osversion.RS5 {
   319  			return errors.New("device assignment requires Windows builds RS5 (17763+) or later")
   320  		}
   321  		for _, d := range spec.Windows.Devices {
   322  			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
   323  		}
   324  	}
   325  
   326  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   327  	if err != nil {
   328  		return err
   329  	}
   330  
   331  	// Construct a container object for calling start on it.
   332  	ctr := &container{
   333  		id:           id,
   334  		execs:        make(map[string]*process),
   335  		ociSpec:      spec,
   336  		hcsContainer: hcsContainer,
   337  		status:       containerd.Created,
   338  		waitCh:       make(chan struct{}),
   339  	}
   340  
   341  	logger.Debug("starting container")
   342  	if err = hcsContainer.Start(); err != nil {
   343  		c.logger.WithError(err).Error("failed to start container")
   344  		ctr.Lock()
   345  		if err := c.terminateContainer(ctr); err != nil {
   346  			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
   347  		} else {
   348  			c.logger.Debug("cleaned up after failed Start by calling Terminate")
   349  		}
   350  		ctr.Unlock()
   351  		return err
   352  	}
   353  
   354  	c.Lock()
   355  	c.containers[id] = ctr
   356  	c.Unlock()
   357  
   358  	logger.Debug("createWindows() completed successfully")
   359  	return nil
   360  
   361  }
   362  
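         // extractResourcesFromSpec copies the CPU and memory limits from the OCI spec
         // into the HCS configuration, capping the requested CPU count at the number of
         // processors available on the host.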
   363  func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
   364  	if spec.Windows.Resources != nil {
   365  		if spec.Windows.Resources.CPU != nil {
   366  			if spec.Windows.Resources.CPU.Count != nil {
   367  				// This check is being done here rather than in adaptContainerSettings
   368  				// because we don't want to update the HostConfig in case this container
   369  				// is moved to a host with more CPUs than this one.
   370  				cpuCount := *spec.Windows.Resources.CPU.Count
   371  				hostCPUCount := uint64(sysinfo.NumCPU())
   372  				if cpuCount > hostCPUCount {
   373  					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
   374  					cpuCount = hostCPUCount
   375  				}
   376  				configuration.ProcessorCount = uint32(cpuCount)
   377  			}
   378  			if spec.Windows.Resources.CPU.Shares != nil {
   379  				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
   380  			}
   381  			if spec.Windows.Resources.CPU.Maximum != nil {
   382  				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
   383  			}
   384  		}
   385  		if spec.Windows.Resources.Memory != nil {
   386  			if spec.Windows.Resources.Memory.Limit != nil {
   387  				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
   388  			}
   389  		}
   390  	}
   391  }
   392  
   393  func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   394  	ctr := c.getContainer(id)
   395  	switch {
   396  	case ctr == nil:
   397  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   398  	case ctr.init != nil:
   399  		return -1, errors.WithStack(errdefs.NotModified(errors.New("container already started")))
   400  	}
   401  
   402  	logger := c.logger.WithField("container", id)
   403  
   404  	// Note we always tell HCS to create stdout as it's required
   405  	// regardless of '-i' or '-t' options, so that docker can always grab
   406  	// the output through logs. We also tell HCS to always create stdin,
   407  	// even if it's not used - it will be closed shortly. Stderr is only
    408  	// created if we're not using '-t'.
   409  	var (
   410  		emulateConsole   bool
   411  		createStdErrPipe bool
   412  	)
   413  	if ctr.ociSpec.Process != nil {
   414  		emulateConsole = ctr.ociSpec.Process.Terminal
   415  		createStdErrPipe = !ctr.ociSpec.Process.Terminal
   416  	}
   417  
   418  	createProcessParms := &hcsshim.ProcessConfig{
   419  		EmulateConsole:   emulateConsole,
   420  		WorkingDirectory: ctr.ociSpec.Process.Cwd,
   421  		CreateStdInPipe:  true,
   422  		CreateStdOutPipe: true,
   423  		CreateStdErrPipe: createStdErrPipe,
   424  	}
   425  
   426  	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
   427  		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
   428  		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
   429  	}
   430  
   431  	// Configure the environment for the process
   432  	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
   433  
   434  	// Configure the CommandLine/CommandArgs
   435  	setCommandLineAndArgs(ctr.ociSpec.Process, createProcessParms)
   436  	logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)
   437  
   438  	createProcessParms.User = ctr.ociSpec.Process.User.Username
   439  
   440  	ctr.Lock()
   441  
   442  	// Start the command running in the container.
   443  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   444  	if err != nil {
   445  		logger.WithError(err).Error("CreateProcess() failed")
   446  		// Fix for https://github.com/moby/moby/issues/38719.
   447  		// If the init process failed to launch, we still need to reap the
   448  		// container to avoid leaking it.
   449  		//
   450  		// Note we use the explicit exit code of 127 which is the
   451  		// Linux shell equivalent of "command not found". Windows cannot
   452  		// know ahead of time whether or not the command exists, especially
   453  		// in the case of Hyper-V containers.
   454  		ctr.Unlock()
   455  		exitedAt := time.Now()
   456  		p := &process{
   457  			id:  libcontainerdtypes.InitProcessName,
   458  			pid: 0,
   459  		}
   460  		c.reapContainer(ctr, p, 127, exitedAt, nil, logger)
   461  		return -1, err
   462  	}
   463  
   464  	defer ctr.Unlock()
   465  
   466  	defer func() {
   467  		if err != nil {
   468  			if err := newProcess.Kill(); err != nil {
   469  				logger.WithError(err).Error("failed to kill process")
   470  			}
   471  			go func() {
   472  				if err := newProcess.Wait(); err != nil {
   473  					logger.WithError(err).Error("failed to wait for process")
   474  				}
   475  				if err := newProcess.Close(); err != nil {
   476  					logger.WithError(err).Error("failed to clean process resources")
   477  				}
   478  			}()
   479  		}
   480  	}()
   481  	p := &process{
   482  		hcsProcess: newProcess,
   483  		id:         libcontainerdtypes.InitProcessName,
   484  		pid:        newProcess.Pid(),
   485  	}
   486  	logger.WithField("pid", p.pid).Debug("init process started")
   487  
   488  	ctr.status = containerd.Running
   489  	ctr.init = p
   490  
    491  	// Spin up a goroutine waiting for exit to handle cleanup
   492  	go c.reapProcess(ctr, p)
   493  
   494  	// Don't shadow err here due to our deferred clean-up.
   495  	var dio *cio.DirectIO
   496  	dio, err = newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
   497  	if err != nil {
   498  		logger.WithError(err).Error("failed to get stdio pipes")
   499  		return -1, err
   500  	}
   501  	_, err = attachStdio(dio)
   502  	if err != nil {
   503  		logger.WithError(err).Error("failed to attach stdio")
   504  		return -1, err
   505  	}
   506  
   507  	// Generate the associated event
   508  	c.eventQ.Append(id, func() {
   509  		ei := libcontainerdtypes.EventInfo{
   510  			ContainerID: id,
   511  			ProcessID:   libcontainerdtypes.InitProcessName,
   512  			Pid:         uint32(p.pid),
   513  		}
   514  		c.logger.WithFields(logrus.Fields{
   515  			"container":  ctr.id,
   516  			"event":      libcontainerdtypes.EventStart,
   517  			"event-info": ei,
   518  		}).Info("sending event")
   519  		err := c.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
   520  		if err != nil {
   521  			c.logger.WithError(err).WithFields(logrus.Fields{
   522  				"container":  id,
   523  				"event":      libcontainerdtypes.EventStart,
   524  				"event-info": ei,
   525  			}).Error("failed to process event")
   526  		}
   527  	})
   528  	logger.Debug("start() completed")
   529  	return p.pid, nil
   530  }
   531  
   532  // setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
   533  func setCommandLineAndArgs(process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
   534  	if process.CommandLine != "" {
   535  		createProcessParms.CommandLine = process.CommandLine
   536  	} else {
   537  		createProcessParms.CommandLine = system.EscapeArgs(process.Args)
   538  	}
   539  }
   540  
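         // newIOFromProcess wraps the stdio pipes of an HCS process in a cio.DirectIO
         // so the streams can be handed to the daemon's stdio attach callback.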
   541  func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
   542  	stdin, stdout, stderr, err := newProcess.Stdio()
   543  	if err != nil {
   544  		return nil, err
   545  	}
   546  
   547  	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)
   548  
   549  	// Convert io.ReadClosers to io.Readers
   550  	if stdout != nil {
   551  		dio.Stdout = io.NopCloser(&autoClosingReader{ReadCloser: stdout})
   552  	}
   553  	if stderr != nil {
   554  		dio.Stderr = io.NopCloser(&autoClosingReader{ReadCloser: stderr})
   555  	}
   556  	return dio, nil
   557  }
   558  
    559  // Exec adds a process to a running container
   560  func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   561  	ctr := c.getContainer(containerID)
   562  	switch {
   563  	case ctr == nil:
   564  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   565  	case ctr.hcsContainer == nil:
   566  		return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
   567  	case ctr.execs != nil && ctr.execs[processID] != nil:
   568  		return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   569  	}
   570  	logger := c.logger.WithFields(logrus.Fields{
   571  		"container": containerID,
   572  		"exec":      processID,
   573  	})
   574  
   575  	// Note we always tell HCS to
   576  	// create stdout as it's required regardless of '-i' or '-t' options, so that
   577  	// docker can always grab the output through logs. We also tell HCS to always
   578  	// create stdin, even if it's not used - it will be closed shortly. Stderr
    579  	// is only created if we're not using '-t'.
   580  	createProcessParms := &hcsshim.ProcessConfig{
   581  		CreateStdInPipe:  true,
   582  		CreateStdOutPipe: true,
   583  		CreateStdErrPipe: !spec.Terminal,
   584  	}
   585  	if spec.Terminal {
   586  		createProcessParms.EmulateConsole = true
   587  		if spec.ConsoleSize != nil {
   588  			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
   589  			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
   590  		}
   591  	}
   592  
    593  	// Take the working directory from the exec's process spec if it is defined,
    594  	// otherwise fall back to the container's init process.
   595  	if spec.Cwd != "" {
   596  		createProcessParms.WorkingDirectory = spec.Cwd
   597  	} else {
   598  		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
   599  	}
   600  
   601  	// Configure the environment for the process
   602  	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
   603  
   604  	// Configure the CommandLine/CommandArgs
   605  	setCommandLineAndArgs(spec, createProcessParms)
   606  	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)
   607  
   608  	createProcessParms.User = spec.User.Username
   609  
   610  	// Start the command running in the container.
   611  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   612  	if err != nil {
   613  		logger.WithError(err).Errorf("exec's CreateProcess() failed")
   614  		return -1, err
   615  	}
   616  	pid := newProcess.Pid()
   617  	defer func() {
   618  		if err != nil {
   619  			if err := newProcess.Kill(); err != nil {
   620  				logger.WithError(err).Error("failed to kill process")
   621  			}
   622  			go func() {
   623  				if err := newProcess.Wait(); err != nil {
   624  					logger.WithError(err).Error("failed to wait for process")
   625  				}
   626  				if err := newProcess.Close(); err != nil {
   627  					logger.WithError(err).Error("failed to clean process resources")
   628  				}
   629  			}()
   630  		}
   631  	}()
   632  
   633  	dio, err := newIOFromProcess(newProcess, spec.Terminal)
   634  	if err != nil {
   635  		logger.WithError(err).Error("failed to get stdio pipes")
   636  		return -1, err
   637  	}
   638  	// Tell the engine to attach streams back to the client
   639  	_, err = attachStdio(dio)
   640  	if err != nil {
   641  		return -1, err
   642  	}
   643  
   644  	p := &process{
   645  		id:         processID,
   646  		pid:        pid,
   647  		hcsProcess: newProcess,
   648  	}
   649  
   650  	// Add the process to the container's list of processes
   651  	ctr.Lock()
   652  	ctr.execs[processID] = p
   653  	ctr.Unlock()
   654  
    655  	// Spin up a goroutine waiting for exit to handle cleanup
   656  	go c.reapProcess(ctr, p)
   657  
   658  	c.eventQ.Append(ctr.id, func() {
   659  		ei := libcontainerdtypes.EventInfo{
   660  			ContainerID: ctr.id,
   661  			ProcessID:   p.id,
   662  			Pid:         uint32(p.pid),
   663  		}
   664  		c.logger.WithFields(logrus.Fields{
   665  			"container":  ctr.id,
   666  			"event":      libcontainerdtypes.EventExecAdded,
   667  			"event-info": ei,
   668  		}).Info("sending event")
   669  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecAdded, ei)
   670  		if err != nil {
   671  			c.logger.WithError(err).WithFields(logrus.Fields{
   672  				"container":  ctr.id,
   673  				"event":      libcontainerdtypes.EventExecAdded,
   674  				"event-info": ei,
   675  			}).Error("failed to process event")
   676  		}
   677  		err = c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecStarted, ei)
   678  		if err != nil {
   679  			c.logger.WithError(err).WithFields(logrus.Fields{
   680  				"container":  ctr.id,
   681  				"event":      libcontainerdtypes.EventExecStarted,
   682  				"event-info": ei,
   683  			}).Error("failed to process event")
   684  		}
   685  	})
   686  
   687  	return pid, nil
   688  }
   689  
   690  // SignalProcess handles `docker stop` on Windows. While Linux has support for
   691  // the full range of signals, signals aren't really implemented on Windows.
    692  // We fake support for a regular stop, and for -9 to force-kill.
   693  func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
   694  	ctr, p, err := c.getProcess(containerID, processID)
   695  	if err != nil {
   696  		return err
   697  	}
   698  
   699  	logger := c.logger.WithFields(logrus.Fields{
   700  		"container": containerID,
   701  		"process":   processID,
   702  		"pid":       p.pid,
   703  		"signal":    signal,
   704  	})
   705  	logger.Debug("Signal()")
   706  
   707  	if processID == libcontainerdtypes.InitProcessName {
   708  		if syscall.Signal(signal) == syscall.SIGKILL {
   709  			// Terminate the compute system
   710  			ctr.Lock()
   711  			ctr.terminateInvoked = true
   712  			if err := ctr.hcsContainer.Terminate(); err != nil {
   713  				if !hcsshim.IsPending(err) {
   714  					logger.WithError(err).Error("failed to terminate hccshim container")
   715  				}
   716  			}
   717  			ctr.Unlock()
   718  		} else {
   719  			// Shut down the container
   720  			if err := ctr.hcsContainer.Shutdown(); err != nil {
   721  				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
    722  					// log but otherwise ignore errors
    723  					logger.WithError(err).Error("failed to shutdown hcsshim container")
   724  				}
   725  			}
   726  		}
   727  	} else {
   728  		return p.hcsProcess.Kill()
   729  	}
   730  
   731  	return nil
   732  }
   733  
   734  // ResizeTerminal handles a CLI event to resize an interactive docker run or docker
   735  // exec window.
   736  func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
   737  	_, p, err := c.getProcess(containerID, processID)
   738  	if err != nil {
   739  		return err
   740  	}
   741  
   742  	c.logger.WithFields(logrus.Fields{
   743  		"container": containerID,
   744  		"process":   processID,
   745  		"height":    height,
   746  		"width":     width,
   747  		"pid":       p.pid,
   748  	}).Debug("resizing")
   749  	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
   750  }
   751  
   752  func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
   753  	_, p, err := c.getProcess(containerID, processID)
   754  	if err != nil {
   755  		return err
   756  	}
   757  
   758  	return p.hcsProcess.CloseStdin()
   759  }
   760  
   761  // Pause handles pause requests for containers
   762  func (c *client) Pause(_ context.Context, containerID string) error {
   763  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   764  	if err != nil {
   765  		return err
   766  	}
   767  
   768  	if ctr.ociSpec.Windows.HyperV == nil {
   769  		return containerderrdefs.ErrNotImplemented
   770  	}
   771  
   772  	ctr.Lock()
   773  	defer ctr.Unlock()
   774  
   775  	if err = ctr.hcsContainer.Pause(); err != nil {
   776  		return err
   777  	}
   778  
   779  	ctr.status = containerd.Paused
   780  
   781  	c.eventQ.Append(containerID, func() {
   782  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
   783  			ContainerID: containerID,
   784  			ProcessID:   libcontainerdtypes.InitProcessName,
   785  		})
   786  		c.logger.WithFields(logrus.Fields{
   787  			"container": ctr.id,
   788  			"event":     libcontainerdtypes.EventPaused,
   789  		}).Info("sending event")
   790  		if err != nil {
   791  			c.logger.WithError(err).WithFields(logrus.Fields{
   792  				"container": containerID,
   793  				"event":     libcontainerdtypes.EventPaused,
   794  			}).Error("failed to process event")
   795  		}
   796  	})
   797  
   798  	return nil
   799  }
   800  
   801  // Resume handles resume requests for containers
   802  func (c *client) Resume(_ context.Context, containerID string) error {
   803  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   804  	if err != nil {
   805  		return err
   806  	}
   807  
   808  	if ctr.ociSpec.Windows.HyperV == nil {
   809  		return errors.New("cannot resume Windows Server Containers")
   810  	}
   811  
   812  	ctr.Lock()
   813  	defer ctr.Unlock()
   814  
   815  	if err = ctr.hcsContainer.Resume(); err != nil {
   816  		return err
   817  	}
   818  
   819  	ctr.status = containerd.Running
   820  
   821  	c.eventQ.Append(containerID, func() {
   822  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
   823  			ContainerID: containerID,
   824  			ProcessID:   libcontainerdtypes.InitProcessName,
   825  		})
   826  		c.logger.WithFields(logrus.Fields{
   827  			"container": ctr.id,
   828  			"event":     libcontainerdtypes.EventResumed,
   829  		}).Info("sending event")
   830  		if err != nil {
   831  			c.logger.WithError(err).WithFields(logrus.Fields{
   832  				"container": containerID,
   833  				"event":     libcontainerdtypes.EventResumed,
   834  			}).Error("failed to process event")
   835  		}
   836  	})
   837  
   838  	return nil
   839  }
   840  
   841  // Stats handles stats requests for containers
   842  func (c *client) Stats(_ context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
   843  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   844  	if err != nil {
   845  		return nil, err
   846  	}
   847  
   848  	readAt := time.Now()
   849  	s, err := ctr.hcsContainer.Statistics()
   850  	if err != nil {
   851  		return nil, err
   852  	}
   853  	return &libcontainerdtypes.Stats{
   854  		Read:     readAt,
   855  		HCSStats: &s,
   856  	}, nil
   857  }
   858  
   859  // Restore is the handler for restoring a container
   860  func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (bool, int, libcontainerdtypes.Process, error) {
   861  	c.logger.WithField("container", id).Debug("restore()")
   862  
   863  	// TODO Windows: On RS1, a re-attach isn't possible.
   864  	// However, there is a scenario in which there is an issue.
   865  	// Consider a background container. The daemon dies unexpectedly.
   866  	// HCS will still have the compute service alive and running.
    867  	// For consistency, we call in to terminate it regardless of whether HCS knows about it.
   868  	// We explicitly just log a warning if the terminate fails.
   869  	// Then we tell the backend the container exited.
   870  	if hc, err := hcsshim.OpenContainer(id); err == nil {
   871  		const terminateTimeout = time.Minute * 2
   872  		err := hc.Terminate()
   873  
   874  		if hcsshim.IsPending(err) {
   875  			err = hc.WaitTimeout(terminateTimeout)
   876  		} else if hcsshim.IsAlreadyStopped(err) {
   877  			err = nil
   878  		}
   879  
   880  		if err != nil {
   881  			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
   882  			return false, -1, nil, err
   883  		}
   884  	}
   885  	return false, -1, &restoredProcess{
   886  		c:  c,
   887  		id: id,
   888  	}, nil
   889  }
   890  
   891  // ListPids returns a list of process IDs running in a container. It is not
   892  // implemented on Windows.
   893  func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
   894  	return nil, errors.New("not implemented on Windows")
   895  }
   896  
   897  // Summary returns a summary of the processes running in a container.
    898  // This is present on Windows to support docker top. On Linux, the
   899  // engine shells out to ps to get process information. On Windows, as
   900  // the containers could be Hyper-V containers, they would not be
   901  // visible on the container host. However, libcontainerd does have
   902  // that information.
   903  func (c *client) Summary(_ context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
   904  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   905  	if err != nil {
   906  		return nil, err
   907  	}
   908  
   909  	p, err := ctr.hcsContainer.ProcessList()
   910  	if err != nil {
   911  		return nil, err
   912  	}
   913  
   914  	pl := make([]libcontainerdtypes.Summary, len(p))
   915  	for i := range p {
   916  		pl[i] = libcontainerdtypes.Summary{
   917  			ImageName:                    p[i].ImageName,
   918  			CreatedAt:                    p[i].CreateTimestamp,
   919  			KernelTime_100Ns:             p[i].KernelTime100ns,
   920  			MemoryCommitBytes:            p[i].MemoryCommitBytes,
   921  			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
   922  			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
   923  			ProcessID:                    p[i].ProcessId,
   924  			UserTime_100Ns:               p[i].UserTime100ns,
   925  			ExecID:                       "",
   926  		}
   927  	}
   928  	return pl, nil
   929  }
   930  
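         // restoredProcess stands in for the init process of a container that could not
         // be re-attached after a daemon restart; it only supports Delete.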
   931  type restoredProcess struct {
   932  	id string
   933  	c  *client
   934  }
   935  
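         // Delete delegates to the client's DeleteTask for the restored container.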
   936  func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
   937  	return p.c.DeleteTask(ctx, p.id)
   938  }
   939  
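         // DeleteTask returns the exit code and exit time of a stopped container. It
         // fails if the container is unknown or has not yet exited.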
   940  func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
   941  	ec := -1
   942  	ctr := c.getContainer(containerID)
   943  	if ctr == nil {
   944  		return uint32(ec), time.Now(), errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   945  	}
   946  
   947  	select {
   948  	case <-ctx.Done():
   949  		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
   950  	case <-ctr.waitCh:
   951  	default:
   952  		return uint32(ec), time.Now(), errors.New("container is not stopped")
   953  	}
   954  
   955  	ctr.Lock()
   956  	defer ctr.Unlock()
   957  	return ctr.exitCode, ctr.exitedAt, nil
   958  }
   959  
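         // Delete removes a container from the client's table. A container in the
         // Created state is shut down first; a container that is still running is
         // rejected with an InvalidParameter error.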
   960  func (c *client) Delete(_ context.Context, containerID string) error {
   961  	c.Lock()
   962  	defer c.Unlock()
   963  	ctr := c.containers[containerID]
   964  	if ctr == nil {
   965  		return errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   966  	}
   967  
   968  	ctr.Lock()
   969  	defer ctr.Unlock()
   970  
   971  	switch ctr.status {
   972  	case containerd.Created:
   973  		if err := c.shutdownContainer(ctr); err != nil {
   974  			return err
   975  		}
   976  		fallthrough
   977  	case containerd.Stopped:
   978  		delete(c.containers, containerID)
   979  		return nil
   980  	}
   981  
   982  	return errors.WithStack(errdefs.InvalidParameter(errors.New("container is not stopped")))
   983  }
   984  
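         // Status returns the status of the container as last recorded by this client.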
   985  func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
   986  	c.Lock()
   987  	defer c.Unlock()
   988  	ctr := c.containers[containerID]
   989  	if ctr == nil {
   990  		return containerd.Unknown, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   991  	}
   992  
   993  	ctr.Lock()
   994  	defer ctr.Unlock()
   995  	return ctr.status, nil
   996  }
   997  
   998  func (c *client) UpdateResources(ctx context.Context, containerID string, resources *libcontainerdtypes.Resources) error {
    999  	// Updating resources isn't supported on Windows, but we return nil
   1000  	// so that updating a container is still permitted.
  1001  	return nil
  1002  }
  1003  
  1004  func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
  1005  	return errors.New("Windows: Containers do not support checkpoints")
  1006  }
  1007  
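         // getContainer looks up a container by ID under the client lock and returns
         // nil if the ID is unknown.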
  1008  func (c *client) getContainer(id string) *container {
  1009  	c.Lock()
  1010  	ctr := c.containers[id]
  1011  	c.Unlock()
  1012  
  1013  	return ctr
  1014  }
  1015  
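         // getProcess resolves a container ID and process ID to the tracked container
         // and process, returning a NotFound error if the container, its init process,
         // or the named exec does not exist.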
  1016  func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
  1017  	ctr := c.getContainer(containerID)
  1018  	switch {
  1019  	case ctr == nil:
  1020  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1021  	case ctr.init == nil:
  1022  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
  1023  	case processID == libcontainerdtypes.InitProcessName:
  1024  		return ctr, ctr.init, nil
  1025  	default:
  1026  		ctr.Lock()
  1027  		defer ctr.Unlock()
  1028  		if ctr.execs == nil {
  1029  			return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no execs")))
  1030  		}
  1031  	}
  1032  
  1033  	p := ctr.execs[processID]
  1034  	if p == nil {
  1035  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
  1036  	}
  1037  
  1038  	return ctr, p, nil
  1039  }
  1040  
  1041  // ctr mutex must be held when calling this function.
  1042  func (c *client) shutdownContainer(ctr *container) error {
  1043  	var err error
  1044  	const waitTimeout = time.Minute * 5
  1045  
  1046  	if !ctr.terminateInvoked {
  1047  		err = ctr.hcsContainer.Shutdown()
  1048  	}
  1049  
  1050  	if hcsshim.IsPending(err) || ctr.terminateInvoked {
  1051  		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
  1052  	} else if hcsshim.IsAlreadyStopped(err) {
  1053  		err = nil
  1054  	}
  1055  
  1056  	if err != nil {
  1057  		c.logger.WithError(err).WithField("container", ctr.id).
  1058  			Debug("failed to shutdown container, terminating it")
  1059  		terminateErr := c.terminateContainer(ctr)
  1060  		if terminateErr != nil {
  1061  			c.logger.WithError(terminateErr).WithField("container", ctr.id).
  1062  				Error("failed to shutdown container, and subsequent terminate also failed")
  1063  			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
  1064  		}
  1065  		return err
  1066  	}
  1067  
  1068  	return nil
  1069  }
  1070  
  1071  // ctr mutex must be held when calling this function.
  1072  func (c *client) terminateContainer(ctr *container) error {
  1073  	const terminateTimeout = time.Minute * 5
  1074  	ctr.terminateInvoked = true
  1075  	err := ctr.hcsContainer.Terminate()
  1076  
  1077  	if hcsshim.IsPending(err) {
  1078  		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
  1079  	} else if hcsshim.IsAlreadyStopped(err) {
  1080  		err = nil
  1081  	}
  1082  
  1083  	if err != nil {
  1084  		c.logger.WithError(err).WithField("container", ctr.id).
  1085  			Debug("failed to terminate container")
  1086  		return err
  1087  	}
  1088  
  1089  	return nil
  1090  }
  1091  
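         // reapProcess waits for an HCS process to exit, collects its exit code, closes
         // the process (and, for the init process, shuts down and closes the container),
         // and queues the exit event for the backend. It returns the exit code.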
  1092  func (c *client) reapProcess(ctr *container, p *process) int {
  1093  	logger := c.logger.WithFields(logrus.Fields{
  1094  		"container": ctr.id,
  1095  		"process":   p.id,
  1096  	})
  1097  
  1098  	var eventErr error
  1099  
  1100  	// Block indefinitely for the process to exit.
  1101  	if err := p.hcsProcess.Wait(); err != nil {
  1102  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1103  			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
  1104  		}
  1105  		// Fall through here, do not return. This ensures we attempt to
  1106  		// continue the shutdown in HCS and tell the docker engine that the
  1107  		// process/container has exited to avoid a container being dropped on
  1108  		// the floor.
  1109  	}
  1110  	exitedAt := time.Now()
  1111  
  1112  	exitCode, err := p.hcsProcess.ExitCode()
  1113  	if err != nil {
  1114  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1115  			logger.WithError(err).Warnf("unable to get exit code for process")
  1116  		}
  1117  		// Since we got an error retrieving the exit code, make sure that the
  1118  		// code we return doesn't incorrectly indicate success.
  1119  		exitCode = -1
  1120  
  1121  		// Fall through here, do not return. This ensures we attempt to
  1122  		// continue the shutdown in HCS and tell the docker engine that the
  1123  		// process/container has exited to avoid a container being dropped on
  1124  		// the floor.
  1125  	}
  1126  
  1127  	if err := p.hcsProcess.Close(); err != nil {
  1128  		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
  1129  		exitCode = -1
  1130  		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
  1131  	}
  1132  
  1133  	if p.id == libcontainerdtypes.InitProcessName {
  1134  		exitCode, eventErr = c.reapContainer(ctr, p, exitCode, exitedAt, eventErr, logger)
  1135  	}
  1136  
  1137  	c.eventQ.Append(ctr.id, func() {
  1138  		ei := libcontainerdtypes.EventInfo{
  1139  			ContainerID: ctr.id,
  1140  			ProcessID:   p.id,
  1141  			Pid:         uint32(p.pid),
  1142  			ExitCode:    uint32(exitCode),
  1143  			ExitedAt:    exitedAt,
  1144  			Error:       eventErr,
  1145  		}
  1146  		c.logger.WithFields(logrus.Fields{
  1147  			"container":  ctr.id,
  1148  			"event":      libcontainerdtypes.EventExit,
  1149  			"event-info": ei,
  1150  		}).Info("sending event")
  1151  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExit, ei)
  1152  		if err != nil {
  1153  			c.logger.WithError(err).WithFields(logrus.Fields{
  1154  				"container":  ctr.id,
  1155  				"event":      libcontainerdtypes.EventExit,
  1156  				"event-info": ei,
  1157  			}).Error("failed to process event")
  1158  		}
  1159  		if p.id != libcontainerdtypes.InitProcessName {
  1160  			ctr.Lock()
  1161  			delete(ctr.execs, p.id)
  1162  			ctr.Unlock()
  1163  		}
  1164  	})
  1165  
  1166  	return exitCode
  1167  }
  1168  
  1169  // reapContainer shuts down the container and releases associated resources. It returns
  1170  // the error to be logged in the eventInfo sent back to the monitor.
  1171  func (c *client) reapContainer(ctr *container, p *process, exitCode int, exitedAt time.Time, eventErr error, logger *logrus.Entry) (int, error) {
  1172  	// Update container status
  1173  	ctr.Lock()
  1174  	ctr.status = containerd.Stopped
  1175  	ctr.exitedAt = exitedAt
  1176  	ctr.exitCode = uint32(exitCode)
  1177  	close(ctr.waitCh)
  1178  
  1179  	if err := c.shutdownContainer(ctr); err != nil {
  1180  		exitCode = -1
  1181  		logger.WithError(err).Warn("failed to shutdown container")
  1182  		thisErr := errors.Wrap(err, "failed to shutdown container")
  1183  		if eventErr != nil {
  1184  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1185  		} else {
  1186  			eventErr = thisErr
  1187  		}
  1188  	} else {
  1189  		logger.Debug("completed container shutdown")
  1190  	}
  1191  	ctr.Unlock()
  1192  
  1193  	if err := ctr.hcsContainer.Close(); err != nil {
  1194  		exitCode = -1
  1195  		logger.WithError(err).Error("failed to clean hcs container resources")
  1196  		thisErr := errors.Wrap(err, "failed to terminate container")
  1197  		if eventErr != nil {
  1198  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1199  		} else {
  1200  			eventErr = thisErr
  1201  		}
  1202  	}
  1203  	return exitCode, eventErr
  1204  }