gopkg.in/docker/docker.v23@v23.0.11/libcontainerd/local/local_windows.go

     1  package local // import "github.com/docker/docker/libcontainerd/local"
     2  
     3  // This package contains the legacy in-proc calls in HCS using the v1 schema
     4  // for Windows runtime purposes.
     5  
     6  import (
     7  	"context"
     8  	"fmt"
     9  	"io"
    10  	"os"
    11  	"path/filepath"
    12  	"regexp"
    13  	"strings"
    14  	"sync"
    15  	"syscall"
    16  	"time"
    17  
    18  	"github.com/Microsoft/hcsshim"
    19  	"github.com/containerd/containerd"
    20  	"github.com/containerd/containerd/cio"
    21  	containerderrdefs "github.com/containerd/containerd/errdefs"
    22  
    23  	"github.com/docker/docker/errdefs"
    24  	"github.com/docker/docker/libcontainerd/queue"
    25  	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
    26  	"github.com/docker/docker/pkg/sysinfo"
    27  	"github.com/docker/docker/pkg/system"
    28  	specs "github.com/opencontainers/runtime-spec/specs-go"
    29  	"github.com/pkg/errors"
    30  	"github.com/sirupsen/logrus"
    31  	"golang.org/x/sys/windows"
    32  )
    33  
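         // process wraps an HCS process handle together with the identifiers
         // libcontainerd uses for it; it represents either a container's init
         // process or an exec'd process.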
    34  type process struct {
    35  	id         string
    36  	pid        int
    37  	hcsProcess hcsshim.Process
    38  }
    39  
    40  type container struct {
    41  	sync.Mutex
    42  
    43  	// The ociSpec is required, as client.Create() needs a spec, but can
    44  	// be called from the RestartManager context which does not otherwise
    45  	// have access to the Spec
    46  	ociSpec *specs.Spec
    47  
    48  	hcsContainer hcsshim.Container
    49  
    50  	id               string
    51  	status           containerd.ProcessStatus
    52  	exitedAt         time.Time
    53  	exitCode         uint32
    54  	waitCh           chan struct{}
    55  	init             *process
    56  	execs            map[string]*process
    57  	terminateInvoked bool
    58  }
    59  
    60  // defaultOwner is a tag passed to HCS to allow it to differentiate between
     61  // container creator management stacks. We hard-code "docker" as this
     62  // client is part of the Docker engine.
    63  const defaultOwner = "docker"
    64  
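         // client implements libcontainerdtypes.Client on top of in-process
         // HCS (v1 schema) calls, rather than delegating to a containerd shim.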
    65  type client struct {
    66  	sync.Mutex
    67  
    68  	stateDir   string
    69  	backend    libcontainerdtypes.Backend
    70  	logger     *logrus.Entry
    71  	eventQ     queue.Queue
    72  	containers map[string]*container
    73  }
    74  
    75  // NewClient creates a new local executor for windows
    76  func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
    77  	c := &client{
    78  		stateDir:   stateDir,
    79  		backend:    b,
     80  		logger:     logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
    81  		containers: make(map[string]*container),
    82  	}
    83  
    84  	return c, nil
    85  }
    86  
    87  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
    88  	return containerd.Version{}, errors.New("not implemented on Windows")
    89  }
    90  
    91  // Create is the entrypoint to create a container from a spec.
     92  // The table below shows the fields required for the HCS JSON calling
     93  // parameters; fields that are not populated are omitted.
    94  // +-----------------+--------------------------------------------+---------------------------------------------------+
    95  // |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
    96  // +-----------------+--------------------------------------------+---------------------------------------------------+
    97  // | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
    98  // | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
    99  // | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
   100  // | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
   101  // +-----------------+--------------------------------------------+---------------------------------------------------+
   102  //
   103  // Isolation=Process example:
   104  //
   105  //	{
   106  //		"SystemType": "Container",
   107  //		"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   108  //		"Owner": "docker",
   109  //		"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
   110  //		"IgnoreFlushesDuringBoot": true,
   111  //		"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   112  //		"Layers": [{
   113  //			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   114  //			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   115  //		}],
   116  //		"HostName": "5e0055c814a6",
   117  //		"MappedDirectories": [],
   118  //		"HvPartition": false,
   119  //		"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
   120  //	}
   121  //
   122  // Isolation=Hyper-V example:
   123  //
   124  //	{
   125  //		"SystemType": "Container",
   126  //		"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
   127  //		"Owner": "docker",
   128  //		"IgnoreFlushesDuringBoot": true,
   129  //		"Layers": [{
   130  //			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   131  //			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   132  //		}],
   133  //		"HostName": "475c2c58933b",
   134  //		"MappedDirectories": [],
   135  //		"HvPartition": true,
   136  //		"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
   137  //		"DNSSearchList": "a.com,b.com,c.com",
   138  //		"HvRuntime": {
   139  //			"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
   140  //		},
   141  //	}
   142  func (c *client) Create(_ context.Context, id string, spec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) error {
   143  	if ctr := c.getContainer(id); ctr != nil {
   144  		return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   145  	}
   146  
   147  	var err error
   148  	if spec.Linux != nil {
   149  		return errors.New("linux containers are not supported on this platform")
   150  	}
   151  	err = c.createWindows(id, spec, runtimeOptions)
   152  
   153  	if err == nil {
   154  		c.eventQ.Append(id, func() {
   155  			ei := libcontainerdtypes.EventInfo{
   156  				ContainerID: id,
   157  			}
   158  			c.logger.WithFields(logrus.Fields{
   159  				"container": id,
   160  				"event":     libcontainerdtypes.EventCreate,
   161  			}).Info("sending event")
   162  			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
   163  			if err != nil {
   164  				c.logger.WithError(err).WithFields(logrus.Fields{
   165  					"container": id,
   166  					"event":     libcontainerdtypes.EventCreate,
   167  				}).Error("failed to process event")
   168  			}
   169  		})
   170  	}
   171  	return err
   172  }
   173  
   174  func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
   175  	logger := c.logger.WithField("container", id)
   176  	configuration := &hcsshim.ContainerConfig{
   177  		SystemType:              "Container",
   178  		Name:                    id,
   179  		Owner:                   defaultOwner,
   180  		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
   181  		HostName:                spec.Hostname,
   182  		HvPartition:             false,
   183  	}
   184  
   185  	c.extractResourcesFromSpec(spec, configuration)
   186  
   187  	if spec.Windows.Resources != nil {
   188  		if spec.Windows.Resources.Storage != nil {
   189  			if spec.Windows.Resources.Storage.Bps != nil {
   190  				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
   191  			}
   192  			if spec.Windows.Resources.Storage.Iops != nil {
   193  				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
   194  			}
   195  		}
   196  	}
   197  
   198  	if spec.Windows.HyperV != nil {
   199  		configuration.HvPartition = true
   200  	}
   201  
   202  	if spec.Windows.Network != nil {
   203  		configuration.EndpointList = spec.Windows.Network.EndpointList
   204  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   205  		if spec.Windows.Network.DNSSearchList != nil {
   206  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   207  		}
   208  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   209  	}
   210  
   211  	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
   212  		configuration.Credentials = cs
   213  	}
   214  
    215  	// We must have at least two layers in the spec: the bottom one being a
    216  	// base image, the top one being the RW layer.
   217  	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
   218  		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
   219  	}
   220  
   221  	// Strip off the top-most layer as that's passed in separately to HCS
   222  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   223  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   224  
   225  	if configuration.HvPartition {
   226  		// We don't currently support setting the utility VM image explicitly.
   227  		// TODO circa RS5, this may be re-locatable.
   228  		if spec.Windows.HyperV.UtilityVMPath != "" {
   229  			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
   230  		}
   231  
   232  		// Find the upper-most utility VM image.
   233  		var uvmImagePath string
   234  		for _, path := range layerFolders {
   235  			fullPath := filepath.Join(path, "UtilityVM")
   236  			_, err := os.Stat(fullPath)
   237  			if err == nil {
   238  				uvmImagePath = fullPath
   239  				break
   240  			}
   241  			if !os.IsNotExist(err) {
   242  				return err
   243  			}
   244  		}
   245  		if uvmImagePath == "" {
   246  			return errors.New("utility VM image could not be found")
   247  		}
   248  		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
   249  
   250  		if spec.Root.Path != "" {
   251  			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
   252  		}
   253  	} else {
   254  		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
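         		// Note: regexp.MatchString only returns an error when the pattern
         		// itself fails to compile, so this check does not reject a
         		// well-formed but non-matching Root.Path.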
   255  		if _, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil {
   256  			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
   257  		}
   258  		// HCS API requires the trailing backslash to be removed
   259  		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
   260  	}
   261  
   262  	if spec.Root.Readonly {
   263  		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
   264  	}
   265  
   266  	for _, layerPath := range layerFolders {
   267  		_, filename := filepath.Split(layerPath)
   268  		g, err := hcsshim.NameToGuid(filename)
   269  		if err != nil {
   270  			return err
   271  		}
   272  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   273  			ID:   g.ToString(),
   274  			Path: layerPath,
   275  		})
   276  	}
   277  
   278  	// Add the mounts (volumes, bind mounts etc) to the structure
   279  	var mds []hcsshim.MappedDir
   280  	var mps []hcsshim.MappedPipe
   281  	for _, mount := range spec.Mounts {
   282  		const pipePrefix = `\\.\pipe\`
   283  		if mount.Type != "" {
   284  			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
   285  		}
   286  		if strings.HasPrefix(mount.Destination, pipePrefix) {
   287  			mp := hcsshim.MappedPipe{
   288  				HostPath:          mount.Source,
   289  				ContainerPipeName: mount.Destination[len(pipePrefix):],
   290  			}
   291  			mps = append(mps, mp)
   292  		} else {
   293  			md := hcsshim.MappedDir{
   294  				HostPath:      mount.Source,
   295  				ContainerPath: mount.Destination,
   296  				ReadOnly:      false,
   297  			}
   298  			for _, o := range mount.Options {
   299  				if strings.ToLower(o) == "ro" {
   300  					md.ReadOnly = true
   301  				}
   302  			}
   303  			mds = append(mds, md)
   304  		}
   305  	}
   306  	configuration.MappedDirectories = mds
   307  	configuration.MappedPipes = mps
   308  
   309  	if len(spec.Windows.Devices) > 0 {
   310  		// Add any device assignments
   311  		if configuration.HvPartition {
   312  			return errors.New("device assignment is not supported for HyperV containers")
   313  		}
   314  		for _, d := range spec.Windows.Devices {
   315  			// Per https://github.com/microsoft/hcsshim/blob/v0.9.2/internal/uvm/virtual_device.go#L17-L18,
   316  			// these represent an Interface Class GUID.
   317  			if d.IDType != "class" && d.IDType != "vpci-class-guid" {
   318  				return errors.Errorf("device assignment of type '%s' is not supported", d.IDType)
   319  			}
   320  			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
   321  		}
   322  	}
   323  
   324  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   325  	if err != nil {
   326  		return err
   327  	}
   328  
   329  	// Construct a container object for calling start on it.
   330  	ctr := &container{
   331  		id:           id,
   332  		execs:        make(map[string]*process),
   333  		ociSpec:      spec,
   334  		hcsContainer: hcsContainer,
   335  		status:       containerd.Created,
   336  		waitCh:       make(chan struct{}),
   337  	}
   338  
   339  	logger.Debug("starting container")
   340  	if err = hcsContainer.Start(); err != nil {
   341  		c.logger.WithError(err).Error("failed to start container")
   342  		ctr.Lock()
   343  		if err := c.terminateContainer(ctr); err != nil {
   344  			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
   345  		} else {
   346  			c.logger.Debug("cleaned up after failed Start by calling Terminate")
   347  		}
   348  		ctr.Unlock()
   349  		return err
   350  	}
   351  
   352  	c.Lock()
   353  	c.containers[id] = ctr
   354  	c.Unlock()
   355  
   356  	logger.Debug("createWindows() completed successfully")
   357  	return nil
   358  
   359  }
   360  
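         // extractResourcesFromSpec copies the CPU and memory limits from the OCI
         // spec into the HCS container configuration, capping the requested CPU
         // count at the number of processors on the host.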
   361  func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
   362  	if spec.Windows.Resources != nil {
   363  		if spec.Windows.Resources.CPU != nil {
   364  			if spec.Windows.Resources.CPU.Count != nil {
   365  				// This check is being done here rather than in adaptContainerSettings
   366  				// because we don't want to update the HostConfig in case this container
   367  				// is moved to a host with more CPUs than this one.
   368  				cpuCount := *spec.Windows.Resources.CPU.Count
   369  				hostCPUCount := uint64(sysinfo.NumCPU())
   370  				if cpuCount > hostCPUCount {
   371  					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
   372  					cpuCount = hostCPUCount
   373  				}
   374  				configuration.ProcessorCount = uint32(cpuCount)
   375  			}
   376  			if spec.Windows.Resources.CPU.Shares != nil {
   377  				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
   378  			}
   379  			if spec.Windows.Resources.CPU.Maximum != nil {
   380  				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
   381  			}
   382  		}
   383  		if spec.Windows.Resources.Memory != nil {
   384  			if spec.Windows.Resources.Memory.Limit != nil {
   385  				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
   386  			}
   387  		}
   388  	}
   389  }
   390  
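         // Start launches the init process of a previously created container,
         // attaches its stdio and returns its PID.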
   391  func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   392  	ctr := c.getContainer(id)
   393  	switch {
   394  	case ctr == nil:
   395  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   396  	case ctr.init != nil:
   397  		return -1, errors.WithStack(errdefs.NotModified(errors.New("container already started")))
   398  	}
   399  
   400  	logger := c.logger.WithField("container", id)
   401  
   402  	// Note we always tell HCS to create stdout as it's required
   403  	// regardless of '-i' or '-t' options, so that docker can always grab
   404  	// the output through logs. We also tell HCS to always create stdin,
   405  	// even if it's not used - it will be closed shortly. Stderr is only
    406  	// created if we're not running with '-t'.
   407  	var (
   408  		emulateConsole   bool
   409  		createStdErrPipe bool
   410  	)
   411  	if ctr.ociSpec.Process != nil {
   412  		emulateConsole = ctr.ociSpec.Process.Terminal
   413  		createStdErrPipe = !ctr.ociSpec.Process.Terminal
   414  	}
   415  
   416  	createProcessParms := &hcsshim.ProcessConfig{
   417  		EmulateConsole:   emulateConsole,
   418  		WorkingDirectory: ctr.ociSpec.Process.Cwd,
   419  		CreateStdInPipe:  true,
   420  		CreateStdOutPipe: true,
   421  		CreateStdErrPipe: createStdErrPipe,
   422  	}
   423  
   424  	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
   425  		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
   426  		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
   427  	}
   428  
   429  	// Configure the environment for the process
   430  	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
   431  
   432  	// Configure the CommandLine/CommandArgs
   433  	setCommandLineAndArgs(ctr.ociSpec.Process, createProcessParms)
   434  	logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)
   435  
   436  	createProcessParms.User = ctr.ociSpec.Process.User.Username
   437  
   438  	ctr.Lock()
   439  
   440  	// Start the command running in the container.
   441  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   442  	if err != nil {
   443  		logger.WithError(err).Error("CreateProcess() failed")
   444  		// Fix for https://github.com/moby/moby/issues/38719.
   445  		// If the init process failed to launch, we still need to reap the
   446  		// container to avoid leaking it.
   447  		//
   448  		// Note we use the explicit exit code of 127 which is the
   449  		// Linux shell equivalent of "command not found". Windows cannot
   450  		// know ahead of time whether or not the command exists, especially
   451  		// in the case of Hyper-V containers.
   452  		ctr.Unlock()
   453  		exitedAt := time.Now()
   454  		p := &process{
   455  			id:  libcontainerdtypes.InitProcessName,
   456  			pid: 0,
   457  		}
   458  		c.reapContainer(ctr, p, 127, exitedAt, nil, logger)
   459  		return -1, err
   460  	}
   461  
   462  	defer ctr.Unlock()
   463  
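         	// If anything below fails, kill and reap the process we just created
         	// so that it is not leaked.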
   464  	defer func() {
   465  		if err != nil {
   466  			if err := newProcess.Kill(); err != nil {
   467  				logger.WithError(err).Error("failed to kill process")
   468  			}
   469  			go func() {
   470  				if err := newProcess.Wait(); err != nil {
   471  					logger.WithError(err).Error("failed to wait for process")
   472  				}
   473  				if err := newProcess.Close(); err != nil {
   474  					logger.WithError(err).Error("failed to clean process resources")
   475  				}
   476  			}()
   477  		}
   478  	}()
   479  	p := &process{
   480  		hcsProcess: newProcess,
   481  		id:         libcontainerdtypes.InitProcessName,
   482  		pid:        newProcess.Pid(),
   483  	}
   484  	logger.WithField("pid", p.pid).Debug("init process started")
   485  
   486  	ctr.status = containerd.Running
   487  	ctr.init = p
   488  
   489  	// Spin up a go routine waiting for exit to handle cleanup
   490  	go c.reapProcess(ctr, p)
   491  
   492  	// Don't shadow err here due to our deferred clean-up.
   493  	var dio *cio.DirectIO
   494  	dio, err = newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
   495  	if err != nil {
   496  		logger.WithError(err).Error("failed to get stdio pipes")
   497  		return -1, err
   498  	}
   499  	_, err = attachStdio(dio)
   500  	if err != nil {
   501  		logger.WithError(err).Error("failed to attach stdio")
   502  		return -1, err
   503  	}
   504  
   505  	// Generate the associated event
   506  	c.eventQ.Append(id, func() {
   507  		ei := libcontainerdtypes.EventInfo{
   508  			ContainerID: id,
   509  			ProcessID:   libcontainerdtypes.InitProcessName,
   510  			Pid:         uint32(p.pid),
   511  		}
   512  		c.logger.WithFields(logrus.Fields{
   513  			"container":  ctr.id,
   514  			"event":      libcontainerdtypes.EventStart,
   515  			"event-info": ei,
   516  		}).Info("sending event")
   517  		err := c.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
   518  		if err != nil {
   519  			c.logger.WithError(err).WithFields(logrus.Fields{
   520  				"container":  id,
   521  				"event":      libcontainerdtypes.EventStart,
   522  				"event-info": ei,
   523  			}).Error("failed to process event")
   524  		}
   525  	})
   526  	logger.Debug("start() completed")
   527  	return p.pid, nil
   528  }
   529  
   530  // setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
   531  func setCommandLineAndArgs(process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
   532  	if process.CommandLine != "" {
   533  		createProcessParms.CommandLine = process.CommandLine
   534  	} else {
   535  		createProcessParms.CommandLine = system.EscapeArgs(process.Args)
   536  	}
   537  }
   538  
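         // newIOFromProcess wraps the stdio pipes of an HCS process in a
         // cio.DirectIO so they can be attached to the daemon's stream backend.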
   539  func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
   540  	stdin, stdout, stderr, err := newProcess.Stdio()
   541  	if err != nil {
   542  		return nil, err
   543  	}
   544  
   545  	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)
   546  
   547  	// Convert io.ReadClosers to io.Readers
   548  	if stdout != nil {
   549  		dio.Stdout = io.NopCloser(&autoClosingReader{ReadCloser: stdout})
   550  	}
   551  	if stderr != nil {
   552  		dio.Stderr = io.NopCloser(&autoClosingReader{ReadCloser: stderr})
   553  	}
   554  	return dio, nil
   555  }
   556  
    557  // Exec adds a process to a running container.
   558  func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   559  	ctr := c.getContainer(containerID)
   560  	switch {
   561  	case ctr == nil:
   562  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   563  	case ctr.hcsContainer == nil:
   564  		return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
   565  	case ctr.execs != nil && ctr.execs[processID] != nil:
   566  		return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   567  	}
   568  	logger := c.logger.WithFields(logrus.Fields{
   569  		"container": containerID,
   570  		"exec":      processID,
   571  	})
   572  
   573  	// Note we always tell HCS to
   574  	// create stdout as it's required regardless of '-i' or '-t' options, so that
   575  	// docker can always grab the output through logs. We also tell HCS to always
   576  	// create stdin, even if it's not used - it will be closed shortly. Stderr
    577  	// is only created if we're not running with '-t'.
   578  	createProcessParms := &hcsshim.ProcessConfig{
   579  		CreateStdInPipe:  true,
   580  		CreateStdOutPipe: true,
   581  		CreateStdErrPipe: !spec.Terminal,
   582  	}
   583  	if spec.Terminal {
   584  		createProcessParms.EmulateConsole = true
   585  		if spec.ConsoleSize != nil {
   586  			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
   587  			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
   588  		}
   589  	}
   590  
    591  	// Take the working directory from the exec's process spec if it is
    592  	// defined, otherwise fall back to the init process's working directory.
   593  	if spec.Cwd != "" {
   594  		createProcessParms.WorkingDirectory = spec.Cwd
   595  	} else {
   596  		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
   597  	}
   598  
   599  	// Configure the environment for the process
   600  	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
   601  
   602  	// Configure the CommandLine/CommandArgs
   603  	setCommandLineAndArgs(spec, createProcessParms)
   604  	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)
   605  
   606  	createProcessParms.User = spec.User.Username
   607  
   608  	// Start the command running in the container.
   609  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   610  	if err != nil {
   611  		logger.WithError(err).Errorf("exec's CreateProcess() failed")
   612  		return -1, err
   613  	}
   614  	pid := newProcess.Pid()
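         	// If any of the subsequent steps fail, kill and reap the exec'd
         	// process so that it is not leaked.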
   615  	defer func() {
   616  		if err != nil {
   617  			if err := newProcess.Kill(); err != nil {
   618  				logger.WithError(err).Error("failed to kill process")
   619  			}
   620  			go func() {
   621  				if err := newProcess.Wait(); err != nil {
   622  					logger.WithError(err).Error("failed to wait for process")
   623  				}
   624  				if err := newProcess.Close(); err != nil {
   625  					logger.WithError(err).Error("failed to clean process resources")
   626  				}
   627  			}()
   628  		}
   629  	}()
   630  
   631  	dio, err := newIOFromProcess(newProcess, spec.Terminal)
   632  	if err != nil {
   633  		logger.WithError(err).Error("failed to get stdio pipes")
   634  		return -1, err
   635  	}
   636  	// Tell the engine to attach streams back to the client
   637  	_, err = attachStdio(dio)
   638  	if err != nil {
   639  		return -1, err
   640  	}
   641  
   642  	p := &process{
   643  		id:         processID,
   644  		pid:        pid,
   645  		hcsProcess: newProcess,
   646  	}
   647  
   648  	// Add the process to the container's list of processes
   649  	ctr.Lock()
   650  	ctr.execs[processID] = p
   651  	ctr.Unlock()
   652  
   653  	// Spin up a go routine waiting for exit to handle cleanup
   654  	go c.reapProcess(ctr, p)
   655  
   656  	c.eventQ.Append(ctr.id, func() {
   657  		ei := libcontainerdtypes.EventInfo{
   658  			ContainerID: ctr.id,
   659  			ProcessID:   p.id,
   660  			Pid:         uint32(p.pid),
   661  		}
   662  		c.logger.WithFields(logrus.Fields{
   663  			"container":  ctr.id,
   664  			"event":      libcontainerdtypes.EventExecAdded,
   665  			"event-info": ei,
   666  		}).Info("sending event")
   667  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecAdded, ei)
   668  		if err != nil {
   669  			c.logger.WithError(err).WithFields(logrus.Fields{
   670  				"container":  ctr.id,
   671  				"event":      libcontainerdtypes.EventExecAdded,
   672  				"event-info": ei,
   673  			}).Error("failed to process event")
   674  		}
   675  		err = c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExecStarted, ei)
   676  		if err != nil {
   677  			c.logger.WithError(err).WithFields(logrus.Fields{
   678  				"container":  ctr.id,
   679  				"event":      libcontainerdtypes.EventExecStarted,
   680  				"event-info": ei,
   681  			}).Error("failed to process event")
   682  		}
   683  	})
   684  
   685  	return pid, nil
   686  }
   687  
   688  // SignalProcess handles `docker stop` on Windows. While Linux has support for
   689  // the full range of signals, signals aren't really implemented on Windows.
   690  // We fake supporting regular stop and -9 to force kill.
   691  func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal syscall.Signal) error {
   692  	ctr, p, err := c.getProcess(containerID, processID)
   693  	if err != nil {
   694  		return err
   695  	}
   696  
   697  	logger := c.logger.WithFields(logrus.Fields{
   698  		"container": containerID,
   699  		"process":   processID,
   700  		"pid":       p.pid,
   701  		"signal":    signal,
   702  	})
   703  	logger.Debug("Signal()")
   704  
   705  	if processID == libcontainerdtypes.InitProcessName {
   706  		if syscall.Signal(signal) == syscall.SIGKILL {
   707  			// Terminate the compute system
   708  			ctr.Lock()
   709  			ctr.terminateInvoked = true
   710  			if err := ctr.hcsContainer.Terminate(); err != nil {
   711  				if !hcsshim.IsPending(err) {
    712  					logger.WithError(err).Error("failed to terminate hcsshim container")
   713  				}
   714  			}
   715  			ctr.Unlock()
   716  		} else {
   717  			// Shut down the container
   718  			if err := ctr.hcsContainer.Shutdown(); err != nil {
   719  				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
    720  					// log the error, but otherwise ignore it
    721  					logger.WithError(err).Error("failed to shutdown hcsshim container")
   722  				}
   723  			}
   724  		}
   725  	} else {
   726  		return p.hcsProcess.Kill()
   727  	}
   728  
   729  	return nil
   730  }
   731  
   732  // ResizeTerminal handles a CLI event to resize an interactive docker run or docker
   733  // exec window.
   734  func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
   735  	_, p, err := c.getProcess(containerID, processID)
   736  	if err != nil {
   737  		return err
   738  	}
   739  
   740  	c.logger.WithFields(logrus.Fields{
   741  		"container": containerID,
   742  		"process":   processID,
   743  		"height":    height,
   744  		"width":     width,
   745  		"pid":       p.pid,
   746  	}).Debug("resizing")
   747  	return p.hcsProcess.ResizeConsole(uint16(width), uint16(height))
   748  }
   749  
   750  func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
   751  	_, p, err := c.getProcess(containerID, processID)
   752  	if err != nil {
   753  		return err
   754  	}
   755  
   756  	return p.hcsProcess.CloseStdin()
   757  }
   758  
   759  // Pause handles pause requests for containers
   760  func (c *client) Pause(_ context.Context, containerID string) error {
   761  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   762  	if err != nil {
   763  		return err
   764  	}
   765  
   766  	if ctr.ociSpec.Windows.HyperV == nil {
   767  		return containerderrdefs.ErrNotImplemented
   768  	}
   769  
   770  	ctr.Lock()
   771  	defer ctr.Unlock()
   772  
   773  	if err = ctr.hcsContainer.Pause(); err != nil {
   774  		return err
   775  	}
   776  
   777  	ctr.status = containerd.Paused
   778  
   779  	c.eventQ.Append(containerID, func() {
   780  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
   781  			ContainerID: containerID,
   782  			ProcessID:   libcontainerdtypes.InitProcessName,
   783  		})
   784  		c.logger.WithFields(logrus.Fields{
   785  			"container": ctr.id,
   786  			"event":     libcontainerdtypes.EventPaused,
   787  		}).Info("sending event")
   788  		if err != nil {
   789  			c.logger.WithError(err).WithFields(logrus.Fields{
   790  				"container": containerID,
   791  				"event":     libcontainerdtypes.EventPaused,
   792  			}).Error("failed to process event")
   793  		}
   794  	})
   795  
   796  	return nil
   797  }
   798  
   799  // Resume handles resume requests for containers
   800  func (c *client) Resume(_ context.Context, containerID string) error {
   801  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   802  	if err != nil {
   803  		return err
   804  	}
   805  
   806  	if ctr.ociSpec.Windows.HyperV == nil {
   807  		return errors.New("cannot resume Windows Server Containers")
   808  	}
   809  
   810  	ctr.Lock()
   811  	defer ctr.Unlock()
   812  
   813  	if err = ctr.hcsContainer.Resume(); err != nil {
   814  		return err
   815  	}
   816  
   817  	ctr.status = containerd.Running
   818  
   819  	c.eventQ.Append(containerID, func() {
   820  		err := c.backend.ProcessEvent(containerID, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
   821  			ContainerID: containerID,
   822  			ProcessID:   libcontainerdtypes.InitProcessName,
   823  		})
   824  		c.logger.WithFields(logrus.Fields{
   825  			"container": ctr.id,
   826  			"event":     libcontainerdtypes.EventResumed,
   827  		}).Info("sending event")
   828  		if err != nil {
   829  			c.logger.WithError(err).WithFields(logrus.Fields{
   830  				"container": containerID,
   831  				"event":     libcontainerdtypes.EventResumed,
   832  			}).Error("failed to process event")
   833  		}
   834  	})
   835  
   836  	return nil
   837  }
   838  
   839  // Stats handles stats requests for containers
   840  func (c *client) Stats(_ context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
   841  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   842  	if err != nil {
   843  		return nil, err
   844  	}
   845  
   846  	readAt := time.Now()
   847  	s, err := ctr.hcsContainer.Statistics()
   848  	if err != nil {
   849  		return nil, err
   850  	}
   851  	return &libcontainerdtypes.Stats{
   852  		Read:     readAt,
   853  		HCSStats: &s,
   854  	}, nil
   855  }
   856  
   857  // Restore is the handler for restoring a container
   858  func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (bool, int, libcontainerdtypes.Process, error) {
   859  	c.logger.WithField("container", id).Debug("restore()")
   860  
   861  	// TODO Windows: On RS1, a re-attach isn't possible.
   862  	// However, there is a scenario in which there is an issue.
   863  	// Consider a background container. The daemon dies unexpectedly.
   864  	// HCS will still have the compute service alive and running.
    865  // For consistency, we call in to terminate it regardless of whether HCS knows about it.
   866  	// We explicitly just log a warning if the terminate fails.
   867  	// Then we tell the backend the container exited.
   868  	if hc, err := hcsshim.OpenContainer(id); err == nil {
   869  		const terminateTimeout = time.Minute * 2
   870  		err := hc.Terminate()
   871  
   872  		if hcsshim.IsPending(err) {
   873  			err = hc.WaitTimeout(terminateTimeout)
   874  		} else if hcsshim.IsAlreadyStopped(err) {
   875  			err = nil
   876  		}
   877  
   878  		if err != nil {
   879  			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
   880  			return false, -1, nil, err
   881  		}
   882  	}
   883  	return false, -1, &restoredProcess{
   884  		c:  c,
   885  		id: id,
   886  	}, nil
   887  }
   888  
   889  // ListPids returns a list of process IDs running in a container. It is not
   890  // implemented on Windows.
   891  func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
   892  	return nil, errors.New("not implemented on Windows")
   893  }
   894  
   895  // Summary returns a summary of the processes running in a container.
    896  // This is present on Windows to support docker top. On Linux, the
    897  // engine shells out to ps to get process information. On Windows, as
    898  // the containers could be Hyper-V containers, their processes would
    899  // not be visible on the container host. However, libcontainerd does
    900  // have that information.
   901  func (c *client) Summary(_ context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
   902  	ctr, _, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   903  	if err != nil {
   904  		return nil, err
   905  	}
   906  
   907  	p, err := ctr.hcsContainer.ProcessList()
   908  	if err != nil {
   909  		return nil, err
   910  	}
   911  
   912  	pl := make([]libcontainerdtypes.Summary, len(p))
   913  	for i := range p {
   914  		pl[i] = libcontainerdtypes.Summary{
   915  			ImageName:                    p[i].ImageName,
   916  			CreatedAt:                    p[i].CreateTimestamp,
   917  			KernelTime_100Ns:             p[i].KernelTime100ns,
   918  			MemoryCommitBytes:            p[i].MemoryCommitBytes,
   919  			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
   920  			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
   921  			ProcessID:                    p[i].ProcessId,
   922  			UserTime_100Ns:               p[i].UserTime100ns,
   923  			ExecID:                       "",
   924  		}
   925  	}
   926  	return pl, nil
   927  }
   928  
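         // restoredProcess is the minimal process handle returned by Restore;
         // deleting it delegates to DeleteTask on the client.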
   929  type restoredProcess struct {
   930  	id string
   931  	c  *client
   932  }
   933  
   934  func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
   935  	return p.c.DeleteTask(ctx, p.id)
   936  }
   937  
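         // DeleteTask returns the exit code and exit time of a container that has
         // already stopped. It returns an error if the container is still running
         // or the context has been cancelled.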
   938  func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
   939  	ec := -1
   940  	ctr := c.getContainer(containerID)
   941  	if ctr == nil {
   942  		return uint32(ec), time.Now(), errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   943  	}
   944  
   945  	select {
   946  	case <-ctx.Done():
   947  		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
   948  	case <-ctr.waitCh:
   949  	default:
   950  		return uint32(ec), time.Now(), errors.New("container is not stopped")
   951  	}
   952  
   953  	ctr.Lock()
   954  	defer ctr.Unlock()
   955  	return ctr.exitCode, ctr.exitedAt, nil
   956  }
   957  
   958  func (c *client) Delete(_ context.Context, containerID string) error {
   959  	c.Lock()
   960  	defer c.Unlock()
   961  	ctr := c.containers[containerID]
   962  	if ctr == nil {
   963  		return errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   964  	}
   965  
   966  	ctr.Lock()
   967  	defer ctr.Unlock()
   968  
   969  	switch ctr.status {
   970  	case containerd.Created:
   971  		if err := c.shutdownContainer(ctr); err != nil {
   972  			return err
   973  		}
   974  		fallthrough
   975  	case containerd.Stopped:
   976  		delete(c.containers, containerID)
   977  		return nil
   978  	}
   979  
   980  	return errors.WithStack(errdefs.InvalidParameter(errors.New("container is not stopped")))
   981  }
   982  
   983  func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
   984  	c.Lock()
   985  	defer c.Unlock()
   986  	ctr := c.containers[containerID]
   987  	if ctr == nil {
   988  		return containerd.Unknown, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   989  	}
   990  
   991  	ctr.Lock()
   992  	defer ctr.Unlock()
   993  	return ctr.status, nil
   994  }
   995  
   996  func (c *client) UpdateResources(ctx context.Context, containerID string, resources *libcontainerdtypes.Resources) error {
    997  	// Updating resources isn't supported on Windows, but we return nil
    998  	// so that the container update path does not fail.
   999  	return nil
  1000  }
  1001  
  1002  func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
  1003  	return errors.New("Windows: Containers do not support checkpoints")
  1004  }
  1005  
  1006  func (c *client) getContainer(id string) *container {
  1007  	c.Lock()
  1008  	ctr := c.containers[id]
  1009  	c.Unlock()
  1010  
  1011  	return ctr
  1012  }
  1013  
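         // getProcess looks up a container and one of its processes. processID is
         // either libcontainerdtypes.InitProcessName or the ID of an exec.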
  1014  func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
  1015  	ctr := c.getContainer(containerID)
  1016  	switch {
  1017  	case ctr == nil:
  1018  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
  1019  	case ctr.init == nil:
  1020  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
  1021  	case processID == libcontainerdtypes.InitProcessName:
  1022  		return ctr, ctr.init, nil
  1023  	default:
  1024  		ctr.Lock()
  1025  		defer ctr.Unlock()
  1026  		if ctr.execs == nil {
  1027  			return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no execs")))
  1028  		}
  1029  	}
  1030  
  1031  	p := ctr.execs[processID]
  1032  	if p == nil {
  1033  		return nil, nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
  1034  	}
  1035  
  1036  	return ctr, p, nil
  1037  }
  1038  
  1039  // ctr mutex must be held when calling this function.
  1040  func (c *client) shutdownContainer(ctr *container) error {
  1041  	var err error
  1042  	const waitTimeout = time.Minute * 5
  1043  
  1044  	if !ctr.terminateInvoked {
  1045  		err = ctr.hcsContainer.Shutdown()
  1046  	}
  1047  
  1048  	if hcsshim.IsPending(err) || ctr.terminateInvoked {
  1049  		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
  1050  	} else if hcsshim.IsAlreadyStopped(err) {
  1051  		err = nil
  1052  	}
  1053  
  1054  	if err != nil {
  1055  		c.logger.WithError(err).WithField("container", ctr.id).
  1056  			Debug("failed to shutdown container, terminating it")
  1057  		terminateErr := c.terminateContainer(ctr)
  1058  		if terminateErr != nil {
  1059  			c.logger.WithError(terminateErr).WithField("container", ctr.id).
  1060  				Error("failed to shutdown container, and subsequent terminate also failed")
  1061  			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
  1062  		}
  1063  		return err
  1064  	}
  1065  
  1066  	return nil
  1067  }
  1068  
  1069  // ctr mutex must be held when calling this function.
  1070  func (c *client) terminateContainer(ctr *container) error {
  1071  	const terminateTimeout = time.Minute * 5
  1072  	ctr.terminateInvoked = true
  1073  	err := ctr.hcsContainer.Terminate()
  1074  
  1075  	if hcsshim.IsPending(err) {
  1076  		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
  1077  	} else if hcsshim.IsAlreadyStopped(err) {
  1078  		err = nil
  1079  	}
  1080  
  1081  	if err != nil {
  1082  		c.logger.WithError(err).WithField("container", ctr.id).
  1083  			Debug("failed to terminate container")
  1084  		return err
  1085  	}
  1086  
  1087  	return nil
  1088  }
  1089  
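         // reapProcess blocks until the given process exits, collects its exit
         // code, releases the associated HCS resources, and queues the exit event
         // for the backend. It returns the exit code.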
  1090  func (c *client) reapProcess(ctr *container, p *process) int {
  1091  	logger := c.logger.WithFields(logrus.Fields{
  1092  		"container": ctr.id,
  1093  		"process":   p.id,
  1094  	})
  1095  
  1096  	var eventErr error
  1097  
  1098  	// Block indefinitely for the process to exit.
  1099  	if err := p.hcsProcess.Wait(); err != nil {
  1100  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1101  			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
  1102  		}
  1103  		// Fall through here, do not return. This ensures we attempt to
  1104  		// continue the shutdown in HCS and tell the docker engine that the
  1105  		// process/container has exited to avoid a container being dropped on
  1106  		// the floor.
  1107  	}
  1108  	exitedAt := time.Now()
  1109  
  1110  	exitCode, err := p.hcsProcess.ExitCode()
  1111  	if err != nil {
  1112  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1113  			logger.WithError(err).Warnf("unable to get exit code for process")
  1114  		}
  1115  		// Since we got an error retrieving the exit code, make sure that the
  1116  		// code we return doesn't incorrectly indicate success.
  1117  		exitCode = -1
  1118  
  1119  		// Fall through here, do not return. This ensures we attempt to
  1120  		// continue the shutdown in HCS and tell the docker engine that the
  1121  		// process/container has exited to avoid a container being dropped on
  1122  		// the floor.
  1123  	}
  1124  
  1125  	if err := p.hcsProcess.Close(); err != nil {
  1126  		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
  1127  		exitCode = -1
  1128  		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
  1129  	}
  1130  
  1131  	if p.id == libcontainerdtypes.InitProcessName {
  1132  		exitCode, eventErr = c.reapContainer(ctr, p, exitCode, exitedAt, eventErr, logger)
  1133  	}
  1134  
  1135  	c.eventQ.Append(ctr.id, func() {
  1136  		ei := libcontainerdtypes.EventInfo{
  1137  			ContainerID: ctr.id,
  1138  			ProcessID:   p.id,
  1139  			Pid:         uint32(p.pid),
  1140  			ExitCode:    uint32(exitCode),
  1141  			ExitedAt:    exitedAt,
  1142  			Error:       eventErr,
  1143  		}
  1144  		c.logger.WithFields(logrus.Fields{
  1145  			"container":  ctr.id,
  1146  			"event":      libcontainerdtypes.EventExit,
  1147  			"event-info": ei,
  1148  		}).Info("sending event")
  1149  		err := c.backend.ProcessEvent(ctr.id, libcontainerdtypes.EventExit, ei)
  1150  		if err != nil {
  1151  			c.logger.WithError(err).WithFields(logrus.Fields{
  1152  				"container":  ctr.id,
  1153  				"event":      libcontainerdtypes.EventExit,
  1154  				"event-info": ei,
  1155  			}).Error("failed to process event")
  1156  		}
  1157  		if p.id != libcontainerdtypes.InitProcessName {
  1158  			ctr.Lock()
  1159  			delete(ctr.execs, p.id)
  1160  			ctr.Unlock()
  1161  		}
  1162  	})
  1163  
  1164  	return exitCode
  1165  }
  1166  
  1167  // reapContainer shuts down the container and releases associated resources. It returns
  1168  // the error to be logged in the eventInfo sent back to the monitor.
  1169  func (c *client) reapContainer(ctr *container, p *process, exitCode int, exitedAt time.Time, eventErr error, logger *logrus.Entry) (int, error) {
  1170  	// Update container status
  1171  	ctr.Lock()
  1172  	ctr.status = containerd.Stopped
  1173  	ctr.exitedAt = exitedAt
  1174  	ctr.exitCode = uint32(exitCode)
  1175  	close(ctr.waitCh)
  1176  
  1177  	if err := c.shutdownContainer(ctr); err != nil {
  1178  		exitCode = -1
  1179  		logger.WithError(err).Warn("failed to shutdown container")
  1180  		thisErr := errors.Wrap(err, "failed to shutdown container")
  1181  		if eventErr != nil {
  1182  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1183  		} else {
  1184  			eventErr = thisErr
  1185  		}
  1186  	} else {
  1187  		logger.Debug("completed container shutdown")
  1188  	}
  1189  	ctr.Unlock()
  1190  
  1191  	if err := ctr.hcsContainer.Close(); err != nil {
  1192  		exitCode = -1
  1193  		logger.WithError(err).Error("failed to clean hcs container resources")
  1194  		thisErr := errors.Wrap(err, "failed to terminate container")
  1195  		if eventErr != nil {
  1196  			eventErr = errors.Wrap(eventErr, thisErr.Error())
  1197  		} else {
  1198  			eventErr = thisErr
  1199  		}
  1200  	}
  1201  	return exitCode, eventErr
  1202  }