github.com/Prakhar-Agarwal-byte/moby@v0.0.0-20231027092010-a14e3e8ab87e/libcontainerd/local/local_windows.go

     1  package local // import "github.com/Prakhar-Agarwal-byte/moby/libcontainerd/local"
     2  
     3  // This package contains the legacy in-process calls into HCS using the v1
     4  // schema for Windows runtime purposes.
     5  
     6  import (
     7  	"context"
     8  	"fmt"
     9  	"io"
    10  	"os"
    11  	"path/filepath"
    12  	"regexp"
    13  	"strings"
    14  	"sync"
    15  	"syscall"
    16  	"time"
    17  
    18  	"github.com/Microsoft/hcsshim"
    19  	"github.com/Prakhar-Agarwal-byte/moby/errdefs"
    20  	"github.com/Prakhar-Agarwal-byte/moby/libcontainerd/queue"
    21  	libcontainerdtypes "github.com/Prakhar-Agarwal-byte/moby/libcontainerd/types"
    22  	"github.com/Prakhar-Agarwal-byte/moby/pkg/sysinfo"
    23  	"github.com/Prakhar-Agarwal-byte/moby/pkg/system"
    24  	"github.com/containerd/containerd"
    25  	"github.com/containerd/containerd/cio"
    26  	cerrdefs "github.com/containerd/containerd/errdefs"
    27  	"github.com/containerd/log"
    28  	specs "github.com/opencontainers/runtime-spec/specs-go"
    29  	"github.com/pkg/errors"
    30  	"golang.org/x/sys/windows"
    31  )
    32  
    33  type process struct {
    34  	// mu guards the mutable fields of this struct.
    35  	//
    36  	// Always lock mu before ctr's mutex to prevent deadlocks.
    37  	mu         sync.Mutex
    38  	id         string                 // Invariants: immutable
    39  	ctr        *container             // Invariants: immutable, ctr != nil
    40  	hcsProcess hcsshim.Process        // Is set to nil on process exit
    41  	exited     *containerd.ExitStatus // Valid iff waitCh is closed
    42  	waitCh     chan struct{}
    43  }
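
        // The exited/waitCh pairing above follows the usual Go latch pattern: reap()
        // stores the exit status and then closes waitCh, so readers only need to
        // block on the channel. A minimal sketch of a waiter (illustrative only, not
        // part of this package's API):
        //
        //	<-p.waitCh                  // closed by reap() after the process exits
        //	code := p.exited.ExitCode() // safe to read without locking mu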
    44  
    45  type task struct {
    46  	process
    47  }
    48  
    49  type container struct {
    50  	mu sync.Mutex
    51  
    52  	// The ociSpec is required, as client.Create() needs a spec, but it can
    53  	// be called from the RestartManager context, which does not otherwise
    54  	// have access to the spec.
    55  	//
    56  	// A container value with ociSpec == nil represents a container which
    57  	// has been loaded with (*client).LoadContainer, and is ineligible to
    58  	// be Start()ed.
    59  	ociSpec *specs.Spec
    60  
    61  	hcsContainer hcsshim.Container // Is set to nil on container delete
    62  	isPaused     bool
    63  
    64  	client           *client
    65  	id               string
    66  	terminateInvoked bool
    67  
    68  	// task is a reference to the current task for the container. As a
    69  	// corollary, when task == nil the container has no current task: the
    70  	// container was never Start()ed or the task was Delete()d.
    71  	task *task
    72  }
    73  
    74  // defaultOwner is a tag passed to HCS to allow it to differentiate between
    75  // container creator management stacks. We hard-code "docker" as the
    76  // owner tag.
    77  const defaultOwner = "docker"
    78  
    79  type client struct {
    80  	stateDir string
    81  	backend  libcontainerdtypes.Backend
    82  	logger   *log.Entry
    83  	eventQ   queue.Queue
    84  }
    85  
    86  // NewClient creates a new local executor for Windows
    87  func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
    88  	c := &client{
    89  		stateDir: stateDir,
    90  		backend:  b,
    91  		logger:   log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns),
    92  	}
    93  
    94  	return c, nil
    95  }
    96  
    97  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
    98  	return containerd.Version{}, errors.New("not implemented on Windows")
    99  }
   100  
   101  // NewContainer is the entrypoint to create a container from a spec.
   102  // The table below shows the fields required for the HCS JSON calling
   103  // parameters; fields that are not populated are omitted.
   104  // +-----------------+--------------------------------------------+---------------------------------------------------+
   105  // |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
   106  // +-----------------+--------------------------------------------+---------------------------------------------------+
   107  // | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
   108  // | LayerFolderPath | %root%\windowsfilter\containerID           |                                                   |
   109  // | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
   110  // | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
   111  // +-----------------+--------------------------------------------+---------------------------------------------------+
   112  //
   113  // Isolation=Process example:
   114  //
   115  //	{
   116  //		"SystemType": "Container",
   117  //		"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   118  //		"Owner": "docker",
   119  //		"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
   120  //		"IgnoreFlushesDuringBoot": true,
   121  //		"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
   122  //		"Layers": [{
   123  //			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   124  //			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   125  //		}],
   126  //		"HostName": "5e0055c814a6",
   127  //		"MappedDirectories": [],
   128  //		"HvPartition": false,
   129  //		"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
   130  //	}
   131  //
   132  // Isolation=Hyper-V example:
   133  //
   134  //	{
   135  //		"SystemType": "Container",
   136  //		"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
   137  //		"Owner": "docker",
   138  //		"IgnoreFlushesDuringBoot": true,
   139  //		"Layers": [{
   140  //			"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
   141  //			"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
   142  //		}],
   143  //		"HostName": "475c2c58933b",
   144  //		"MappedDirectories": [],
   145  //		"HvPartition": true,
   146  //		"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
   147  //		"DNSSearchList": "a.com,b.com,c.com",
   148  //		"HvRuntime": {
   149  //			"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
   150  //		},
   151  //	}
   152  func (c *client) NewContainer(_ context.Context, id string, spec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
   153  	var err error
   154  	if spec.Linux != nil {
   155  		return nil, errors.New("linux containers are not supported on this platform")
   156  	}
   157  	ctr, err := c.createWindows(id, spec, runtimeOptions)
   158  
   159  	if err == nil {
   160  		c.eventQ.Append(id, func() {
   161  			ei := libcontainerdtypes.EventInfo{
   162  				ContainerID: id,
   163  			}
   164  			c.logger.WithFields(log.Fields{
   165  				"container": id,
   166  				"event":     libcontainerdtypes.EventCreate,
   167  			}).Info("sending event")
   168  			err := c.backend.ProcessEvent(id, libcontainerdtypes.EventCreate, ei)
   169  			if err != nil {
   170  				c.logger.WithError(err).WithFields(log.Fields{
   171  					"container": id,
   172  					"event":     libcontainerdtypes.EventCreate,
   173  				}).Error("failed to process event")
   174  			}
   175  		})
   176  	}
   177  	return ctr, err
   178  }
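
        // For orientation, a rough sketch of how the daemon is expected to drive this
        // client (illustrative only; error handling elided, and attachStdio is assumed
        // to be a libcontainerdtypes.StdioCallback supplied by the caller):
        //
        //	ctr, _ := c.NewContainer(ctx, id, spec, "", nil)
        //	tsk, _ := ctr.Start(ctx, "", false, attachStdio)
        //	// tsk.Status(ctx) now reports containerd.Running until the init process exits.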
   179  
   180  func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) (*container, error) {
   181  	logger := c.logger.WithField("container", id)
   182  	configuration := &hcsshim.ContainerConfig{
   183  		SystemType:              "Container",
   184  		Name:                    id,
   185  		Owner:                   defaultOwner,
   186  		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
   187  		HostName:                spec.Hostname,
   188  		HvPartition:             false,
   189  	}
   190  
   191  	c.extractResourcesFromSpec(spec, configuration)
   192  
   193  	if spec.Windows.Resources != nil {
   194  		if spec.Windows.Resources.Storage != nil {
   195  			if spec.Windows.Resources.Storage.Bps != nil {
   196  				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
   197  			}
   198  			if spec.Windows.Resources.Storage.Iops != nil {
   199  				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
   200  			}
   201  		}
   202  	}
   203  
   204  	if spec.Windows.HyperV != nil {
   205  		configuration.HvPartition = true
   206  	}
   207  
   208  	if spec.Windows.Network != nil {
   209  		configuration.EndpointList = spec.Windows.Network.EndpointList
   210  		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
   211  		if spec.Windows.Network.DNSSearchList != nil {
   212  			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
   213  		}
   214  		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
   215  	}
   216  
   217  	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
   218  		configuration.Credentials = cs
   219  	}
   220  
   221  	// We must have at least two layers in the spec: the bottom one being the
   222  	// base image, the top one being the RW layer.
   223  	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
   224  		return nil, fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
   225  	}
   226  
   227  	// Strip off the top-most layer as that's passed in separately to HCS
   228  	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
   229  	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
   230  
   231  	if configuration.HvPartition {
   232  		// We don't currently support setting the utility VM image explicitly.
   233  		// TODO circa RS5, this may be re-locatable.
   234  		if spec.Windows.HyperV.UtilityVMPath != "" {
   235  			return nil, errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
   236  		}
   237  
   238  		// Find the upper-most utility VM image.
   239  		var uvmImagePath string
   240  		for _, path := range layerFolders {
   241  			fullPath := filepath.Join(path, "UtilityVM")
   242  			_, err := os.Stat(fullPath)
   243  			if err == nil {
   244  				uvmImagePath = fullPath
   245  				break
   246  			}
   247  			if !os.IsNotExist(err) {
   248  				return nil, err
   249  			}
   250  		}
   251  		if uvmImagePath == "" {
   252  			return nil, errors.New("utility VM image could not be found")
   253  		}
   254  		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
   255  
   256  		if spec.Root.Path != "" {
   257  			return nil, errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
   258  		}
   259  	} else {
   260  		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
   261  		if matched, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil || !matched {
   262  			return nil, fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
   263  		}
   264  		// HCS API requires the trailing backslash to be removed
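        		// For example (GUID borrowed from the doc comment above), a Root.Path of
        		// `\\?\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}\` becomes the VolumePath
        		// `\\?\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}`.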
   265  		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
   266  	}
   267  
   268  	if spec.Root.Readonly {
   269  		return nil, errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
   270  	}
   271  
   272  	for _, layerPath := range layerFolders {
   273  		_, filename := filepath.Split(layerPath)
   274  		g, err := hcsshim.NameToGuid(filename)
   275  		if err != nil {
   276  			return nil, err
   277  		}
   278  		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
   279  			ID:   g.ToString(),
   280  			Path: layerPath,
   281  		})
   282  	}
   283  
   284  	// Add the mounts (volumes, bind mounts etc) to the structure
   285  	var mds []hcsshim.MappedDir
   286  	var mps []hcsshim.MappedPipe
   287  	for _, mount := range spec.Mounts {
   288  		const pipePrefix = `\\.\pipe\`
   289  		if mount.Type != "" {
   290  			return nil, fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
   291  		}
   292  		if strings.HasPrefix(mount.Destination, pipePrefix) {
   293  			mp := hcsshim.MappedPipe{
   294  				HostPath:          mount.Source,
   295  				ContainerPipeName: mount.Destination[len(pipePrefix):],
   296  			}
   297  			mps = append(mps, mp)
   298  		} else {
   299  			md := hcsshim.MappedDir{
   300  				HostPath:      mount.Source,
   301  				ContainerPath: mount.Destination,
   302  				ReadOnly:      false,
   303  			}
   304  			for _, o := range mount.Options {
   305  				if strings.ToLower(o) == "ro" {
   306  					md.ReadOnly = true
   307  				}
   308  			}
   309  			mds = append(mds, md)
   310  		}
   311  	}
   312  	configuration.MappedDirectories = mds
   313  	configuration.MappedPipes = mps
   314  
   315  	if len(spec.Windows.Devices) > 0 {
   316  		// Add any device assignments
   317  		if configuration.HvPartition {
   318  			return nil, errors.New("device assignment is not supported for HyperV containers")
   319  		}
   320  		for _, d := range spec.Windows.Devices {
   321  			// Per https://github.com/microsoft/hcsshim/blob/v0.9.2/internal/uvm/virtual_device.go#L17-L18,
   322  			// these represent an Interface Class GUID.
   323  			if d.IDType != "class" && d.IDType != "vpci-class-guid" {
   324  				return nil, errors.Errorf("device assignment of type '%s' is not supported", d.IDType)
   325  			}
   326  			configuration.AssignedDevices = append(configuration.AssignedDevices, hcsshim.AssignedDevice{InterfaceClassGUID: d.ID})
   327  		}
   328  	}
   329  
   330  	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
   331  	if err != nil {
   332  		return nil, err
   333  	}
   334  
   335  	// Construct a container object for calling start on it.
   336  	ctr := &container{
   337  		client:       c,
   338  		id:           id,
   339  		ociSpec:      spec,
   340  		hcsContainer: hcsContainer,
   341  	}
   342  
   343  	logger.Debug("starting container")
   344  	if err := ctr.hcsContainer.Start(); err != nil {
   345  		logger.WithError(err).Error("failed to start container")
   346  		ctr.mu.Lock()
   347  		if err := ctr.terminateContainer(); err != nil {
   348  			logger.WithError(err).Error("failed to cleanup after a failed Start")
   349  		} else {
   350  			logger.Debug("cleaned up after failed Start by calling Terminate")
   351  		}
   352  		ctr.mu.Unlock()
   353  		return nil, err
   354  	}
   355  
   356  	logger.Debug("createWindows() completed successfully")
   357  	return ctr, nil
   358  }
   359  
   360  func (c *client) extractResourcesFromSpec(spec *specs.Spec, configuration *hcsshim.ContainerConfig) {
   361  	if spec.Windows.Resources != nil {
   362  		if spec.Windows.Resources.CPU != nil {
   363  			if spec.Windows.Resources.CPU.Count != nil {
   364  				// This check is being done here rather than in adaptContainerSettings
   365  				// because we don't want to update the HostConfig in case this container
   366  				// is moved to a host with more CPUs than this one.
   367  				cpuCount := *spec.Windows.Resources.CPU.Count
   368  				hostCPUCount := uint64(sysinfo.NumCPU())
   369  				if cpuCount > hostCPUCount {
   370  					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
   371  					cpuCount = hostCPUCount
   372  				}
   373  				configuration.ProcessorCount = uint32(cpuCount)
   374  			}
   375  			if spec.Windows.Resources.CPU.Shares != nil {
   376  				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
   377  			}
   378  			if spec.Windows.Resources.CPU.Maximum != nil {
   379  				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
   380  			}
   381  		}
   382  		if spec.Windows.Resources.Memory != nil {
   383  			if spec.Windows.Resources.Memory.Limit != nil {
   384  				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
   385  			}
   386  		}
   387  	}
   388  }
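
        // A hedged example of the conversions above (values hypothetical): a spec
        // requesting 8 CPUs and a 512 MiB memory limit on a 4-CPU host would be
        // clamped and converted to ProcessorCount=4 and MemoryMaximumInMB=512:
        //
        //	count, limit := uint64(8), uint64(512*1024*1024)
        //	spec := &specs.Spec{Windows: &specs.Windows{Resources: &specs.WindowsResources{
        //		CPU:    &specs.WindowsCPUResources{Count: &count},
        //		Memory: &specs.WindowsMemoryResources{Limit: &limit},
        //	}}}
        //	cfg := &hcsshim.ContainerConfig{}
        //	c.extractResourcesFromSpec(spec, cfg)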
   389  
   390  func (ctr *container) Start(_ context.Context, _ string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, retErr error) {
   391  	ctr.mu.Lock()
   392  	defer ctr.mu.Unlock()
   393  
   394  	switch {
   395  	case ctr.ociSpec == nil:
   396  		return nil, errors.WithStack(errdefs.NotImplemented(errors.New("a restored container cannot be started")))
   397  	case ctr.task != nil:
   398  		return nil, errors.WithStack(errdefs.NotModified(cerrdefs.ErrAlreadyExists))
   399  	}
   400  
   401  	logger := ctr.client.logger.WithField("container", ctr.id)
   402  
   403  	// Note we always tell HCS to create stdout as it's required
   404  	// regardless of the '-i' or '-t' options, so that docker can always
   405  	// grab the output through logs. We also tell HCS to always create
   406  	// stdin, even if it's not used - it will be closed shortly. Stderr
   407  	// is only created if we're not running with -t.
   408  	var (
   409  		emulateConsole   bool
   410  		createStdErrPipe bool
   411  	)
   412  	if ctr.ociSpec.Process != nil {
   413  		emulateConsole = ctr.ociSpec.Process.Terminal
   414  		createStdErrPipe = !ctr.ociSpec.Process.Terminal
   415  	}
   416  
   417  	createProcessParms := &hcsshim.ProcessConfig{
   418  		EmulateConsole:   emulateConsole,
   419  		WorkingDirectory: ctr.ociSpec.Process.Cwd,
   420  		CreateStdInPipe:  true,
   421  		CreateStdOutPipe: true,
   422  		CreateStdErrPipe: createStdErrPipe,
   423  	}
   424  
   425  	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
   426  		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
   427  		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
   428  	}
   429  
   430  	// Configure the environment for the process
   431  	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
   432  
   433  	// Configure the CommandLine/CommandArgs
   434  	setCommandLineAndArgs(ctr.ociSpec.Process, createProcessParms)
   435  	logger.Debugf("start commandLine: %s", createProcessParms.CommandLine)
   436  
   437  	createProcessParms.User = ctr.ociSpec.Process.User.Username
   438  
   439  	// Start the command running in the container.
   440  	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
   441  	if err != nil {
   442  		logger.WithError(err).Error("CreateProcess() failed")
   443  		return nil, err
   444  	}
   445  
   446  	defer func() {
   447  		if retErr != nil {
   448  			if err := newProcess.Kill(); err != nil {
   449  				logger.WithError(err).Error("failed to kill process")
   450  			}
   451  			go func() {
   452  				if err := newProcess.Wait(); err != nil {
   453  					logger.WithError(err).Error("failed to wait for process")
   454  				}
   455  				if err := newProcess.Close(); err != nil {
   456  					logger.WithError(err).Error("failed to clean process resources")
   457  				}
   458  			}()
   459  		}
   460  	}()
   461  
   462  	pid := newProcess.Pid()
   463  	logger.WithField("pid", pid).Debug("init process started")
   464  
   465  	dio, err := newIOFromProcess(newProcess, ctr.ociSpec.Process.Terminal)
   466  	if err != nil {
   467  		logger.WithError(err).Error("failed to get stdio pipes")
   468  		return nil, err
   469  	}
   470  	_, err = attachStdio(dio)
   471  	if err != nil {
   472  		logger.WithError(err).Error("failed to attach stdio")
   473  		return nil, err
   474  	}
   475  
   476  	t := &task{process{
   477  		id:         ctr.id,
   478  		ctr:        ctr,
   479  		hcsProcess: newProcess,
   480  		waitCh:     make(chan struct{}),
   481  	}}
   482  
   483  	// All fallible operations have succeeded so it is now safe to set the
   484  	// container's current task.
   485  	ctr.task = t
   486  
   487  	// Spin up a goroutine to notify the backend and clean up resources when
   488  	// the task exits. Defer until after the start event is sent so that the
   489  	// exit event is not sent out-of-order.
   490  	defer func() { go t.reap() }()
   491  
   492  	// Generate the associated event
   493  	ctr.client.eventQ.Append(ctr.id, func() {
   494  		ei := libcontainerdtypes.EventInfo{
   495  			ContainerID: ctr.id,
   496  			ProcessID:   t.id,
   497  			Pid:         uint32(pid),
   498  		}
   499  		ctr.client.logger.WithFields(log.Fields{
   500  			"container":  ctr.id,
   501  			"event":      libcontainerdtypes.EventStart,
   502  			"event-info": ei,
   503  		}).Info("sending event")
   504  		err := ctr.client.backend.ProcessEvent(ei.ContainerID, libcontainerdtypes.EventStart, ei)
   505  		if err != nil {
   506  			ctr.client.logger.WithError(err).WithFields(log.Fields{
   507  				"container":  ei.ContainerID,
   508  				"event":      libcontainerdtypes.EventStart,
   509  				"event-info": ei,
   510  			}).Error("failed to process event")
   511  		}
   512  	})
   513  	logger.Debug("start() completed")
   514  	return t, nil
   515  }
   516  
   517  func (ctr *container) Task(context.Context) (libcontainerdtypes.Task, error) {
   518  	ctr.mu.Lock()
   519  	defer ctr.mu.Unlock()
   520  	if ctr.task == nil {
   521  		return nil, errdefs.NotFound(cerrdefs.ErrNotFound)
   522  	}
   523  	return ctr.task, nil
   524  }
   525  
   526  // setCommandLineAndArgs configures the HCS ProcessConfig based on an OCI process spec
   527  func setCommandLineAndArgs(process *specs.Process, createProcessParms *hcsshim.ProcessConfig) {
   528  	if process.CommandLine != "" {
   529  		createProcessParms.CommandLine = process.CommandLine
   530  	} else {
   531  		createProcessParms.CommandLine = system.EscapeArgs(process.Args)
   532  	}
   533  }
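
        // A minimal sketch of the two OCI process forms handled above (illustrative
        // values; assumes system.EscapeArgs quotes arguments per Windows conventions):
        //
        //	cfg := &hcsshim.ProcessConfig{}
        //	setCommandLineAndArgs(&specs.Process{CommandLine: `ping -t 127.0.0.1`}, cfg)
        //	// cfg.CommandLine is used verbatim: `ping -t 127.0.0.1`
        //
        //	cfg = &hcsshim.ProcessConfig{}
        //	setCommandLineAndArgs(&specs.Process{Args: []string{"cmd", "/c", "echo hello world"}}, cfg)
        //	// cfg.CommandLine is escaped and joined, e.g. `cmd /c "echo hello world"`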
   534  
   535  func newIOFromProcess(newProcess hcsshim.Process, terminal bool) (*cio.DirectIO, error) {
   536  	stdin, stdout, stderr, err := newProcess.Stdio()
   537  	if err != nil {
   538  		return nil, err
   539  	}
   540  
   541  	dio := cio.NewDirectIO(createStdInCloser(stdin, newProcess), nil, nil, terminal)
   542  
   543  	// Convert io.ReadClosers to io.Readers
   544  	if stdout != nil {
   545  		dio.Stdout = io.NopCloser(&autoClosingReader{ReadCloser: stdout})
   546  	}
   547  	if stderr != nil {
   548  		dio.Stderr = io.NopCloser(&autoClosingReader{ReadCloser: stderr})
   549  	}
   550  	return dio, nil
   551  }
   552  
   553  // Exec launches a process in a running container.
   554  //
   555  // The processID argument is entirely informational. As there is no mechanism
   556  // (exposed through the libcontainerd interfaces) to enumerate or reference an
   557  // exec'd process by ID, uniqueness is not currently enforced.
   558  func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Process, retErr error) {
   559  	hcsContainer, err := t.getHCSContainer()
   560  	if err != nil {
   561  		return nil, err
   562  	}
   563  	logger := t.ctr.client.logger.WithFields(log.Fields{
   564  		"container": t.ctr.id,
   565  		"exec":      processID,
   566  	})
   567  
   568  	// Note we always tell HCS to create stdout as it's required regardless
   569  	// of the '-i' or '-t' options, so that docker can always grab the output
   570  	// through logs. We also tell HCS to always create stdin, even if it's
   571  	// not used - it will be closed shortly. Stderr is only created if we're
   572  	// not running with -t.
   573  	createProcessParms := &hcsshim.ProcessConfig{
   574  		CreateStdInPipe:  true,
   575  		CreateStdOutPipe: true,
   576  		CreateStdErrPipe: !spec.Terminal,
   577  	}
   578  	if spec.Terminal {
   579  		createProcessParms.EmulateConsole = true
   580  		if spec.ConsoleSize != nil {
   581  			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
   582  			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
   583  		}
   584  	}
   585  
   586  	// Take the working directory from the exec's process spec if it is
   587  	// defined, otherwise fall back to the container's init process.
   588  	if spec.Cwd != "" {
   589  		createProcessParms.WorkingDirectory = spec.Cwd
   590  	} else {
   591  		createProcessParms.WorkingDirectory = t.ctr.ociSpec.Process.Cwd
   592  	}
   593  
   594  	// Configure the environment for the process
   595  	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
   596  
   597  	// Configure the CommandLine/CommandArgs
   598  	setCommandLineAndArgs(spec, createProcessParms)
   599  	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)
   600  
   601  	createProcessParms.User = spec.User.Username
   602  
   603  	// Start the command running in the container.
   604  	newProcess, err := hcsContainer.CreateProcess(createProcessParms)
   605  	if err != nil {
   606  		logger.WithError(err).Errorf("exec's CreateProcess() failed")
   607  		return nil, err
   608  	}
   609  	defer func() {
   610  		if retErr != nil {
   611  			if err := newProcess.Kill(); err != nil {
   612  				logger.WithError(err).Error("failed to kill process")
   613  			}
   614  			go func() {
   615  				if err := newProcess.Wait(); err != nil {
   616  					logger.WithError(err).Error("failed to wait for process")
   617  				}
   618  				if err := newProcess.Close(); err != nil {
   619  					logger.WithError(err).Error("failed to clean process resources")
   620  				}
   621  			}()
   622  		}
   623  	}()
   624  
   625  	dio, err := newIOFromProcess(newProcess, spec.Terminal)
   626  	if err != nil {
   627  		logger.WithError(err).Error("failed to get stdio pipes")
   628  		return nil, err
   629  	}
   630  	// Tell the engine to attach streams back to the client
   631  	_, err = attachStdio(dio)
   632  	if err != nil {
   633  		return nil, err
   634  	}
   635  
   636  	p := &process{
   637  		id:         processID,
   638  		ctr:        t.ctr,
   639  		hcsProcess: newProcess,
   640  		waitCh:     make(chan struct{}),
   641  	}
   642  
   643  	// Spin up a goroutine to notify the backend and clean up resources when
   644  	// the process exits. Defer until after the start event is sent so that
   645  	// the exit event is not sent out-of-order.
   646  	defer func() { go p.reap() }()
   647  
   648  	pid := newProcess.Pid()
   649  	t.ctr.client.eventQ.Append(t.ctr.id, func() {
   650  		ei := libcontainerdtypes.EventInfo{
   651  			ContainerID: t.ctr.id,
   652  			ProcessID:   p.id,
   653  			Pid:         uint32(pid),
   654  		}
   655  		t.ctr.client.logger.WithFields(log.Fields{
   656  			"container":  t.ctr.id,
   657  			"event":      libcontainerdtypes.EventExecAdded,
   658  			"event-info": ei,
   659  		}).Info("sending event")
   660  		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecAdded, ei)
   661  		if err != nil {
   662  			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
   663  				"container":  t.ctr.id,
   664  				"event":      libcontainerdtypes.EventExecAdded,
   665  				"event-info": ei,
   666  			}).Error("failed to process event")
   667  		}
   668  		err = t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventExecStarted, ei)
   669  		if err != nil {
   670  			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
   671  				"container":  t.ctr.id,
   672  				"event":      libcontainerdtypes.EventExecStarted,
   673  				"event-info": ei,
   674  			}).Error("failed to process event")
   675  		}
   676  	})
   677  
   678  	return p, nil
   679  }
   680  
   681  func (p *process) Pid() uint32 {
   682  	p.mu.Lock()
   683  	hcsProcess := p.hcsProcess
   684  	p.mu.Unlock()
   685  	if hcsProcess == nil {
   686  		return 0
   687  	}
   688  	return uint32(hcsProcess.Pid())
   689  }
   690  
   691  func (p *process) Kill(_ context.Context, signal syscall.Signal) error {
   692  	p.mu.Lock()
   693  	hcsProcess := p.hcsProcess
   694  	p.mu.Unlock()
   695  	if hcsProcess == nil {
   696  		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
   697  	}
   698  	return hcsProcess.Kill()
   699  }
   700  
   701  // Kill handles `docker stop` on Windows. While Linux has support for
   702  // the full range of signals, signals aren't really implemented on Windows.
   703  // We fake support for a regular stop and for -9 to force-kill.
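        //
        // The mapping implemented below, sketched for reference (signal values are
        // examples; any signal other than SIGKILL takes the shutdown path):
        //
        //	t.Kill(ctx, syscall.SIGKILL) // -> hcsContainer.Terminate()
        //	t.Kill(ctx, syscall.SIGTERM) // -> hcsContainer.Shutdown()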
   704  func (t *task) Kill(_ context.Context, signal syscall.Signal) error {
   705  	hcsContainer, err := t.getHCSContainer()
   706  	if err != nil {
   707  		return err
   708  	}
   709  
   710  	logger := t.ctr.client.logger.WithFields(log.Fields{
   711  		"container": t.ctr.id,
   712  		"process":   t.id,
   713  		"pid":       t.Pid(),
   714  		"signal":    signal,
   715  	})
   716  	logger.Debug("Signal()")
   717  
   718  	var op string
   719  	if signal == syscall.SIGKILL {
   720  		// Terminate the compute system
   721  		t.ctr.mu.Lock()
   722  		t.ctr.terminateInvoked = true
   723  		t.ctr.mu.Unlock()
   724  		op, err = "terminate", hcsContainer.Terminate()
   725  	} else {
   726  		// Shut down the container
   727  		op, err = "shutdown", hcsContainer.Shutdown()
   728  	}
   729  	if err != nil {
   730  		if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
   731  			// Best effort: log the error but do not return it.
   732  			logger.WithError(err).Errorf("failed to %s hcsshim container", op)
   733  		}
   734  	}
   735  
   736  	return nil
   737  }
   738  
   739  // Resize handles a CLI event to resize an interactive docker run or docker
   740  // exec window.
   741  func (p *process) Resize(_ context.Context, width, height uint32) error {
   742  	p.mu.Lock()
   743  	hcsProcess := p.hcsProcess
   744  	p.mu.Unlock()
   745  	if hcsProcess == nil {
   746  		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
   747  	}
   748  
   749  	p.ctr.client.logger.WithFields(log.Fields{
   750  		"container": p.ctr.id,
   751  		"process":   p.id,
   752  		"height":    height,
   753  		"width":     width,
   754  		"pid":       hcsProcess.Pid(),
   755  	}).Debug("resizing")
   756  	return hcsProcess.ResizeConsole(uint16(width), uint16(height))
   757  }
   758  
   759  func (p *process) CloseStdin(context.Context) error {
   760  	p.mu.Lock()
   761  	hcsProcess := p.hcsProcess
   762  	p.mu.Unlock()
   763  	if hcsProcess == nil {
   764  		return errors.WithStack(errdefs.NotFound(errors.New("process not found")))
   765  	}
   766  
   767  	return hcsProcess.CloseStdin()
   768  }
   769  
   770  // Pause handles pause requests for containers
   771  func (t *task) Pause(_ context.Context) error {
   772  	if t.ctr.ociSpec.Windows.HyperV == nil {
   773  		return cerrdefs.ErrNotImplemented
   774  	}
   775  
   776  	t.ctr.mu.Lock()
   777  	defer t.ctr.mu.Unlock()
   778  
   779  	if err := t.assertIsCurrentTask(); err != nil {
   780  		return err
   781  	}
   782  	if t.ctr.hcsContainer == nil {
   783  		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
   784  	}
   785  	if err := t.ctr.hcsContainer.Pause(); err != nil {
   786  		return err
   787  	}
   788  
   789  	t.ctr.isPaused = true
   790  
   791  	t.ctr.client.eventQ.Append(t.ctr.id, func() {
   792  		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
   793  			ContainerID: t.ctr.id,
   794  			ProcessID:   t.id,
   795  		})
   796  		t.ctr.client.logger.WithFields(log.Fields{
   797  			"container": t.ctr.id,
   798  			"event":     libcontainerdtypes.EventPaused,
   799  		}).Info("sending event")
   800  		if err != nil {
   801  			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
   802  				"container": t.ctr.id,
   803  				"event":     libcontainerdtypes.EventPaused,
   804  			}).Error("failed to process event")
   805  		}
   806  	})
   807  
   808  	return nil
   809  }
   810  
   811  // Resume handles resume requests for containers
   812  func (t *task) Resume(ctx context.Context) error {
   813  	if t.ctr.ociSpec.Windows.HyperV == nil {
   814  		return errors.New("cannot resume Windows Server Containers")
   815  	}
   816  
   817  	t.ctr.mu.Lock()
   818  	defer t.ctr.mu.Unlock()
   819  
   820  	if err := t.assertIsCurrentTask(); err != nil {
   821  		return err
   822  	}
   823  	if t.ctr.hcsContainer == nil {
   824  		return errdefs.NotFound(errors.WithStack(fmt.Errorf("container %q not found", t.ctr.id)))
   825  	}
   826  	if err := t.ctr.hcsContainer.Resume(); err != nil {
   827  		return err
   828  	}
   829  
   830  	t.ctr.isPaused = false
   831  
   832  	t.ctr.client.eventQ.Append(t.ctr.id, func() {
   833  		err := t.ctr.client.backend.ProcessEvent(t.ctr.id, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
   834  			ContainerID: t.ctr.id,
   835  			ProcessID:   t.id,
   836  		})
   837  		t.ctr.client.logger.WithFields(log.Fields{
   838  			"container": t.ctr.id,
   839  			"event":     libcontainerdtypes.EventResumed,
   840  		}).Info("sending event")
   841  		if err != nil {
   842  			t.ctr.client.logger.WithError(err).WithFields(log.Fields{
   843  				"container": t.ctr.id,
   844  				"event":     libcontainerdtypes.EventResumed,
   845  			}).Error("failed to process event")
   846  		}
   847  	})
   848  
   849  	return nil
   850  }
   851  
   852  // Stats handles stats requests for containers
   853  func (t *task) Stats(_ context.Context) (*libcontainerdtypes.Stats, error) {
   854  	hc, err := t.getHCSContainer()
   855  	if err != nil {
   856  		return nil, err
   857  	}
   858  
   859  	readAt := time.Now()
   860  	s, err := hc.Statistics()
   861  	if err != nil {
   862  		return nil, err
   863  	}
   864  	return &libcontainerdtypes.Stats{
   865  		Read:     readAt,
   866  		HCSStats: &s,
   867  	}, nil
   868  }
   869  
   870  // LoadContainer is the handler for restoring a container
   871  func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
   872  	c.logger.WithField("container", id).Debug("LoadContainer()")
   873  
   874  	// TODO Windows: On RS1, a re-attach isn't possible.
   875  	// However, there is one problematic scenario: a background container
   876  	// is running when the daemon dies unexpectedly. HCS will still have
   877  	// the compute system alive and running. For consistency, we terminate
   878  	// it here regardless of whether HCS knows about it, log a warning if
   879  	// the terminate fails, and then tell the backend that the container
   880  	// exited.
   881  	hc, err := hcsshim.OpenContainer(id)
   882  	if err != nil {
   883  		return nil, errdefs.NotFound(errors.New("container not found"))
   884  	}
   885  	const terminateTimeout = time.Minute * 2
   886  	err = hc.Terminate()
   887  
   888  	if hcsshim.IsPending(err) {
   889  		err = hc.WaitTimeout(terminateTimeout)
   890  	} else if hcsshim.IsAlreadyStopped(err) {
   891  		err = nil
   892  	}
   893  
   894  	if err != nil {
   895  		c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
   896  		return nil, err
   897  	}
   898  	return &container{
   899  		client:       c,
   900  		hcsContainer: hc,
   901  		id:           id,
   902  	}, nil
   903  }
   904  
   905  // AttachTask is only called by the daemon when restoring containers. As
   906  // re-attach isn't possible (see LoadContainer), a NotFound error is
   907  // unconditionally returned to allow restore to make progress.
   908  func (*container) AttachTask(context.Context, libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
   909  	return nil, errdefs.NotFound(cerrdefs.ErrNotImplemented)
   910  }
   911  
   912  // Pids returns a list of process IDs running in a container. It is not
   913  // implemented on Windows.
   914  func (t *task) Pids(context.Context) ([]containerd.ProcessInfo, error) {
   915  	return nil, errors.New("not implemented on Windows")
   916  }
   917  
   918  // Summary returns a summary of the processes running in a container.
   919  // This is present on Windows to support docker top. On Linux, the
   920  // engine shells out to ps to get process information. On Windows,
   921  // because the containers could be Hyper-V containers, their processes
   922  // would not be visible on the container host. However, libcontainerd
   923  // does have that information.
   924  func (t *task) Summary(_ context.Context) ([]libcontainerdtypes.Summary, error) {
   925  	hc, err := t.getHCSContainer()
   926  	if err != nil {
   927  		return nil, err
   928  	}
   929  
   930  	p, err := hc.ProcessList()
   931  	if err != nil {
   932  		return nil, err
   933  	}
   934  
   935  	pl := make([]libcontainerdtypes.Summary, len(p))
   936  	for i := range p {
   937  		pl[i] = libcontainerdtypes.Summary{
   938  			ImageName:                    p[i].ImageName,
   939  			CreatedAt:                    p[i].CreateTimestamp,
   940  			KernelTime_100Ns:             p[i].KernelTime100ns,
   941  			MemoryCommitBytes:            p[i].MemoryCommitBytes,
   942  			MemoryWorkingSetPrivateBytes: p[i].MemoryWorkingSetPrivateBytes,
   943  			MemoryWorkingSetSharedBytes:  p[i].MemoryWorkingSetSharedBytes,
   944  			ProcessID:                    p[i].ProcessId,
   945  			UserTime_100Ns:               p[i].UserTime100ns,
   946  			ExecID:                       "",
   947  		}
   948  	}
   949  	return pl, nil
   950  }
   951  
   952  func (p *process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
   953  	select {
   954  	case <-ctx.Done():
   955  		return nil, errors.WithStack(ctx.Err())
   956  	case <-p.waitCh:
   957  	default:
   958  		return nil, errdefs.Conflict(errors.New("process is running"))
   959  	}
   960  	return p.exited, nil
   961  }
   962  
   963  func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
   964  	select {
   965  	case <-ctx.Done():
   966  		return nil, errors.WithStack(ctx.Err())
   967  	case <-t.waitCh:
   968  	default:
   969  		return nil, errdefs.Conflict(errors.New("container is not stopped"))
   970  	}
   971  
   972  	t.ctr.mu.Lock()
   973  	defer t.ctr.mu.Unlock()
   974  	if err := t.assertIsCurrentTask(); err != nil {
   975  		return nil, err
   976  	}
   977  	t.ctr.task = nil
   978  	return t.exited, nil
   979  }
   980  
   981  func (t *task) ForceDelete(ctx context.Context) error {
   982  	select {
   983  	case <-t.waitCh: // Task is already stopped.
   984  		_, err := t.Delete(ctx)
   985  		return err
   986  	default:
   987  	}
   988  
   989  	if err := t.Kill(ctx, syscall.SIGKILL); err != nil {
   990  		return errors.Wrap(err, "could not force-kill task")
   991  	}
   992  
   993  	select {
   994  	case <-ctx.Done():
   995  		return ctx.Err()
   996  	case <-t.waitCh:
   997  		_, err := t.Delete(ctx)
   998  		return err
   999  	}
  1000  }
  1001  
  1002  func (t *task) Status(ctx context.Context) (containerd.Status, error) {
  1003  	select {
  1004  	case <-t.waitCh:
  1005  		return containerd.Status{
  1006  			Status:     containerd.Stopped,
  1007  			ExitStatus: t.exited.ExitCode(),
  1008  			ExitTime:   t.exited.ExitTime(),
  1009  		}, nil
  1010  	default:
  1011  	}
  1012  
  1013  	t.ctr.mu.Lock()
  1014  	defer t.ctr.mu.Unlock()
  1015  	s := containerd.Running
  1016  	if t.ctr.isPaused {
  1017  		s = containerd.Paused
  1018  	}
  1019  	return containerd.Status{Status: s}, nil
  1020  }
  1021  
  1022  func (*task) UpdateResources(ctx context.Context, resources *libcontainerdtypes.Resources) error {
  1023  	// Updating resources isn't supported on Windows, but we return nil so
  1024  	// that the container-update path can still proceed.
  1025  	return nil
  1026  }
  1027  
  1028  func (*task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
  1029  	return errors.New("Windows: Containers do not support checkpoints")
  1030  }
  1031  
  1032  // assertIsCurrentTask returns a non-nil error if the task has been deleted.
  1033  func (t *task) assertIsCurrentTask() error {
  1034  	if t.ctr.task != t {
  1035  		return errors.WithStack(errdefs.NotFound(fmt.Errorf("task %q not found", t.id)))
  1036  	}
  1037  	return nil
  1038  }
  1039  
  1040  // getHCSContainer returns a reference to the hcsshim Container for the task's
  1041  // container if neither the task nor container have been deleted.
  1042  //
  1043  // t.ctr.mu must not be locked by the calling goroutine when calling this
  1044  // function.
  1045  func (t *task) getHCSContainer() (hcsshim.Container, error) {
  1046  	t.ctr.mu.Lock()
  1047  	defer t.ctr.mu.Unlock()
  1048  	if err := t.assertIsCurrentTask(); err != nil {
  1049  		return nil, err
  1050  	}
  1051  	hc := t.ctr.hcsContainer
  1052  	if hc == nil {
  1053  		return nil, errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", t.ctr.id)))
  1054  	}
  1055  	return hc, nil
  1056  }
  1057  
  1058  // ctr mutex must be held when calling this function.
  1059  func (ctr *container) shutdownContainer() error {
  1060  	var err error
  1061  	const waitTimeout = time.Minute * 5
  1062  
  1063  	if !ctr.terminateInvoked {
  1064  		err = ctr.hcsContainer.Shutdown()
  1065  	}
  1066  
  1067  	if hcsshim.IsPending(err) || ctr.terminateInvoked {
  1068  		err = ctr.hcsContainer.WaitTimeout(waitTimeout)
  1069  	} else if hcsshim.IsAlreadyStopped(err) {
  1070  		err = nil
  1071  	}
  1072  
  1073  	if err != nil {
  1074  		ctr.client.logger.WithError(err).WithField("container", ctr.id).
  1075  			Debug("failed to shutdown container, terminating it")
  1076  		terminateErr := ctr.terminateContainer()
  1077  		if terminateErr != nil {
  1078  			ctr.client.logger.WithError(terminateErr).WithField("container", ctr.id).
  1079  				Error("failed to shutdown container, and subsequent terminate also failed")
  1080  			return fmt.Errorf("%s: subsequent terminate failed %s", err, terminateErr)
  1081  		}
  1082  		return err
  1083  	}
  1084  
  1085  	return nil
  1086  }
  1087  
  1088  // ctr mutex must be held when calling this function.
  1089  func (ctr *container) terminateContainer() error {
  1090  	const terminateTimeout = time.Minute * 5
  1091  	ctr.terminateInvoked = true
  1092  	err := ctr.hcsContainer.Terminate()
  1093  
  1094  	if hcsshim.IsPending(err) {
  1095  		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
  1096  	} else if hcsshim.IsAlreadyStopped(err) {
  1097  		err = nil
  1098  	}
  1099  
  1100  	if err != nil {
  1101  		ctr.client.logger.WithError(err).WithField("container", ctr.id).
  1102  			Debug("failed to terminate container")
  1103  		return err
  1104  	}
  1105  
  1106  	return nil
  1107  }
  1108  
  1109  func (p *process) reap() {
  1110  	logger := p.ctr.client.logger.WithFields(log.Fields{
  1111  		"container": p.ctr.id,
  1112  		"process":   p.id,
  1113  	})
  1114  
  1115  	var eventErr error
  1116  
  1117  	// Block indefinitely for the process to exit.
  1118  	if err := p.hcsProcess.Wait(); err != nil {
  1119  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1120  			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
  1121  		}
  1122  		// Fall through here, do not return. This ensures we tell the
  1123  		// docker engine that the process/container has exited to avoid
  1124  		// a container being dropped on the floor.
  1125  	}
  1126  	exitedAt := time.Now()
  1127  
  1128  	exitCode, err := p.hcsProcess.ExitCode()
  1129  	if err != nil {
  1130  		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
  1131  			logger.WithError(err).Warnf("unable to get exit code for process")
  1132  		}
  1133  		// Since we got an error retrieving the exit code, make sure that the
  1134  		// code we return doesn't incorrectly indicate success.
  1135  		exitCode = -1
  1136  
  1137  		// Fall through here, do not return. This ensures we tell the
  1138  		// docker engine that the process/container has exited to avoid
  1139  		// a container being dropped on the floor.
  1140  	}
  1141  
  1142  	p.mu.Lock()
  1143  	hcsProcess := p.hcsProcess
  1144  	p.hcsProcess = nil
  1145  	p.mu.Unlock()
  1146  
  1147  	if err := hcsProcess.Close(); err != nil {
  1148  		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
  1149  		exitCode = -1
  1150  		eventErr = fmt.Errorf("hcsProcess.Close() failed %s", err)
  1151  	}
  1152  
  1153  	// Explicit locking is not required as reads from exited are
  1154  	// synchronized using waitCh.
  1155  	p.exited = containerd.NewExitStatus(uint32(exitCode), exitedAt, nil)
  1156  	close(p.waitCh)
  1157  
  1158  	p.ctr.client.eventQ.Append(p.ctr.id, func() {
  1159  		ei := libcontainerdtypes.EventInfo{
  1160  			ContainerID: p.ctr.id,
  1161  			ProcessID:   p.id,
  1162  			Pid:         uint32(hcsProcess.Pid()),
  1163  			ExitCode:    uint32(exitCode),
  1164  			ExitedAt:    exitedAt,
  1165  			Error:       eventErr,
  1166  		}
  1167  		p.ctr.client.logger.WithFields(log.Fields{
  1168  			"container":  p.ctr.id,
  1169  			"event":      libcontainerdtypes.EventExit,
  1170  			"event-info": ei,
  1171  		}).Info("sending event")
  1172  		err := p.ctr.client.backend.ProcessEvent(p.ctr.id, libcontainerdtypes.EventExit, ei)
  1173  		if err != nil {
  1174  			p.ctr.client.logger.WithError(err).WithFields(log.Fields{
  1175  				"container":  p.ctr.id,
  1176  				"event":      libcontainerdtypes.EventExit,
  1177  				"event-info": ei,
  1178  			}).Error("failed to process event")
  1179  		}
  1180  	})
  1181  }
  1182  
  1183  func (ctr *container) Delete(context.Context) error {
  1184  	ctr.mu.Lock()
  1185  	defer ctr.mu.Unlock()
  1186  
  1187  	if ctr.hcsContainer == nil {
  1188  		return errors.WithStack(errdefs.NotFound(fmt.Errorf("container %q not found", ctr.id)))
  1189  	}
  1190  
  1191  	// Check that there is no task currently running.
  1192  	if ctr.task != nil {
  1193  		select {
  1194  		case <-ctr.task.waitCh:
  1195  		default:
  1196  			return errors.WithStack(errdefs.Conflict(errors.New("container is not stopped")))
  1197  		}
  1198  	}
  1199  
  1200  	var (
  1201  		logger = ctr.client.logger.WithFields(log.Fields{
  1202  			"container": ctr.id,
  1203  		})
  1204  		thisErr error
  1205  	)
  1206  
  1207  	if err := ctr.shutdownContainer(); err != nil {
  1208  		logger.WithError(err).Warn("failed to shutdown container")
  1209  		thisErr = errors.Wrap(err, "failed to shutdown container")
  1210  	} else {
  1211  		logger.Debug("completed container shutdown")
  1212  	}
  1213  
  1214  	if err := ctr.hcsContainer.Close(); err != nil {
  1215  		logger.WithError(err).Error("failed to clean hcs container resources")
  1216  		thisErr = errors.Wrap(err, "failed to terminate container")
  1217  	}
  1218  
  1219  	ctr.hcsContainer = nil
  1220  	return thisErr
  1221  }