github.com/jwhonce/docker@v0.6.7-0.20190327063223-da823cf3a5a3/libcontainerd/remote/client.go

     1  package remote // import "github.com/docker/docker/libcontainerd/remote"
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"io"
     7  	"os"
     8  	"path/filepath"
     9  	"reflect"
    10  	"runtime"
    11  	"strings"
    12  	"sync"
    13  	"syscall"
    14  	"time"
    15  
    16  	"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
    17  	"github.com/containerd/containerd"
    18  	apievents "github.com/containerd/containerd/api/events"
    19  	"github.com/containerd/containerd/api/types"
    20  	"github.com/containerd/containerd/archive"
    21  	"github.com/containerd/containerd/cio"
    22  	"github.com/containerd/containerd/content"
    23  	containerderrors "github.com/containerd/containerd/errdefs"
    24  	"github.com/containerd/containerd/events"
    25  	"github.com/containerd/containerd/images"
    26  	"github.com/containerd/containerd/runtime/linux/runctypes"
    27  	"github.com/containerd/typeurl"
    28  	"github.com/docker/docker/errdefs"
    29  	"github.com/docker/docker/libcontainerd/queue"
    30  	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
    31  
    32  	"github.com/docker/docker/pkg/ioutils"
    33  	v1 "github.com/opencontainers/image-spec/specs-go/v1"
    34  	specs "github.com/opencontainers/runtime-spec/specs-go"
    35  	"github.com/pkg/errors"
    36  	"github.com/sirupsen/logrus"
    37  	"google.golang.org/grpc/codes"
    38  	"google.golang.org/grpc/status"
    39  )
    40  
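        // container tracks the containerd container, its task and any exec
        // processes belonging to a single Docker container. All fields are
        // protected by mu.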
    41  type container struct {
    42  	mu sync.Mutex
    43  
    44  	bundleDir string
    45  	ctr       containerd.Container
    46  	task      containerd.Task
    47  	execs     map[string]containerd.Process
    48  	oomKilled bool
    49  }
    50  
    51  func (c *container) setTask(t containerd.Task) {
    52  	c.mu.Lock()
    53  	c.task = t
    54  	c.mu.Unlock()
    55  }
    56  
    57  func (c *container) getTask() containerd.Task {
    58  	c.mu.Lock()
    59  	t := c.task
    60  	c.mu.Unlock()
    61  	return t
    62  }
    63  
    64  func (c *container) addProcess(id string, p containerd.Process) {
    65  	c.mu.Lock()
    66  	if c.execs == nil {
    67  		c.execs = make(map[string]containerd.Process)
    68  	}
    69  	c.execs[id] = p
    70  	c.mu.Unlock()
    71  }
    72  
    73  func (c *container) deleteProcess(id string) {
    74  	c.mu.Lock()
    75  	delete(c.execs, id)
    76  	c.mu.Unlock()
    77  }
    78  
    79  func (c *container) getProcess(id string) containerd.Process {
    80  	c.mu.Lock()
    81  	p := c.execs[id]
    82  	c.mu.Unlock()
    83  	return p
    84  }
    85  
    86  func (c *container) setOOMKilled(killed bool) {
    87  	c.mu.Lock()
    88  	c.oomKilled = killed
    89  	c.mu.Unlock()
    90  }
    91  
    92  func (c *container) getOOMKilled() bool {
    93  	c.mu.Lock()
    94  	killed := c.oomKilled
    95  	c.mu.Unlock()
    96  	return killed
    97  }
    98  
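        // client is a libcontainerd client backed by a containerd gRPC client,
        // scoped to a single containerd namespace.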
    99  type client struct {
   100  	sync.RWMutex // protects containers map
   101  
   102  	client   *containerd.Client
   103  	stateDir string
   104  	logger   *logrus.Entry
   105  	ns       string
   106  
   107  	backend    libcontainerdtypes.Backend
   108  	eventQ     queue.Queue
   109  	containers map[string]*container
   110  }
   111  
   112  // NewClient creates a new libcontainerd client from a containerd client
   113  func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
   114  	c := &client{
   115  		client:     cli,
   116  		stateDir:   stateDir,
   117  		logger:     logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
   118  		ns:         ns,
   119  		backend:    b,
   120  		containers: make(map[string]*container),
   121  	}
   122  
   123  	go c.processEventStream(ctx, ns)
   124  
   125  	return c, nil
   126  }
   127  
   128  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
   129  	return c.client.Version(ctx)
   130  }
   131  
   132  // Restore loads the containerd container.
   133  // It should not be called concurrently with any other operation for the given ID.
   134  func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (alive bool, pid int, err error) {
   135  	c.Lock()
   136  	_, ok := c.containers[id]
   137  	if ok {
   138  		c.Unlock()
   139  		return false, 0, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   140  	}
   141  
   142  	cntr := &container{}
   143  	c.containers[id] = cntr
   144  	cntr.mu.Lock()
   145  	defer cntr.mu.Unlock()
   146  
   147  	c.Unlock()
   148  
   149  	defer func() {
   150  		if err != nil {
   151  			c.Lock()
   152  			delete(c.containers, id)
   153  			c.Unlock()
   154  		}
   155  	}()
   156  
   157  	var dio *cio.DirectIO
   158  	defer func() {
   159  		if err != nil && dio != nil {
   160  			dio.Cancel()
   161  			dio.Close()
   162  		}
   163  		err = wrapError(err)
   164  	}()
   165  
   166  	ctr, err := c.client.LoadContainer(ctx, id)
   167  	if err != nil {
   168  		return false, -1, errors.WithStack(wrapError(err))
   169  	}
   170  
   171  	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
   172  		// dio must be assigned to the dio variable declared above so that the
   173  		// deferred cleanup can cancel and close it on error
   174  
   175  		dio, err = c.newDirectIO(ctx, fifos)
   176  		if err != nil {
   177  			return nil, err
   178  		}
   179  		return attachStdio(dio)
   180  	}
   181  	t, err := ctr.Task(ctx, attachIO)
   182  	if err != nil && !containerderrors.IsNotFound(err) {
   183  		return false, -1, errors.Wrap(wrapError(err), "error getting containerd task for container")
   184  	}
   185  
   186  	if t != nil {
   187  		s, err := t.Status(ctx)
   188  		if err != nil {
   189  			return false, -1, errors.Wrap(wrapError(err), "error getting task status")
   190  		}
   191  
   192  		alive = s.Status != containerd.Stopped
   193  		pid = int(t.Pid())
   194  	}
   195  
   196  	cntr.bundleDir = filepath.Join(c.stateDir, id)
   197  	cntr.ctr = ctr
   198  	cntr.task = t
   199  	// TODO(mlaventure): load execs
   200  
   201  	c.logger.WithFields(logrus.Fields{
   202  		"container": id,
   203  		"alive":     alive,
   204  		"pid":       pid,
   205  	}).Debug("restored container")
   206  
   207  	return alive, pid, nil
   208  }
   209  
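        // Create creates a new containerd container with the given OCI spec and
        // prepares its bundle directory under the client's state dir.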
   210  func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, runtimeOptions interface{}) error {
   211  	if ctr := c.getContainer(id); ctr != nil {
   212  		return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   213  	}
   214  
   215  	bdir, err := prepareBundleDir(filepath.Join(c.stateDir, id), ociSpec)
   216  	if err != nil {
   217  		return errdefs.System(errors.Wrap(err, "prepare bundle dir failed"))
   218  	}
   219  
   220  	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
   221  
   222  	cdCtr, err := c.client.NewContainer(ctx, id,
   223  		containerd.WithSpec(ociSpec),
   224  		containerd.WithRuntime(runtimeName, runtimeOptions))
   225  	if err != nil {
   226  		return wrapError(err)
   227  	}
   228  
   229  	c.Lock()
   230  	c.containers[id] = &container{
   231  		bundleDir: bdir,
   232  		ctr:       cdCtr,
   233  	}
   234  	c.Unlock()
   235  
   236  	return nil
   237  }
   238  
   239  // Start creates and starts a task for the specified containerd id
   240  func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   241  	ctr := c.getContainer(id)
   242  	if ctr == nil {
   243  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   244  	}
   245  	if t := ctr.getTask(); t != nil {
   246  		return -1, errors.WithStack(errdefs.Conflict(errors.New("container already started")))
   247  	}
   248  
   249  	var (
   250  		cp             *types.Descriptor
   251  		t              containerd.Task
   252  		rio            cio.IO
   253  		err            error
   254  		stdinCloseSync = make(chan struct{})
   255  	)
   256  
   257  	if checkpointDir != "" {
   258  		// write checkpoint to the content store
   259  		tar := archive.Diff(ctx, "", checkpointDir)
   260  		cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
   261  		// remove the checkpoint when we're done
   262  		defer func() {
   263  			if cp != nil {
   264  				err := c.client.ContentStore().Delete(context.Background(), cp.Digest)
   265  				if err != nil {
   266  					c.logger.WithError(err).WithFields(logrus.Fields{
   267  						"ref":    checkpointDir,
   268  						"digest": cp.Digest,
   269  					}).Warnf("failed to delete temporary checkpoint entry")
   270  				}
   271  			}
   272  		}()
   273  		if err := tar.Close(); err != nil {
   274  			return -1, errors.Wrap(err, "failed to close checkpoint tar stream")
   275  		}
   276  		if err != nil {
   277  			return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd")
   278  		}
   279  	}
   280  
   281  	spec, err := ctr.ctr.Spec(ctx)
   282  	if err != nil {
   283  		return -1, errors.Wrap(err, "failed to retrieve spec")
   284  	}
   285  	uid, gid := getSpecUser(spec)
   286  	t, err = ctr.ctr.NewTask(ctx,
   287  		func(id string) (cio.IO, error) {
   288  			fifos := newFIFOSet(ctr.bundleDir, libcontainerdtypes.InitProcessName, withStdin, spec.Process.Terminal)
   289  
   290  			rio, err = c.createIO(fifos, id, libcontainerdtypes.InitProcessName, stdinCloseSync, attachStdio)
   291  			return rio, err
   292  		},
   293  		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
   294  			info.Checkpoint = cp
   295  			if runtime.GOOS != "windows" {
   296  				info.Options = &runctypes.CreateOptions{
   297  					IoUid:       uint32(uid),
   298  					IoGid:       uint32(gid),
   299  					NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
   300  				}
   301  			} else {
   302  				// Make sure we set the runhcs options to debug if we are at debug level.
   303  				if c.logger.Level == logrus.DebugLevel {
   304  					info.Options = &options.Options{Debug: true}
   305  				}
   306  			}
   307  			return nil
   308  		})
   309  	if err != nil {
   310  		close(stdinCloseSync)
   311  		if rio != nil {
   312  			rio.Cancel()
   313  			rio.Close()
   314  		}
   315  		return -1, wrapError(err)
   316  	}
   317  
   318  	ctr.setTask(t)
   319  
   320  	// Signal c.createIO that it can call CloseIO
   321  	close(stdinCloseSync)
   322  
   323  	if err := t.Start(ctx); err != nil {
   324  		if _, err := t.Delete(ctx); err != nil {
   325  			c.logger.WithError(err).WithField("container", id).
   326  				Error("failed to delete task after failed start")
   327  		}
   328  		ctr.setTask(nil)
   329  		return -1, wrapError(err)
   330  	}
   331  
   332  	return int(t.Pid()), nil
   333  }
   334  
   335  // Exec creates an exec process.
   336  //
   337  // The containerd client calls Exec to register the exec config on the shim side.
   338  // When the client calls Start, the shim creates the stdin fifo if needed. For
   339  // the container's main process, however, the stdin fifo is created during Create,
   340  // not during the Start call. The stdinCloseSync channel should be closed after
   341  // starting the exec process.
   342  func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
   343  	ctr := c.getContainer(containerID)
   344  	if ctr == nil {
   345  		return -1, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   346  	}
   347  	t := ctr.getTask()
   348  	if t == nil {
   349  		return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
   350  	}
   351  
   352  	if p := ctr.getProcess(processID); p != nil {
   353  		return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   354  	}
   355  
   356  	var (
   357  		p              containerd.Process
   358  		rio            cio.IO
   359  		err            error
   360  		stdinCloseSync = make(chan struct{})
   361  	)
   362  
   363  	fifos := newFIFOSet(ctr.bundleDir, processID, withStdin, spec.Terminal)
   364  
   365  	defer func() {
   366  		if err != nil {
   367  			if rio != nil {
   368  				rio.Cancel()
   369  				rio.Close()
   370  			}
   371  		}
   372  	}()
   373  
   374  	p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
   375  		rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
   376  		return rio, err
   377  	})
   378  	if err != nil {
   379  		close(stdinCloseSync)
   380  		return -1, wrapError(err)
   381  	}
   382  
   383  	ctr.addProcess(processID, p)
   384  
   385  	// Signal c.createIO that it can call CloseIO
   386  	//
   387  	// the stdin of the exec process will be created after p.Start in containerd
   388  	defer close(stdinCloseSync)
   389  
   390  	if err = p.Start(ctx); err != nil {
   391  		// use a new context for cleanup because the old one may have been cancelled by the user, but leave a timeout to make sure
   392  		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
   393  		// older containerd-shim
   394  		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
   395  		defer cancel()
   396  		p.Delete(ctx)
   397  		ctr.deleteProcess(processID)
   398  		return -1, wrapError(err)
   399  	}
   400  
   401  	return int(p.Pid()), nil
   402  }
   403  
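        // SignalProcess sends a signal to the container's init process or to the
        // exec process identified by processID.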
   404  func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error {
   405  	p, err := c.getProcess(containerID, processID)
   406  	if err != nil {
   407  		return err
   408  	}
   409  	return wrapError(p.Kill(ctx, syscall.Signal(signal)))
   410  }
   411  
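        // ResizeTerminal changes the terminal size of the given process.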
   412  func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
   413  	p, err := c.getProcess(containerID, processID)
   414  	if err != nil {
   415  		return err
   416  	}
   417  
   418  	return p.Resize(ctx, uint32(width), uint32(height))
   419  }
   420  
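        // CloseStdin closes the stdin stream of the given process.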
   421  func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error {
   422  	p, err := c.getProcess(containerID, processID)
   423  	if err != nil {
   424  		return err
   425  	}
   426  
   427  	return p.CloseIO(ctx, containerd.WithStdinCloser)
   428  }
   429  
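        // Pause pauses the container's task.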
   430  func (c *client) Pause(ctx context.Context, containerID string) error {
   431  	p, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   432  	if err != nil {
   433  		return err
   434  	}
   435  
   436  	return wrapError(p.(containerd.Task).Pause(ctx))
   437  }
   438  
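        // Resume resumes the container's paused task.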
   439  func (c *client) Resume(ctx context.Context, containerID string) error {
   440  	p, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   441  	if err != nil {
   442  		return err
   443  	}
   444  
   445  	return p.(containerd.Task).Resume(ctx)
   446  }
   447  
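        // Stats returns resource usage metrics for the container's task.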
   448  func (c *client) Stats(ctx context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
   449  	p, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   450  	if err != nil {
   451  		return nil, err
   452  	}
   453  
   454  	m, err := p.(containerd.Task).Metrics(ctx)
   455  	if err != nil {
   456  		return nil, err
   457  	}
   458  
   459  	v, err := typeurl.UnmarshalAny(m.Data)
   460  	if err != nil {
   461  		return nil, err
   462  	}
   463  	return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
   464  }
   465  
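        // ListPids returns the PIDs of all processes running in the container.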
   466  func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) {
   467  	p, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   468  	if err != nil {
   469  		return nil, err
   470  	}
   471  
   472  	pis, err := p.(containerd.Task).Pids(ctx)
   473  	if err != nil {
   474  		return nil, err
   475  	}
   476  
   477  	var pids []uint32
   478  	for _, i := range pis {
   479  		pids = append(pids, i.Pid)
   480  	}
   481  
   482  	return pids, nil
   483  }
   484  
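        // Summary returns platform-specific details for each process running in
        // the container.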
   485  func (c *client) Summary(ctx context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
   486  	p, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   487  	if err != nil {
   488  		return nil, err
   489  	}
   490  
   491  	pis, err := p.(containerd.Task).Pids(ctx)
   492  	if err != nil {
   493  		return nil, err
   494  	}
   495  
   496  	var infos []libcontainerdtypes.Summary
   497  	for _, pi := range pis {
   498  		i, err := typeurl.UnmarshalAny(pi.Info)
   499  		if err != nil {
   500  			return nil, errors.Wrap(err, "unable to decode process details")
   501  		}
   502  		s, err := summaryFromInterface(i)
   503  		if err != nil {
   504  			return nil, err
   505  		}
   506  		infos = append(infos, *s)
   507  	}
   508  
   509  	return infos, nil
   510  }
   511  
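        // DeleteTask deletes the container's task and returns its exit code and
        // exit time. Failures are swallowed and reported as exit code 255 with
        // the current time.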
   512  func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
   513  	p, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   514  	if err != nil {
   515  		return 255, time.Now(), nil
   516  	}
   517  
   518  	status, err := p.(containerd.Task).Delete(ctx)
   519  	if err != nil {
   520  		return 255, time.Now(), nil
   521  	}
   522  
   523  	if ctr := c.getContainer(containerID); ctr != nil {
   524  		ctr.setTask(nil)
   525  	}
   526  	return status.ExitCode(), status.ExitTime(), nil
   527  }
   528  
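        // Delete removes the containerd container and, unless LIBCONTAINERD_NOCLEAN
        // is set to "1", its bundle directory on disk.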
   529  func (c *client) Delete(ctx context.Context, containerID string) error {
   530  	ctr := c.getContainer(containerID)
   531  	if ctr == nil {
   532  		return errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   533  	}
   534  
   535  	if err := ctr.ctr.Delete(ctx); err != nil {
   536  		return wrapError(err)
   537  	}
   538  
   539  	if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
   540  		if err := os.RemoveAll(ctr.bundleDir); err != nil {
   541  			c.logger.WithError(err).WithFields(logrus.Fields{
   542  				"container": containerID,
   543  				"bundle":    ctr.bundleDir,
   544  			}).Error("failed to remove state dir")
   545  		}
   546  	}
   547  
   548  	c.removeContainer(containerID)
   549  
   550  	return nil
   551  }
   552  
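        // Status returns the current status of the container's task.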
   553  func (c *client) Status(ctx context.Context, containerID string) (libcontainerdtypes.Status, error) {
   554  	ctr := c.getContainer(containerID)
   555  	if ctr == nil {
   556  		return libcontainerdtypes.StatusUnknown, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   557  	}
   558  
   559  	t := ctr.getTask()
   560  	if t == nil {
   561  		return libcontainerdtypes.StatusUnknown, errors.WithStack(errdefs.NotFound(errors.New("no such task")))
   562  	}
   563  
   564  	s, err := t.Status(ctx)
   565  	if err != nil {
   566  		return libcontainerdtypes.StatusUnknown, wrapError(err)
   567  	}
   568  
   569  	return libcontainerdtypes.Status(s.Status), nil
   570  }
   571  
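        // CreateCheckpoint checkpoints the container's task and unpacks the
        // checkpoint data into checkpointDir. If exit is true, the task is
        // stopped as part of the checkpoint.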
   572  func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
   573  	p, err := c.getProcess(containerID, libcontainerdtypes.InitProcessName)
   574  	if err != nil {
   575  		return err
   576  	}
   577  
   578  	opts := []containerd.CheckpointTaskOpts{}
   579  	if exit {
   580  		opts = append(opts, func(r *containerd.CheckpointTaskInfo) error {
   581  			if r.Options == nil {
   582  				r.Options = &runctypes.CheckpointOptions{
   583  					Exit: true,
   584  				}
   585  			} else {
   586  				opts, _ := r.Options.(*runctypes.CheckpointOptions)
   587  				opts.Exit = true
   588  			}
   589  			return nil
   590  		})
   591  	}
   592  	img, err := p.(containerd.Task).Checkpoint(ctx, opts...)
   593  	if err != nil {
   594  		return wrapError(err)
   595  	}
   596  	// Whatever happens, delete the checkpoint from containerd
   597  	defer func() {
   598  		err := c.client.ImageService().Delete(context.Background(), img.Name())
   599  		if err != nil {
   600  			c.logger.WithError(err).WithField("digest", img.Target().Digest).
   601  				Warnf("failed to delete checkpoint image")
   602  		}
   603  	}()
   604  
   605  	b, err := content.ReadBlob(ctx, c.client.ContentStore(), img.Target())
   606  	if err != nil {
   607  		return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
   608  	}
   609  	var index v1.Index
   610  	if err := json.Unmarshal(b, &index); err != nil {
   611  		return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
   612  	}
   613  
   614  	var cpDesc *v1.Descriptor
   615  	for _, m := range index.Manifests {
   616  		if m.MediaType == images.MediaTypeContainerd1Checkpoint {
   617  			cpDesc = &m
   618  			break
   619  		}
   620  	}
   621  	if cpDesc == nil {
   622  		return errdefs.System(errors.New("invalid checkpoint"))
   623  	}
   624  
   625  	rat, err := c.client.ContentStore().ReaderAt(ctx, *cpDesc)
   626  	if err != nil {
   627  		return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
   628  	}
   629  	defer rat.Close()
   630  	_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
   631  	if err != nil {
   632  		return errdefs.System(errors.Wrapf(err, "failed to read checkpoint data"))
   633  	}
   634  
   635  	return err
   636  }
   637  
   638  func (c *client) getContainer(id string) *container {
   639  	c.RLock()
   640  	ctr := c.containers[id]
   641  	c.RUnlock()
   642  
   643  	return ctr
   644  }
   645  
   646  func (c *client) removeContainer(id string) {
   647  	c.Lock()
   648  	delete(c.containers, id)
   649  	c.Unlock()
   650  }
   651  
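        // getProcess returns the container's task for the init process, or the
        // registered exec process with the given processID.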
   652  func (c *client) getProcess(containerID, processID string) (containerd.Process, error) {
   653  	ctr := c.getContainer(containerID)
   654  	if ctr == nil {
   655  		return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   656  	}
   657  
   658  	t := ctr.getTask()
   659  	if t == nil {
   660  		return nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
   661  	}
   662  	if processID == libcontainerdtypes.InitProcessName {
   663  		return t, nil
   664  	}
   665  
   666  	p := ctr.getProcess(processID)
   667  	if p == nil {
   668  		return nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
   669  	}
   670  	return p, nil
   671  }
   672  
   673  // createIO creates the io to be used by a process.
   674  // The stdin closer resolves the process lazily because it may not have been registered yet when the closure is created.
   675  func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
   676  	var (
   677  		io  *cio.DirectIO
   678  		err error
   679  	)
   680  	io, err = c.newDirectIO(context.Background(), fifos)
   681  	if err != nil {
   682  		return nil, err
   683  	}
   684  
   685  	if io.Stdin != nil {
   686  		var (
   687  			err       error
   688  			stdinOnce sync.Once
   689  		)
   690  		pipe := io.Stdin
   691  		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
   692  			stdinOnce.Do(func() {
   693  				err = pipe.Close()
   694  				// Do the rest in a new routine to avoid a deadlock if the
   695  				// Exec/Start call failed.
   696  				go func() {
   697  					<-stdinCloseSync
   698  					p, err := c.getProcess(containerID, processID)
   699  					if err == nil {
   700  						err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
   701  						if err != nil && strings.Contains(err.Error(), "transport is closing") {
   702  							err = nil
   703  						}
   704  					}
   705  				}()
   706  			})
   707  			return err
   708  		})
   709  	}
   710  
   711  	rio, err := attachStdio(io)
   712  	if err != nil {
   713  		io.Cancel()
   714  		io.Close()
   715  	}
   716  	return rio, err
   717  }
   718  
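        // processEvent queues the event for delivery to the backend and, for exec
        // exit events, deletes the exec process and closes its fifos.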
   719  func (c *client) processEvent(ctr *container, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
   720  	c.eventQ.Append(ei.ContainerID, func() {
   721  		err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
   722  		if err != nil {
   723  			c.logger.WithError(err).WithFields(logrus.Fields{
   724  				"container":  ei.ContainerID,
   725  				"event":      et,
   726  				"event-info": ei,
   727  			}).Error("failed to process event")
   728  		}
   729  
   730  		if et == libcontainerdtypes.EventExit && ei.ProcessID != ei.ContainerID {
   731  			p := ctr.getProcess(ei.ProcessID)
   732  			if p == nil {
   733  				c.logger.WithError(errors.New("no such process")).
   734  					WithFields(logrus.Fields{
   735  						"container": ei.ContainerID,
   736  						"process":   ei.ProcessID,
   737  					}).Error("exit event")
   738  				return
   739  			}
   740  			_, err = p.Delete(context.Background())
   741  			if err != nil {
   742  				c.logger.WithError(err).WithFields(logrus.Fields{
   743  					"container": ei.ContainerID,
   744  					"process":   ei.ProcessID,
   745  				}).Warn("failed to delete process")
   746  			}
   747  			ctr.deleteProcess(ei.ProcessID)
   748  
   749  			ctr := c.getContainer(ei.ContainerID)
   750  			if ctr == nil {
   751  				c.logger.WithFields(logrus.Fields{
   752  					"container": ei.ContainerID,
   753  				}).Error("failed to find container")
   754  			} else {
   755  				newFIFOSet(ctr.bundleDir, ei.ProcessID, true, false).Close()
   756  			}
   757  		}
   758  	})
   759  }
   760  
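        // processEventStream subscribes to containerd task events for the given
        // namespace and dispatches them to the backend, resubscribing on stream
        // errors other than cancellation.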
   761  func (c *client) processEventStream(ctx context.Context, ns string) {
   762  	var (
   763  		err error
   764  		ev  *events.Envelope
   765  		et  libcontainerdtypes.EventType
   766  		ei  libcontainerdtypes.EventInfo
   767  		ctr *container
   768  	)
   769  
   770  	// Filter on both namespace *and* topic. To create an "and" filter,
   771  	// this must be a single, comma-separated string
   772  	eventStream, errC := c.client.EventService().Subscribe(ctx, "namespace=="+ns+",topic~=|^/tasks/|")
   773  
   774  	c.logger.Debug("processing event stream")
   775  
   776  	var oomKilled bool
   777  	for {
   778  		select {
   779  		case err = <-errC:
   780  			if err != nil {
   781  				errStatus, ok := status.FromError(err)
   782  				if !ok || errStatus.Code() != codes.Canceled {
   783  					c.logger.WithError(err).Error("failed to get event")
   784  					go c.processEventStream(ctx, ns)
   785  				} else {
   786  					c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
   787  				}
   788  			}
   789  			return
   790  		case ev = <-eventStream:
   791  			if ev.Event == nil {
   792  				c.logger.WithField("event", ev).Warn("invalid event")
   793  				continue
   794  			}
   795  
   796  			v, err := typeurl.UnmarshalAny(ev.Event)
   797  			if err != nil {
   798  				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
   799  				continue
   800  			}
   801  
   802  			c.logger.WithField("topic", ev.Topic).Debug("event")
   803  
   804  			switch t := v.(type) {
   805  			case *apievents.TaskCreate:
   806  				et = libcontainerdtypes.EventCreate
   807  				ei = libcontainerdtypes.EventInfo{
   808  					ContainerID: t.ContainerID,
   809  					ProcessID:   t.ContainerID,
   810  					Pid:         t.Pid,
   811  				}
   812  			case *apievents.TaskStart:
   813  				et = libcontainerdtypes.EventStart
   814  				ei = libcontainerdtypes.EventInfo{
   815  					ContainerID: t.ContainerID,
   816  					ProcessID:   t.ContainerID,
   817  					Pid:         t.Pid,
   818  				}
   819  			case *apievents.TaskExit:
   820  				et = libcontainerdtypes.EventExit
   821  				ei = libcontainerdtypes.EventInfo{
   822  					ContainerID: t.ContainerID,
   823  					ProcessID:   t.ID,
   824  					Pid:         t.Pid,
   825  					ExitCode:    t.ExitStatus,
   826  					ExitedAt:    t.ExitedAt,
   827  				}
   828  			case *apievents.TaskOOM:
   829  				et = libcontainerdtypes.EventOOM
   830  				ei = libcontainerdtypes.EventInfo{
   831  					ContainerID: t.ContainerID,
   832  					OOMKilled:   true,
   833  				}
   834  				oomKilled = true
   835  			case *apievents.TaskExecAdded:
   836  				et = libcontainerdtypes.EventExecAdded
   837  				ei = libcontainerdtypes.EventInfo{
   838  					ContainerID: t.ContainerID,
   839  					ProcessID:   t.ExecID,
   840  				}
   841  			case *apievents.TaskExecStarted:
   842  				et = libcontainerdtypes.EventExecStarted
   843  				ei = libcontainerdtypes.EventInfo{
   844  					ContainerID: t.ContainerID,
   845  					ProcessID:   t.ExecID,
   846  					Pid:         t.Pid,
   847  				}
   848  			case *apievents.TaskPaused:
   849  				et = libcontainerdtypes.EventPaused
   850  				ei = libcontainerdtypes.EventInfo{
   851  					ContainerID: t.ContainerID,
   852  				}
   853  			case *apievents.TaskResumed:
   854  				et = libcontainerdtypes.EventResumed
   855  				ei = libcontainerdtypes.EventInfo{
   856  					ContainerID: t.ContainerID,
   857  				}
   858  			default:
   859  				c.logger.WithFields(logrus.Fields{
   860  					"topic": ev.Topic,
   861  					"type":  reflect.TypeOf(t)},
   862  				).Info("ignoring event")
   863  				continue
   864  			}
   865  
   866  			ctr = c.getContainer(ei.ContainerID)
   867  			if ctr == nil {
   868  				c.logger.WithField("container", ei.ContainerID).Warn("unknown container")
   869  				continue
   870  			}
   871  
   872  			if oomKilled {
   873  				ctr.setOOMKilled(true)
   874  				oomKilled = false
   875  			}
   876  			ei.OOMKilled = ctr.getOOMKilled()
   877  
   878  			c.processEvent(ctr, et, ei)
   879  		}
   880  	}
   881  }
   882  
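        // writeContent copies r into the containerd content store under ref and
        // returns a descriptor for the written blob.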
   883  func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
   884  	writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
   885  	if err != nil {
   886  		return nil, err
   887  	}
   888  	defer writer.Close()
   889  	size, err := io.Copy(writer, r)
   890  	if err != nil {
   891  		return nil, err
   892  	}
   893  	labels := map[string]string{
   894  		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
   895  	}
   896  	if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
   897  		return nil, err
   898  	}
   899  	return &types.Descriptor{
   900  		MediaType: mediaType,
   901  		Digest:    writer.Digest(),
   902  		Size_:     size,
   903  	}, nil
   904  }
   905  
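        // wrapError converts containerd "not found" errors into Docker
        // errdefs.NotFound errors.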
   906  func wrapError(err error) error {
   907  	switch {
   908  	case err == nil:
   909  		return nil
   910  	case containerderrors.IsNotFound(err):
   911  		return errdefs.NotFound(err)
   912  	}
   913  
   914  	msg := err.Error()
   915  	for _, s := range []string{"container does not exist", "not found", "no such container"} {
   916  		if strings.Contains(msg, s) {
   917  			return errdefs.NotFound(err)
   918  		}
   919  	}
   920  	return err
   921  }