github.com/rawahars/moby@v24.0.4+incompatible/libcontainerd/remote/client.go

     1  package remote // import "github.com/docker/docker/libcontainerd/remote"
     2  
     3  import (
     4  	"context"
     5  	"encoding/json"
     6  	"io"
     7  	"os"
     8  	"path/filepath"
     9  	"reflect"
    10  	"runtime"
    11  	"strings"
    12  	"sync"
    13  	"syscall"
    14  	"time"
    15  
    16  	"github.com/containerd/containerd"
    17  	apievents "github.com/containerd/containerd/api/events"
    18  	"github.com/containerd/containerd/api/types"
    19  	"github.com/containerd/containerd/archive"
    20  	"github.com/containerd/containerd/cio"
    21  	"github.com/containerd/containerd/content"
    22  	cerrdefs "github.com/containerd/containerd/errdefs"
    23  	"github.com/containerd/containerd/events"
    24  	"github.com/containerd/containerd/images"
    25  	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
    26  	"github.com/containerd/typeurl/v2"
    27  	"github.com/docker/docker/errdefs"
    28  	"github.com/docker/docker/libcontainerd/queue"
    29  	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
    30  	"github.com/docker/docker/pkg/ioutils"
    31  	"github.com/hashicorp/go-multierror"
    32  	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    33  	specs "github.com/opencontainers/runtime-spec/specs-go"
    34  	"github.com/pkg/errors"
    35  	"github.com/sirupsen/logrus"
    36  	"google.golang.org/grpc/codes"
    37  	"google.golang.org/grpc/status"
    38  )
    39  
    40  // DockerContainerBundlePath is the label key pointing to the container's bundle path
    41  const DockerContainerBundlePath = "com.docker/engine.bundle.path"
    42  
    43  type client struct {
    44  	client   *containerd.Client
    45  	stateDir string
    46  	logger   *logrus.Entry
    47  	ns       string
    48  
    49  	backend libcontainerdtypes.Backend
    50  	eventQ  queue.Queue
    51  }
    52  
    53  type container struct {
    54  	client *client
    55  	c8dCtr containerd.Container
    56  
    57  	v2runcoptions *v2runcoptions.Options
    58  }
    59  
    60  type task struct {
    61  	containerd.Task
    62  	ctr *container
    63  
    64  	// Workaround for https://github.com/containerd/containerd/issues/8557.
    65  	// See also https://github.com/moby/moby/issues/45595.
    66  	serializeExecStartsWorkaround sync.Mutex
    67  }
    68  
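         // process wraps a containerd.Process (e.g. an exec) so that its methods can
         // translate containerd errors via wrapError.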
    69  type process struct {
    70  	containerd.Process
    71  }
    72  
    73  // NewClient creates a new libcontainerd client from a containerd client
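         //
         // A minimal usage sketch (the socket path, state dir and namespace below are
         // illustrative assumptions, not values mandated by this package):
         //
         //	cli, err := containerd.New("/run/containerd/containerd.sock")
         //	if err != nil {
         //		// handle error
         //	}
         //	lc, err := NewClient(ctx, cli, "/var/lib/docker/containerd", "moby", backend)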
    74  func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
    75  	c := &client{
    76  		client:   cli,
    77  		stateDir: stateDir,
    78  		logger:   logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
    79  		ns:       ns,
    80  		backend:  b,
    81  	}
    82  
    83  	go c.processEventStream(ctx, ns)
    84  
    85  	return c, nil
    86  }
    87  
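         // Version returns the version of the containerd daemon backing this client.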
    88  func (c *client) Version(ctx context.Context) (containerd.Version, error) {
    89  	return c.client.Version(ctx)
    90  }
    91  
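         // newTask returns a task wrapping t and bound to the container c.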
    92  func (c *container) newTask(t containerd.Task) *task {
    93  	return &task{Task: t, ctr: c}
    94  }
    95  
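         // AttachTask loads the existing containerd task for the container and
         // reattaches its IO through the attachStdio callback.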
    96  func (c *container) AttachTask(ctx context.Context, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, err error) {
    97  	var dio *cio.DirectIO
    98  	defer func() {
    99  		if err != nil && dio != nil {
   100  			dio.Cancel()
   101  			dio.Close()
   102  		}
   103  	}()
   104  
   105  	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
    106  		// dio must be assigned to the dio variable defined above so that the
    107  		// deferred cleanup can handle it on error
   108  		dio, err = c.client.newDirectIO(ctx, fifos)
   109  		if err != nil {
   110  			return nil, err
   111  		}
   112  		return attachStdio(dio)
   113  	}
   114  	t, err := c.c8dCtr.Task(ctx, attachIO)
   115  	if err != nil {
   116  		return nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
   117  	}
   118  	return c.newTask(t), nil
   119  }
   120  
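         // NewContainer creates a new containerd container with the given OCI spec,
         // runtime (shim) and options. The container's bundle directory is derived
         // from the client's state dir.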
   121  func (c *client) NewContainer(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
   122  	bdir := c.bundleDir(id)
   123  	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
   124  
   125  	newOpts := []containerd.NewContainerOpts{
   126  		containerd.WithSpec(ociSpec),
   127  		containerd.WithRuntime(shim, runtimeOptions),
   128  		WithBundle(bdir, ociSpec),
   129  	}
   130  	opts = append(opts, newOpts...)
   131  
   132  	ctr, err := c.client.NewContainer(ctx, id, opts...)
   133  	if err != nil {
   134  		if cerrdefs.IsAlreadyExists(err) {
   135  			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   136  		}
   137  		return nil, wrapError(err)
   138  	}
   139  
   140  	created := container{
   141  		client: c,
   142  		c8dCtr: ctr,
   143  	}
   144  	if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
   145  		created.v2runcoptions = x
   146  	}
   147  	return &created, nil
   148  }
   149  
    150  // Start creates and starts a task for the specified containerd id
   151  func (c *container) Start(ctx context.Context, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
   152  	var (
   153  		checkpoint     *types.Descriptor
   154  		t              containerd.Task
   155  		rio            cio.IO
   156  		stdinCloseSync = make(chan containerd.Process, 1)
   157  	)
   158  
   159  	if checkpointDir != "" {
   160  		// write checkpoint to the content store
   161  		tar := archive.Diff(ctx, "", checkpointDir)
   162  		var err error
   163  		checkpoint, err = c.client.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
   164  		// remove the checkpoint when we're done
   165  		defer func() {
   166  			if checkpoint != nil {
   167  				err := c.client.client.ContentStore().Delete(ctx, checkpoint.Digest)
   168  				if err != nil {
   169  					c.client.logger.WithError(err).WithFields(logrus.Fields{
   170  						"ref":    checkpointDir,
   171  						"digest": checkpoint.Digest,
   172  					}).Warnf("failed to delete temporary checkpoint entry")
   173  				}
   174  			}
   175  		}()
   176  		if err := tar.Close(); err != nil {
   177  			return nil, errors.Wrap(err, "failed to close checkpoint tar stream")
   178  		}
   179  		if err != nil {
   180  			return nil, errors.Wrapf(err, "failed to upload checkpoint to containerd")
   181  		}
   182  	}
   183  
   184  	// Optimization: assume the relevant metadata has not changed in the
   185  	// moment since the container was created. Elide redundant RPC requests
   186  	// to refresh the metadata separately for spec and labels.
   187  	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
   188  	if err != nil {
   189  		return nil, errors.Wrap(err, "failed to retrieve metadata")
   190  	}
   191  	bundle := md.Labels[DockerContainerBundlePath]
   192  
   193  	var spec specs.Spec
   194  	if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
   195  		return nil, errors.Wrap(err, "failed to retrieve spec")
   196  	}
   197  	uid, gid := getSpecUser(&spec)
   198  
   199  	taskOpts := []containerd.NewTaskOpts{
   200  		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
   201  			info.Checkpoint = checkpoint
   202  			return nil
   203  		},
   204  	}
   205  
   206  	if runtime.GOOS != "windows" {
   207  		taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
   208  			if c.v2runcoptions != nil {
   209  				opts := *c.v2runcoptions
   210  				opts.IoUid = uint32(uid)
   211  				opts.IoGid = uint32(gid)
   212  				info.Options = &opts
   213  			}
   214  			return nil
   215  		})
   216  	} else {
   217  		taskOpts = append(taskOpts, withLogLevel(c.client.logger.Level))
   218  	}
   219  
   220  	t, err = c.c8dCtr.NewTask(ctx,
   221  		func(id string) (cio.IO, error) {
   222  			fifos := newFIFOSet(bundle, id, withStdin, spec.Process.Terminal)
   223  
   224  			rio, err = c.createIO(fifos, stdinCloseSync, attachStdio)
   225  			return rio, err
   226  		},
   227  		taskOpts...,
   228  	)
   229  	if err != nil {
   230  		close(stdinCloseSync)
   231  		if rio != nil {
   232  			rio.Cancel()
   233  			rio.Close()
   234  		}
   235  		return nil, errors.Wrap(wrapError(err), "failed to create task for container")
   236  	}
   237  
   238  	// Signal c.createIO that it can call CloseIO
   239  	stdinCloseSync <- t
   240  
   241  	if err := t.Start(ctx); err != nil {
   242  		// Only Stopped tasks can be deleted. Created tasks have to be
   243  		// killed first, to transition them to Stopped.
   244  		if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil {
   245  			c.client.logger.WithError(err).WithField("container", c.c8dCtr.ID()).
    246  				Error("failed to delete task after failed start")
   247  		}
   248  		return nil, wrapError(err)
   249  	}
   250  
   251  	return c.newTask(t), nil
   252  }
   253  
    254  // Exec creates an exec process.
    255  //
    256  // The containerd client calls Exec to register the exec config on the shim side.
    257  // When the client calls Start, the shim creates the stdin fifo if needed. But
    258  // for the container's main process, the stdin fifo is created in Create, not in
    259  // the Start call. The stdinCloseSync channel should be closed after the exec
    260  // process has been started.
   261  func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
   262  	var (
   263  		p              containerd.Process
   264  		rio            cio.IO
   265  		stdinCloseSync = make(chan containerd.Process, 1)
   266  	)
   267  
   268  	// Optimization: assume the DockerContainerBundlePath label has not been
   269  	// updated since the container metadata was last loaded/refreshed.
   270  	md, err := t.ctr.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
   271  	if err != nil {
   272  		return nil, wrapError(err)
   273  	}
   274  
   275  	fifos := newFIFOSet(md.Labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)
   276  
   277  	defer func() {
   278  		if err != nil {
   279  			if rio != nil {
   280  				rio.Cancel()
   281  				rio.Close()
   282  			}
   283  		}
   284  	}()
   285  
   286  	p, err = t.Task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
   287  		rio, err = t.ctr.createIO(fifos, stdinCloseSync, attachStdio)
   288  		return rio, err
   289  	})
   290  	if err != nil {
   291  		close(stdinCloseSync)
   292  		if cerrdefs.IsAlreadyExists(err) {
   293  			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
   294  		}
   295  		return nil, wrapError(err)
   296  	}
   297  
   298  	// Signal c.createIO that it can call CloseIO
   299  	//
    300  	// The stdin of the exec process will be created after p.Start in containerd.
   301  	defer func() { stdinCloseSync <- p }()
   302  
   303  	err = func() error {
   304  		t.serializeExecStartsWorkaround.Lock()
   305  		defer t.serializeExecStartsWorkaround.Unlock()
   306  		return p.Start(ctx)
   307  	}()
   308  	if err != nil {
    309  		// Use a new context for cleanup because the old one may have been cancelled by the user, but keep a
    310  		// timeout to make sure we are not waiting forever if containerd is unresponsive, and to work around
    311  		// fifo-cancelling issues in older containerd-shim versions.
   312  		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
   313  		defer cancel()
   314  		p.Delete(ctx)
   315  		return nil, wrapError(err)
   316  	}
   317  	return process{p}, nil
   318  }
   319  
   320  func (t *task) Kill(ctx context.Context, signal syscall.Signal) error {
   321  	return wrapError(t.Task.Kill(ctx, signal))
   322  }
   323  
   324  func (p process) Kill(ctx context.Context, signal syscall.Signal) error {
   325  	return wrapError(p.Process.Kill(ctx, signal))
   326  }
   327  
   328  func (t *task) Pause(ctx context.Context) error {
   329  	return wrapError(t.Task.Pause(ctx))
   330  }
   331  
   332  func (t *task) Resume(ctx context.Context) error {
   333  	return wrapError(t.Task.Resume(ctx))
   334  }
   335  
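         // Stats returns a point-in-time snapshot of the task's metrics, decoded
         // from containerd's typeurl-encoded payload.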
   336  func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
   337  	m, err := t.Metrics(ctx)
   338  	if err != nil {
   339  		return nil, err
   340  	}
   341  
   342  	v, err := typeurl.UnmarshalAny(m.Data)
   343  	if err != nil {
   344  		return nil, err
   345  	}
   346  	return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
   347  }
   348  
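         // Summary returns details for every process running in the task.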
   349  func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
   350  	pis, err := t.Pids(ctx)
   351  	if err != nil {
   352  		return nil, err
   353  	}
   354  
   355  	var infos []libcontainerdtypes.Summary
   356  	for _, pi := range pis {
   357  		i, err := typeurl.UnmarshalAny(pi.Info)
   358  		if err != nil {
   359  			return nil, errors.Wrap(err, "unable to decode process details")
   360  		}
   361  		s, err := summaryFromInterface(i)
   362  		if err != nil {
   363  			return nil, err
   364  		}
   365  		infos = append(infos, *s)
   366  	}
   367  
   368  	return infos, nil
   369  }
   370  
   371  func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
   372  	s, err := t.Task.Delete(ctx)
   373  	return s, wrapError(err)
   374  }
   375  
   376  func (p process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
   377  	s, err := p.Process.Delete(ctx)
   378  	return s, wrapError(err)
   379  }
   380  
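         // Delete removes the containerd container and, unless the LIBCONTAINERD_NOCLEAN
         // environment variable is set to "1", its on-disk bundle directory.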
   381  func (c *container) Delete(ctx context.Context) error {
   382  	// Optimization: assume the DockerContainerBundlePath label has not been
   383  	// updated since the container metadata was last loaded/refreshed.
   384  	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
   385  	if err != nil {
   386  		return err
   387  	}
   388  	bundle := md.Labels[DockerContainerBundlePath]
   389  	if err := c.c8dCtr.Delete(ctx); err != nil {
   390  		return wrapError(err)
   391  	}
   392  	if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
   393  		if err := os.RemoveAll(bundle); err != nil {
   394  			c.client.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
   395  				"container": c.c8dCtr.ID(),
   396  				"bundle":    bundle,
   397  			}).Error("failed to remove state dir")
   398  		}
   399  	}
   400  	return nil
   401  }
   402  
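         // ForceDelete deletes the task, killing its process first if it is still running.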
   403  func (t *task) ForceDelete(ctx context.Context) error {
   404  	_, err := t.Task.Delete(ctx, containerd.WithProcessKill)
   405  	return wrapError(err)
   406  }
   407  
   408  func (t *task) Status(ctx context.Context) (containerd.Status, error) {
   409  	s, err := t.Task.Status(ctx)
   410  	return s, wrapError(err)
   411  }
   412  
   413  func (p process) Status(ctx context.Context) (containerd.Status, error) {
   414  	s, err := p.Process.Status(ctx)
   415  	return s, wrapError(err)
   416  }
   417  
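         // getCheckpointOptions returns a CheckpointTaskOpts that controls whether the
         // container exits after the checkpoint is taken.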
   418  func (c *container) getCheckpointOptions(exit bool) containerd.CheckpointTaskOpts {
   419  	return func(r *containerd.CheckpointTaskInfo) error {
   420  		if r.Options == nil && c.v2runcoptions != nil {
   421  			r.Options = &v2runcoptions.CheckpointOptions{}
   422  		}
   423  
   424  		switch opts := r.Options.(type) {
   425  		case *v2runcoptions.CheckpointOptions:
   426  			opts.Exit = exit
   427  		}
   428  
   429  		return nil
   430  	}
   431  }
   432  
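         // CreateCheckpoint checkpoints the task through containerd and extracts the
         // checkpoint data from the content store into checkpointDir. The intermediate
         // checkpoint image is always deleted afterwards.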
   433  func (t *task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
   434  	img, err := t.Task.Checkpoint(ctx, t.ctr.getCheckpointOptions(exit))
   435  	if err != nil {
   436  		return wrapError(err)
   437  	}
   438  	// Whatever happens, delete the checkpoint from containerd
   439  	defer func() {
   440  		err := t.ctr.client.client.ImageService().Delete(ctx, img.Name())
   441  		if err != nil {
   442  			t.ctr.client.logger.WithError(err).WithField("digest", img.Target().Digest).
   443  				Warnf("failed to delete checkpoint image")
   444  		}
   445  	}()
   446  
   447  	b, err := content.ReadBlob(ctx, t.ctr.client.client.ContentStore(), img.Target())
   448  	if err != nil {
   449  		return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
   450  	}
   451  	var index ocispec.Index
   452  	if err := json.Unmarshal(b, &index); err != nil {
   453  		return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
   454  	}
   455  
   456  	var cpDesc *ocispec.Descriptor
   457  	for _, m := range index.Manifests {
   458  		m := m
   459  		if m.MediaType == images.MediaTypeContainerd1Checkpoint {
   460  			cpDesc = &m //nolint:gosec
   461  			break
   462  		}
   463  	}
   464  	if cpDesc == nil {
    465  		return errdefs.System(errors.New("invalid checkpoint"))
   466  	}
   467  
   468  	rat, err := t.ctr.client.client.ContentStore().ReaderAt(ctx, *cpDesc)
   469  	if err != nil {
   470  		return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
   471  	}
   472  	defer rat.Close()
   473  	_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
   474  	if err != nil {
    475  		return errdefs.System(errors.Wrapf(err, "failed to apply checkpoint"))
   476  	}
   477  
   478  	return err
   479  }
   480  
   481  // LoadContainer loads the containerd container.
   482  func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
   483  	ctr, err := c.client.LoadContainer(ctx, id)
   484  	if err != nil {
   485  		if cerrdefs.IsNotFound(err) {
   486  			return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
   487  		}
   488  		return nil, wrapError(err)
   489  	}
   490  	return &container{client: c, c8dCtr: ctr}, nil
   491  }
   492  
   493  func (c *container) Task(ctx context.Context) (libcontainerdtypes.Task, error) {
   494  	t, err := c.c8dCtr.Task(ctx, nil)
   495  	if err != nil {
   496  		return nil, wrapError(err)
   497  	}
   498  	return c.newTask(t), nil
   499  }
   500  
    501  // createIO creates the io to be used by a process.
    502  // This needs to get a pointer to interface because, upon closure, the process may not yet have been registered.
   503  func (c *container) createIO(fifos *cio.FIFOSet, stdinCloseSync chan containerd.Process, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
   504  	var (
   505  		io  *cio.DirectIO
   506  		err error
   507  	)
   508  	io, err = c.client.newDirectIO(context.Background(), fifos)
   509  	if err != nil {
   510  		return nil, err
   511  	}
   512  
   513  	if io.Stdin != nil {
   514  		var (
   515  			closeErr  error
   516  			stdinOnce sync.Once
   517  		)
   518  		pipe := io.Stdin
   519  		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
   520  			stdinOnce.Do(func() {
   521  				closeErr = pipe.Close()
   522  
   523  				select {
   524  				case p, ok := <-stdinCloseSync:
   525  					if !ok {
   526  						return
   527  					}
   528  					if err := closeStdin(context.Background(), p); err != nil {
   529  						if closeErr != nil {
   530  							closeErr = multierror.Append(closeErr, err)
   531  						} else {
   532  							// Avoid wrapping a single error in a multierror.
   533  							closeErr = err
   534  						}
   535  					}
   536  				default:
   537  					// The process wasn't ready. Close its stdin asynchronously.
   538  					go func() {
   539  						p, ok := <-stdinCloseSync
   540  						if !ok {
   541  							return
   542  						}
   543  						if err := closeStdin(context.Background(), p); err != nil {
   544  							c.client.logger.WithError(err).
   545  								WithField("container", c.c8dCtr.ID()).
   546  								Error("failed to close container stdin")
   547  						}
   548  					}()
   549  				}
   550  			})
   551  			return closeErr
   552  		})
   553  	}
   554  
   555  	rio, err := attachStdio(io)
   556  	if err != nil {
   557  		io.Cancel()
   558  		io.Close()
   559  	}
   560  	return rio, err
   561  }
   562  
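         // closeStdin asks containerd to close the process's stdin, ignoring
         // "transport is closing" errors from a connection that is already shutting down.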
   563  func closeStdin(ctx context.Context, p containerd.Process) error {
   564  	err := p.CloseIO(ctx, containerd.WithStdinCloser)
   565  	if err != nil && strings.Contains(err.Error(), "transport is closing") {
   566  		err = nil
   567  	}
   568  	return err
   569  }
   570  
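         // processEvent queues the event for delivery to the backend, keyed by container ID.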
   571  func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
   572  	c.eventQ.Append(ei.ContainerID, func() {
   573  		err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
   574  		if err != nil {
   575  			c.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
   576  				"container":  ei.ContainerID,
   577  				"event":      et,
   578  				"event-info": ei,
   579  			}).Error("failed to process event")
   580  		}
   581  	})
   582  }
   583  
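         // waitServe polls containerd's IsServing endpoint with a small delay until it
         // reports ready, returning false if the context is cancelled or its deadline expires.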
   584  func (c *client) waitServe(ctx context.Context) bool {
   585  	t := 100 * time.Millisecond
   586  	delay := time.NewTimer(t)
   587  	if !delay.Stop() {
   588  		<-delay.C
   589  	}
   590  	defer delay.Stop()
   591  
   592  	// `IsServing` will actually block until the service is ready.
   593  	// However it can return early, so we'll loop with a delay to handle it.
   594  	for {
   595  		serving, err := c.client.IsServing(ctx)
   596  		if err != nil {
   597  			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
   598  				return false
   599  			}
   600  			logrus.WithError(err).Warn("Error while testing if containerd API is ready")
   601  		}
   602  
   603  		if serving {
   604  			return true
   605  		}
   606  
   607  		delay.Reset(t)
   608  		select {
   609  		case <-ctx.Done():
   610  			return false
   611  		case <-delay.C:
   612  		}
   613  	}
   614  }
   615  
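         // processEventStream subscribes to containerd task events in the given namespace
         // and dispatches them to the backend. On stream errors it waits for containerd to
         // become ready again and restarts itself with the original context.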
   616  func (c *client) processEventStream(ctx context.Context, ns string) {
   617  	var (
   618  		err error
   619  		ev  *events.Envelope
   620  		et  libcontainerdtypes.EventType
   621  		ei  libcontainerdtypes.EventInfo
   622  	)
   623  
   624  	// Create a new context specifically for this subscription.
   625  	// The context must be cancelled to cancel the subscription.
    626  	// In cases where we have to restart event stream processing,
    627  	// we'll need the original context because this one will be cancelled.
   628  	subCtx, cancel := context.WithCancel(ctx)
   629  	defer cancel()
   630  
   631  	// Filter on both namespace *and* topic. To create an "and" filter,
   632  	// this must be a single, comma-separated string
   633  	eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")
   634  
   635  	c.logger.Debug("processing event stream")
   636  
   637  	for {
   638  		select {
   639  		case err = <-errC:
   640  			if err != nil {
   641  				errStatus, ok := status.FromError(err)
   642  				if !ok || errStatus.Code() != codes.Canceled {
   643  					c.logger.WithError(err).Error("Failed to get event")
   644  					c.logger.Info("Waiting for containerd to be ready to restart event processing")
   645  					if c.waitServe(ctx) {
   646  						go c.processEventStream(ctx, ns)
   647  						return
   648  					}
   649  				}
   650  				c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
   651  			}
   652  			return
   653  		case ev = <-eventStream:
   654  			if ev.Event == nil {
   655  				c.logger.WithField("event", ev).Warn("invalid event")
   656  				continue
   657  			}
   658  
   659  			v, err := typeurl.UnmarshalAny(ev.Event)
   660  			if err != nil {
   661  				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
   662  				continue
   663  			}
   664  
   665  			c.logger.WithField("topic", ev.Topic).Debug("event")
   666  
   667  			switch t := v.(type) {
   668  			case *apievents.TaskCreate:
   669  				et = libcontainerdtypes.EventCreate
   670  				ei = libcontainerdtypes.EventInfo{
   671  					ContainerID: t.ContainerID,
   672  					ProcessID:   t.ContainerID,
   673  					Pid:         t.Pid,
   674  				}
   675  			case *apievents.TaskStart:
   676  				et = libcontainerdtypes.EventStart
   677  				ei = libcontainerdtypes.EventInfo{
   678  					ContainerID: t.ContainerID,
   679  					ProcessID:   t.ContainerID,
   680  					Pid:         t.Pid,
   681  				}
   682  			case *apievents.TaskExit:
   683  				et = libcontainerdtypes.EventExit
   684  				ei = libcontainerdtypes.EventInfo{
   685  					ContainerID: t.ContainerID,
   686  					ProcessID:   t.ID,
   687  					Pid:         t.Pid,
   688  					ExitCode:    t.ExitStatus,
   689  					ExitedAt:    t.ExitedAt,
   690  				}
   691  			case *apievents.TaskOOM:
   692  				et = libcontainerdtypes.EventOOM
   693  				ei = libcontainerdtypes.EventInfo{
   694  					ContainerID: t.ContainerID,
   695  				}
   696  			case *apievents.TaskExecAdded:
   697  				et = libcontainerdtypes.EventExecAdded
   698  				ei = libcontainerdtypes.EventInfo{
   699  					ContainerID: t.ContainerID,
   700  					ProcessID:   t.ExecID,
   701  				}
   702  			case *apievents.TaskExecStarted:
   703  				et = libcontainerdtypes.EventExecStarted
   704  				ei = libcontainerdtypes.EventInfo{
   705  					ContainerID: t.ContainerID,
   706  					ProcessID:   t.ExecID,
   707  					Pid:         t.Pid,
   708  				}
   709  			case *apievents.TaskPaused:
   710  				et = libcontainerdtypes.EventPaused
   711  				ei = libcontainerdtypes.EventInfo{
   712  					ContainerID: t.ContainerID,
   713  				}
   714  			case *apievents.TaskResumed:
   715  				et = libcontainerdtypes.EventResumed
   716  				ei = libcontainerdtypes.EventInfo{
   717  					ContainerID: t.ContainerID,
   718  				}
   719  			case *apievents.TaskDelete:
   720  				c.logger.WithFields(logrus.Fields{
   721  					"topic":     ev.Topic,
   722  					"type":      reflect.TypeOf(t),
   723  					"container": t.ContainerID},
   724  				).Info("ignoring event")
   725  				continue
   726  			default:
   727  				c.logger.WithFields(logrus.Fields{
   728  					"topic": ev.Topic,
   729  					"type":  reflect.TypeOf(t)},
   730  				).Info("ignoring event")
   731  				continue
   732  			}
   733  
   734  			c.processEvent(ctx, et, ei)
   735  		}
   736  	}
   737  }
   738  
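         // writeContent writes the data from r to the content store under the given ref
         // and returns a descriptor for the committed blob.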
   739  func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
   740  	writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
   741  	if err != nil {
   742  		return nil, err
   743  	}
   744  	defer writer.Close()
   745  	size, err := io.Copy(writer, r)
   746  	if err != nil {
   747  		return nil, err
   748  	}
   749  	labels := map[string]string{
   750  		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
   751  	}
   752  	if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
   753  		return nil, err
   754  	}
   755  	return &types.Descriptor{
   756  		MediaType: mediaType,
   757  		Digest:    writer.Digest(),
   758  		Size_:     size,
   759  	}, nil
   760  }
   761  
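         // bundleDir returns the path of the container's bundle directory under the
         // client's state dir.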
   762  func (c *client) bundleDir(id string) string {
   763  	return filepath.Join(c.stateDir, id)
   764  }
   765  
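         // wrapError translates containerd "not found" errors (and error messages that
         // indicate a missing container) into errdefs.NotFound; other errors are returned
         // unchanged.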
   766  func wrapError(err error) error {
   767  	switch {
   768  	case err == nil:
   769  		return nil
   770  	case cerrdefs.IsNotFound(err):
   771  		return errdefs.NotFound(err)
   772  	}
   773  
   774  	msg := err.Error()
   775  	for _, s := range []string{"container does not exist", "not found", "no such container"} {
   776  		if strings.Contains(msg, s) {
   777  			return errdefs.NotFound(err)
   778  		}
   779  	}
   780  	return err
   781  }