github.com/Prakhar-Agarwal-byte/moby@v0.0.0-20231027092010-a14e3e8ab87e/libcontainerd/remote/client.go

package remote // import "github.com/Prakhar-Agarwal-byte/moby/libcontainerd/remote"

import (
	"context"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/containerd/containerd"
	apievents "github.com/containerd/containerd/api/events"
	"github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/archive"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/content"
	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/protobuf"
	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
	"github.com/containerd/log"
	"github.com/containerd/typeurl/v2"
	"github.com/Prakhar-Agarwal-byte/moby/errdefs"
	"github.com/Prakhar-Agarwal-byte/moby/libcontainerd/queue"
	libcontainerdtypes "github.com/Prakhar-Agarwal-byte/moby/libcontainerd/types"
	"github.com/Prakhar-Agarwal-byte/moby/pkg/ioutils"
	"github.com/hashicorp/go-multierror"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
)

// DockerContainerBundlePath is the label key pointing to the container's bundle path
const DockerContainerBundlePath = "com.docker/engine.bundle.path"

type client struct {
	client   *containerd.Client
	stateDir string
	logger   *log.Entry
	ns       string

	backend libcontainerdtypes.Backend
	eventQ  queue.Queue
}

type container struct {
	client *client
	c8dCtr containerd.Container

	v2runcoptions *v2runcoptions.Options
}

type task struct {
	containerd.Task
	ctr *container

	// Workaround for https://github.com/containerd/containerd/issues/8557.
	// See also https://github.com/moby/moby/issues/45595.
	serializeExecStartsWorkaround sync.Mutex
}

type process struct {
	containerd.Process
}

// NewClient creates a new libcontainerd client from a containerd client
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
	c := &client{
		client:   cli,
		stateDir: stateDir,
		logger:   log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns),
		ns:       ns,
		backend:  b,
	}

	go c.processEventStream(ctx, ns)

	return c, nil
}
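
// An illustrative construction sketch (not part of this package's API
// surface): the socket path, state directory, and namespace below are
// assumptions, and "backend" stands for any libcontainerdtypes.Backend
// implementation.
//
//	cli, err := containerd.New("/run/containerd/containerd.sock")
//	if err != nil {
//		// handle error
//	}
//	client, err := NewClient(ctx, cli, "/var/run/docker/libcontainerd", "moby", backend)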

func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return c.client.Version(ctx)
}

func (c *container) newTask(t containerd.Task) *task {
	return &task{Task: t, ctr: c}
}

func (c *container) AttachTask(ctx context.Context, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, err error) {
	var dio *cio.DirectIO
	defer func() {
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
	}()

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// The result must be assigned to the dio variable declared above so
		// that the deferred cleanup can cancel and close it on error.
		dio, err = c.client.newDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}
	t, err := c.c8dCtr.Task(ctx, attachIO)
	if err != nil {
		return nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}
	return c.newTask(t), nil
}

func (c *client) NewContainer(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
	bdir := c.bundleDir(id)
	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")

	newOpts := []containerd.NewContainerOpts{
		containerd.WithSpec(ociSpec),
		containerd.WithRuntime(shim, runtimeOptions),
		WithBundle(bdir, ociSpec),
	}
	opts = append(opts, newOpts...)

	ctr, err := c.client.NewContainer(ctx, id, opts...)
	if err != nil {
		if cerrdefs.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	created := container{
		client: c,
		c8dCtr: ctr,
	}
	if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
		created.v2runcoptions = x
	}
	return &created, nil
}

// Start creates and starts a task for the specified containerd id
func (c *container) Start(ctx context.Context, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	var (
		checkpoint     *types.Descriptor
		t              containerd.Task
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	if checkpointDir != "" {
		// write checkpoint to the content store
		tar := archive.Diff(ctx, "", checkpointDir)
		var err error
		checkpoint, err = c.client.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
		// remove the checkpoint when we're done
		defer func() {
			if checkpoint != nil {
				err := c.client.client.ContentStore().Delete(ctx, digest.Digest(checkpoint.Digest))
				if err != nil {
					c.client.logger.WithError(err).WithFields(log.Fields{
						"ref":    checkpointDir,
						"digest": checkpoint.Digest,
					}).Warnf("failed to delete temporary checkpoint entry")
				}
			}
		}()
		if err := tar.Close(); err != nil {
			return nil, errors.Wrap(err, "failed to close checkpoint tar stream")
		}
		if err != nil {
			return nil, errors.Wrapf(err, "failed to upload checkpoint to containerd")
		}
	}

	// Optimization: assume the relevant metadata has not changed in the
	// moment since the container was created. Elide redundant RPC requests
	// to refresh the metadata separately for spec and labels.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve metadata")
	}
	bundle := md.Labels[DockerContainerBundlePath]

	var spec specs.Spec
	if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
		return nil, errors.Wrap(err, "failed to retrieve spec")
	}
	uid, gid := getSpecUser(&spec)

	taskOpts := []containerd.NewTaskOpts{
		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			info.Checkpoint = checkpoint
			return nil
		},
	}

	if runtime.GOOS != "windows" {
		taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			if c.v2runcoptions != nil {
				opts := proto.Clone(c.v2runcoptions).(*v2runcoptions.Options)
				opts.IoUid = uint32(uid)
				opts.IoGid = uint32(gid)
				info.Options = opts
			}
			return nil
		})
	} else {
		taskOpts = append(taskOpts, withLogLevel(c.client.logger.Level))
	}

	t, err = c.c8dCtr.NewTask(ctx,
		func(id string) (cio.IO, error) {
			fifos := newFIFOSet(bundle, id, withStdin, spec.Process.Terminal)

			rio, err = c.createIO(fifos, stdinCloseSync, attachStdio)
			return rio, err
		},
		taskOpts...,
	)
	if err != nil {
		close(stdinCloseSync)
		if rio != nil {
			rio.Cancel()
			rio.Close()
		}
		return nil, errors.Wrap(wrapError(err), "failed to create task for container")
	}

	// Signal c.createIO that it can call CloseIO
	stdinCloseSync <- t

	if err := t.Start(ctx); err != nil {
		// Only Stopped tasks can be deleted. Created tasks have to be
		// killed first, to transition them to Stopped.
		if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil {
			c.client.logger.WithError(err).WithField("container", c.c8dCtr.ID()).
				Error("failed to delete task after failed start")
		}
		return nil, wrapError(err)
	}

	return c.newTask(t), nil
}

// Exec creates an exec process.
//
// The containerd client calls Exec to register the exec config on the shim
// side. When the client calls Start, the shim creates the stdin fifo if
// needed. For the container's main process, however, the stdin fifo is
// created in Create, not in the Start call. The stdinCloseSync channel
// should be closed after the exec process has been started.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
	var (
		p              containerd.Process
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := t.ctr.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, wrapError(err)
	}

	fifos := newFIFOSet(md.Labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)

	defer func() {
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		rio, err = t.ctr.createIO(fifos, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		close(stdinCloseSync)
		if cerrdefs.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO.
	//
	// The stdin of the exec process will be created after p.Start in containerd.
	defer func() { stdinCloseSync <- p }()

	err = func() error {
		t.serializeExecStartsWorkaround.Lock()
		defer t.serializeExecStartsWorkaround.Unlock()
		return p.Start(ctx)
	}()
	if err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		p.Delete(ctx)
		return nil, wrapError(err)
	}
	return process{p}, nil
}

func (t *task) Kill(ctx context.Context, signal syscall.Signal) error {
	return wrapError(t.Task.Kill(ctx, signal))
}

func (p process) Kill(ctx context.Context, signal syscall.Signal) error {
	return wrapError(p.Process.Kill(ctx, signal))
}

func (t *task) Pause(ctx context.Context) error {
	return wrapError(t.Task.Pause(ctx))
}

func (t *task) Resume(ctx context.Context) error {
	return wrapError(t.Task.Resume(ctx))
}

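// Stats collects the task's metrics from containerd and converts them into
// the daemon's Stats type.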
func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
	m, err := t.Metrics(ctx)
	if err != nil {
		return nil, err
	}

	v, err := typeurl.UnmarshalAny(m.Data)
	if err != nil {
		return nil, err
	}
	return libcontainerdtypes.InterfaceToStats(protobuf.FromTimestamp(m.Timestamp), v), nil
}

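// Summary returns one Summary entry per process running in the task, decoded
// from the platform-specific process details reported by containerd.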
func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
	pis, err := t.Pids(ctx)
	if err != nil {
		return nil, err
	}

	var infos []libcontainerdtypes.Summary
	for _, pi := range pis {
		i, err := typeurl.UnmarshalAny(pi.Info)
		if err != nil {
			return nil, errors.Wrap(err, "unable to decode process details")
		}
		s, err := summaryFromInterface(i)
		if err != nil {
			return nil, err
		}
		infos = append(infos, *s)
	}

	return infos, nil
}

func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	s, err := t.Task.Delete(ctx)
	return s, wrapError(err)
}

func (p process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	s, err := p.Process.Delete(ctx)
	return s, wrapError(err)
}

func (c *container) Delete(ctx context.Context) error {
	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return err
	}
	bundle := md.Labels[DockerContainerBundlePath]
	if err := c.c8dCtr.Delete(ctx); err != nil {
		return wrapError(err)
	}
	if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
		if err := os.RemoveAll(bundle); err != nil {
			c.client.logger.WithContext(ctx).WithError(err).WithFields(log.Fields{
				"container": c.c8dCtr.ID(),
				"bundle":    bundle,
			}).Error("failed to remove state dir")
		}
	}
	return nil
}

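// ForceDelete kills the task's processes and deletes the task from containerd.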
func (t *task) ForceDelete(ctx context.Context) error {
	_, err := t.Task.Delete(ctx, containerd.WithProcessKill)
	return wrapError(err)
}

func (t *task) Status(ctx context.Context) (containerd.Status, error) {
	s, err := t.Task.Status(ctx)
	return s, wrapError(err)
}

func (p process) Status(ctx context.Context) (containerd.Status, error) {
	s, err := p.Process.Status(ctx)
	return s, wrapError(err)
}

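// getCheckpointOptions returns a CheckpointTaskOpts that configures the runc
// checkpoint options, in particular whether the container should exit once
// the checkpoint has been taken.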
func (c *container) getCheckpointOptions(exit bool) containerd.CheckpointTaskOpts {
	return func(r *containerd.CheckpointTaskInfo) error {
		if r.Options == nil && c.v2runcoptions != nil {
			r.Options = &v2runcoptions.CheckpointOptions{}
		}

		switch opts := r.Options.(type) {
		case *v2runcoptions.CheckpointOptions:
			opts.Exit = exit
		}

		return nil
	}
}

func (t *task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
	img, err := t.Task.Checkpoint(ctx, t.ctr.getCheckpointOptions(exit))
	if err != nil {
		return wrapError(err)
	}
	// Whatever happens, delete the checkpoint from containerd
	defer func() {
		err := t.ctr.client.client.ImageService().Delete(ctx, img.Name())
		if err != nil {
			t.ctr.client.logger.WithError(err).WithField("digest", img.Target().Digest).
				Warnf("failed to delete checkpoint image")
		}
	}()

	b, err := content.ReadBlob(ctx, t.ctr.client.client.ContentStore(), img.Target())
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
	}
	var index ocispec.Index
	if err := json.Unmarshal(b, &index); err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
	}

	var cpDesc *ocispec.Descriptor
	for _, m := range index.Manifests {
		m := m
		if m.MediaType == images.MediaTypeContainerd1Checkpoint {
			cpDesc = &m //nolint:gosec
			break
		}
	}
	if cpDesc == nil {
		// err is nil at this point, so wrapping it would return a nil error;
		// report the invalid checkpoint explicitly.
		return errdefs.System(errors.New("invalid checkpoint"))
	}

	rat, err := t.ctr.client.client.ContentStore().ReaderAt(ctx, *cpDesc)
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
	}
	defer rat.Close()
	_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
	}

	return err
}

// LoadContainer loads the containerd container.
func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
	ctr, err := c.client.LoadContainer(ctx, id)
	if err != nil {
		if cerrdefs.IsNotFound(err) {
			return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
		}
		return nil, wrapError(err)
	}
	return &container{client: c, c8dCtr: ctr}, nil
}

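// Task returns the container's existing task, without attaching any IO.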
func (c *container) Task(ctx context.Context) (libcontainerdtypes.Task, error) {
	t, err := c.c8dCtr.Task(ctx, nil)
	if err != nil {
		return nil, wrapError(err)
	}
	return c.newTask(t), nil
}

// createIO creates the io to be used by a process.
// It receives the process through the stdinCloseSync channel rather than as a
// direct argument because the process may not have been registered yet when
// the stdin closer below runs.
func (c *container) createIO(fifos *cio.FIFOSet, stdinCloseSync chan containerd.Process, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
	var (
		io  *cio.DirectIO
		err error
	)
	io, err = c.client.newDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		var (
			closeErr  error
			stdinOnce sync.Once
		)
		pipe := io.Stdin
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				closeErr = pipe.Close()

				select {
				case p, ok := <-stdinCloseSync:
					if !ok {
						return
					}
					if err := closeStdin(context.Background(), p); err != nil {
						if closeErr != nil {
							closeErr = multierror.Append(closeErr, err)
						} else {
							// Avoid wrapping a single error in a multierror.
							closeErr = err
						}
					}
				default:
					// The process wasn't ready. Close its stdin asynchronously.
					go func() {
						p, ok := <-stdinCloseSync
						if !ok {
							return
						}
						if err := closeStdin(context.Background(), p); err != nil {
							c.client.logger.WithError(err).
								WithField("container", c.c8dCtr.ID()).
								Error("failed to close container stdin")
						}
					}()
				}
			})
			return closeErr
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		io.Cancel()
		io.Close()
	}
	return rio, err
}

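// closeStdin closes the stdin of the given process. Errors caused by the
// gRPC transport already shutting down are ignored.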
func closeStdin(ctx context.Context, p containerd.Process) error {
	err := p.CloseIO(ctx, containerd.WithStdinCloser)
	if err != nil && strings.Contains(err.Error(), "transport is closing") {
		err = nil
	}
	return err
}

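// processEvent queues the event for delivery to the backend. Events are keyed
// by container ID so that events for the same container are processed in order.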
func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
	c.eventQ.Append(ei.ContainerID, func() {
		err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
		if err != nil {
			c.logger.WithContext(ctx).WithError(err).WithFields(log.Fields{
				"container":  ei.ContainerID,
				"event":      et,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
}

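// waitServe polls containerd's healthcheck until it reports that the API is
// serving. It returns false if the context is cancelled or its deadline is
// exceeded before that happens.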
func (c *client) waitServe(ctx context.Context) bool {
	t := 100 * time.Millisecond
	delay := time.NewTimer(t)
	if !delay.Stop() {
		<-delay.C
	}
	defer delay.Stop()

	// `IsServing` will actually block until the service is ready.
	// However it can return early, so we'll loop with a delay to handle it.
	for {
		serving, err := c.client.IsServing(ctx)
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				return false
			}
			log.G(ctx).WithError(err).Warn("Error while testing if containerd API is ready")
		}

		if serving {
			return true
		}

		delay.Reset(t)
		select {
		case <-ctx.Done():
			return false
		case <-delay.C:
		}
	}
}

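// processEventStream subscribes to containerd's task events for the given
// namespace and forwards them to the backend. If the stream fails for any
// reason other than cancellation, it waits for containerd to become ready
// again and then re-subscribes.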
func (c *client) processEventStream(ctx context.Context, ns string) {
	// Create a new context specifically for this subscription.
	// The context must be cancelled to cancel the subscription.
	// In cases where we have to restart event stream processing,
	//   we'll need the original context b/c this one will be cancelled
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Filter on both namespace *and* topic. To create an "and" filter,
	// this must be a single, comma-separated string
	eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")

	c.logger.Debug("processing event stream")

	for {
		select {
		case err := <-errC:
			if err != nil {
				errStatus, ok := status.FromError(err)
				if !ok || errStatus.Code() != codes.Canceled {
					c.logger.WithError(err).Error("Failed to get event")
					c.logger.Info("Waiting for containerd to be ready to restart event processing")
					if c.waitServe(ctx) {
						go c.processEventStream(ctx, ns)
						return
					}
				}
				c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
			}
			return
		case ev := <-eventStream:
			if ev.Event == nil {
				c.logger.WithField("event", ev).Warn("invalid event")
				continue
			}

			v, err := typeurl.UnmarshalAny(ev.Event)
			if err != nil {
				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
				continue
			}

			c.logger.WithField("topic", ev.Topic).Debug("event")

			switch t := v.(type) {
			case *apievents.TaskCreate:
				c.processEvent(ctx, libcontainerdtypes.EventCreate, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				})
			case *apievents.TaskStart:
				c.processEvent(ctx, libcontainerdtypes.EventStart, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				})
			case *apievents.TaskExit:
				c.processEvent(ctx, libcontainerdtypes.EventExit, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ID,
					Pid:         t.Pid,
					ExitCode:    t.ExitStatus,
					ExitedAt:    protobuf.FromTimestamp(t.ExitedAt),
				})
			case *apievents.TaskOOM:
				c.processEvent(ctx, libcontainerdtypes.EventOOM, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				})
			case *apievents.TaskExecAdded:
				c.processEvent(ctx, libcontainerdtypes.EventExecAdded, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
				})
			case *apievents.TaskExecStarted:
				c.processEvent(ctx, libcontainerdtypes.EventExecStarted, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
					Pid:         t.Pid,
				})
			case *apievents.TaskPaused:
				c.processEvent(ctx, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				})
			case *apievents.TaskResumed:
				c.processEvent(ctx, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				})
			case *apievents.TaskDelete:
				c.logger.WithFields(log.Fields{
					"topic":     ev.Topic,
					"type":      reflect.TypeOf(t),
					"container": t.ContainerID,
				}).Info("ignoring event")
			default:
				c.logger.WithFields(log.Fields{
					"topic": ev.Topic,
					"type":  reflect.TypeOf(t),
				}).Info("ignoring event")
			}
		}
	}
}

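// writeContent copies r into the content store under the given ref and
// returns a descriptor for the written blob. The blob is labelled as a GC
// root so that containerd's garbage collector does not remove it while it is
// still needed.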
func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
	writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
	if err != nil {
		return nil, err
	}
	defer writer.Close()
	size, err := io.Copy(writer, r)
	if err != nil {
		return nil, err
	}
	labels := map[string]string{
		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
	}
	if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
		return nil, err
	}
	return &types.Descriptor{
		MediaType: mediaType,
		// Use the fully-qualified digest ("sha256:<hex>"); Encoded() would drop
		// the algorithm prefix and break later content-store lookups.
		Digest: writer.Digest().String(),
		Size:      size,
	}, nil
}

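// bundleDir returns the path of the container's bundle directory under the
// client's state directory.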
func (c *client) bundleDir(id string) string {
	return filepath.Join(c.stateDir, id)
}

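// wrapError translates containerd errors into errors understood by the rest
// of the daemon: not-found errors (including error messages that indicate a
// missing container) are mapped to errdefs.NotFound; everything else is
// returned unchanged.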
func wrapError(err error) error {
	switch {
	case err == nil:
		return nil
	case cerrdefs.IsNotFound(err):
		return errdefs.NotFound(err)
	}

	msg := err.Error()
	for _, s := range []string{"container does not exist", "not found", "no such container"} {
		if strings.Contains(msg, s) {
			return errdefs.NotFound(err)
		}
	}
	return err
}