github.com/devdivbcp/moby@v17.12.0-ce-rc1.0.20200726071732-2d4bfdc789ad+incompatible/libcontainerd/remote/client.go

package remote // import "github.com/docker/docker/libcontainerd/remote"

import (
	"context"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/Microsoft/hcsshim/cmd/containerd-shim-runhcs-v1/options"
	"github.com/containerd/containerd"
	apievents "github.com/containerd/containerd/api/events"
	"github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/archive"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/content"
	containerderrors "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/runtime/linux/runctypes"
	"github.com/containerd/typeurl"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/libcontainerd/queue"
	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"

	"github.com/docker/docker/pkg/ioutils"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// DockerContainerBundlePath is the label key pointing to the container's bundle path
const DockerContainerBundlePath = "com.docker/engine.bundle.path"

type client struct {
	client   *containerd.Client
	stateDir string
	logger   *logrus.Entry
	ns       string

	backend libcontainerdtypes.Backend
	eventQ  queue.Queue
	oomMu   sync.Mutex
	oom     map[string]bool
}

// NewClient creates a new libcontainerd client from a containerd client
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
	c := &client{
		client:   cli,
		stateDir: stateDir,
		logger:   logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
		ns:       ns,
		backend:  b,
		oom:      make(map[string]bool),
	}

	go c.processEventStream(ctx, ns)

	return c, nil
}

func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return c.client.Version(ctx)
}

// Restore loads the containerd container.
// It should not be called concurrently with any other operation for the given ID.
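// It reports whether the container's task is still alive, the task PID, and a
// handle that can later be used to Delete the restored task.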
func (c *client) Restore(ctx context.Context, id string, attachStdio libcontainerdtypes.StdioCallback) (alive bool, pid int, p libcontainerdtypes.Process, err error) {
	var dio *cio.DirectIO
	defer func() {
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
		err = wrapError(err)
	}()

	ctr, err := c.client.LoadContainer(ctx, id)
	if err != nil {
		return false, -1, nil, errors.WithStack(wrapError(err))
	}

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// dio must be assigned to the previously defined dio for the defer above
		// to handle cleanup
		dio, err = c.newDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}
	t, err := ctr.Task(ctx, attachIO)
	if err != nil && !containerderrors.IsNotFound(err) {
		return false, -1, nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}

	if t != nil {
		s, err := t.Status(ctx)
		if err != nil {
			return false, -1, nil, errors.Wrap(wrapError(err), "error getting task status")
		}
		alive = s.Status != containerd.Stopped
		pid = int(t.Pid())
	}

	c.logger.WithFields(logrus.Fields{
		"container": id,
		"alive":     alive,
		"pid":       pid,
	}).Debug("restored container")

	return alive, pid, &restoredProcess{
		p: t,
	}, nil
}

func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, runtimeOptions interface{}) error {
	bdir := c.bundleDir(id)
	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")

	_, err := c.client.NewContainer(ctx, id,
		containerd.WithSpec(ociSpec),
		containerd.WithRuntime(runtimeName, runtimeOptions),
		WithBundle(bdir, ociSpec),
	)
	if err != nil {
		if containerderrors.IsAlreadyExists(err) {
			return errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return wrapError(err)
	}
	return nil
}

// Start creates and starts a task for the specified containerd id
func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
	ctr, err := c.getContainer(ctx, id)
	if err != nil {
		return -1, err
	}
	var (
		cp             *types.Descriptor
		t              containerd.Task
		rio            cio.IO
		stdinCloseSync = make(chan struct{})
	)

	if checkpointDir != "" {
		// write checkpoint to the content store
		tar := archive.Diff(ctx, "", checkpointDir)
		cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
		// remove the checkpoint when we're done
		defer func() {
			if cp != nil {
				err := c.client.ContentStore().Delete(context.Background(), cp.Digest)
				if err != nil {
					c.logger.WithError(err).WithFields(logrus.Fields{
						"ref":    checkpointDir,
						"digest": cp.Digest,
					}).Warnf("failed to delete temporary checkpoint entry")
				}
			}
		}()
		if err := tar.Close(); err != nil {
			return -1, errors.Wrap(err, "failed to close checkpoint tar stream")
		}
		if err != nil {
			return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd")
		}
	}

	spec, err := ctr.Spec(ctx)
	if err != nil {
		return -1, errors.Wrap(err, "failed to retrieve spec")
	}
	labels, err := ctr.Labels(ctx)
	if err != nil {
		return -1, errors.Wrap(err, "failed to retrieve labels")
	}
	bundle := labels[DockerContainerBundlePath]
	uid, gid := getSpecUser(spec)
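	// Create the task: the first callback wires up the init process IO FIFOs,
	// the second attaches the checkpoint (if any) and runtime-specific creation
	// options (runc IO uid/gid and no-pivot-root on Linux, runhcs debug options
	// on Windows when the daemon is logging at debug level).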
	t, err = ctr.NewTask(ctx,
		func(id string) (cio.IO, error) {
			fifos := newFIFOSet(bundle, libcontainerdtypes.InitProcessName, withStdin, spec.Process.Terminal)

			rio, err = c.createIO(fifos, id, libcontainerdtypes.InitProcessName, stdinCloseSync, attachStdio)
			return rio, err
		},
		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			info.Checkpoint = cp
			if runtime.GOOS != "windows" {
				info.Options = &runctypes.CreateOptions{
					IoUid:       uint32(uid),
					IoGid:       uint32(gid),
					NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
				}
			} else {
				// Make sure we set the runhcs options to debug if we are at debug level.
				if c.logger.Level == logrus.DebugLevel {
					info.Options = &options.Options{Debug: true}
				}
			}
			return nil
		})
	if err != nil {
		close(stdinCloseSync)
		if rio != nil {
			rio.Cancel()
			rio.Close()
		}
		return -1, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO
	close(stdinCloseSync)

	if err := t.Start(ctx); err != nil {
		if _, err := t.Delete(ctx); err != nil {
			c.logger.WithError(err).WithField("container", id).
				Error("failed to delete task after fail start")
		}
		return -1, wrapError(err)
	}

	return int(t.Pid()), nil
}

// Exec creates an exec process.
//
// The containerd client calls Exec to register the exec config in the shim side.
// When the client calls Start, the shim will create the stdin fifo if needed. But
// for the container's main process, the stdin fifo is created in Create, not in
// the Start call. The stdinCloseSync channel should be closed after the exec
// process has been started.
func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (int, error) {
	ctr, err := c.getContainer(ctx, containerID)
	if err != nil {
		return -1, err
	}
	t, err := ctr.Task(ctx, nil)
	if err != nil {
		if containerderrors.IsNotFound(err) {
			return -1, errors.WithStack(errdefs.InvalidParameter(errors.New("container is not running")))
		}
		return -1, wrapError(err)
	}

	var (
		p              containerd.Process
		rio            cio.IO
		stdinCloseSync = make(chan struct{})
	)

	labels, err := ctr.Labels(ctx)
	if err != nil {
		return -1, wrapError(err)
	}

	fifos := newFIFOSet(labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)

	defer func() {
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		rio, err = c.createIO(fifos, containerID, processID, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		close(stdinCloseSync)
		if containerderrors.IsAlreadyExists(err) {
			return -1, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return -1, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO
	//
	// the stdin of exec process will be created after p.Start in containerd
	defer close(stdinCloseSync)

	if err = p.Start(ctx); err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		p.Delete(ctx)
		return -1, wrapError(err)
	}
	return int(p.Pid()), nil
}

func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error {
	p, err := c.getProcess(ctx, containerID, processID)
	if err != nil {
		return err
	}
	return wrapError(p.Kill(ctx, syscall.Signal(signal)))
}

func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
	p, err := c.getProcess(ctx, containerID, processID)
	if err != nil {
		return err
	}

	return p.Resize(ctx, uint32(width), uint32(height))
}

func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error {
	p, err := c.getProcess(ctx, containerID, processID)
	if err != nil {
		return err
	}

	return p.CloseIO(ctx, containerd.WithStdinCloser)
}

func (c *client) Pause(ctx context.Context, containerID string) error {
	p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return err
	}

	return wrapError(p.(containerd.Task).Pause(ctx))
}

func (c *client) Resume(ctx context.Context, containerID string) error {
	p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return err
	}

	return p.(containerd.Task).Resume(ctx)
}

func (c *client) Stats(ctx context.Context, containerID string) (*libcontainerdtypes.Stats, error) {
	p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return nil, err
	}

	m, err := p.(containerd.Task).Metrics(ctx)
	if err != nil {
		return nil, err
	}

	v, err := typeurl.UnmarshalAny(m.Data)
	if err != nil {
		return nil, err
	}
	return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
}

func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) {
	p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return nil, err
	}

	pis, err := p.(containerd.Task).Pids(ctx)
	if err != nil {
		return nil, err
	}

	var pids []uint32
	for _, i := range pis {
		pids = append(pids, i.Pid)
	}

	return pids, nil
}

func (c *client) Summary(ctx context.Context, containerID string) ([]libcontainerdtypes.Summary, error) {
	p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return nil, err
	}

	pis, err := p.(containerd.Task).Pids(ctx)
	if err != nil {
		return nil, err
	}

	var infos []libcontainerdtypes.Summary
	for _, pi := range pis {
		i, err := typeurl.UnmarshalAny(pi.Info)
		if err != nil {
			return nil, errors.Wrap(err, "unable to decode process details")
		}
		s, err := summaryFromInterface(i)
		if err != nil {
			return nil, err
		}
		infos = append(infos, *s)
	}

	return infos, nil
}

type restoredProcess struct {
	p containerd.Process
}

func (p *restoredProcess) Delete(ctx context.Context) (uint32, time.Time, error) {
	if p.p == nil {
		return 255, time.Now(), nil
	}
	status, err := p.p.Delete(ctx)
	if err != nil {
		return 255, time.Now(), nil
	}
	return status.ExitCode(), status.ExitTime(), nil
}
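// DeleteTask deletes the container's init task from containerd and returns its
// exit code and exit time. If the task cannot be found or deleted, an exit code
// of 255 and the current time are reported instead.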
func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
	p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return 255, time.Now(), nil
	}

	status, err := p.Delete(ctx)
	if err != nil {
		return 255, time.Now(), nil
	}
	return status.ExitCode(), status.ExitTime(), nil
}

func (c *client) Delete(ctx context.Context, containerID string) error {
	ctr, err := c.getContainer(ctx, containerID)
	if err != nil {
		return err
	}
	labels, err := ctr.Labels(ctx)
	if err != nil {
		return err
	}
	bundle := labels[DockerContainerBundlePath]
	if err := ctr.Delete(ctx); err != nil {
		return wrapError(err)
	}
	c.oomMu.Lock()
	delete(c.oom, containerID)
	c.oomMu.Unlock()
	if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
		if err := os.RemoveAll(bundle); err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container": containerID,
				"bundle":    bundle,
			}).Error("failed to remove state dir")
		}
	}
	return nil
}

func (c *client) Status(ctx context.Context, containerID string) (containerd.ProcessStatus, error) {
	t, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return containerd.Unknown, err
	}
	s, err := t.Status(ctx)
	if err != nil {
		return containerd.Unknown, wrapError(err)
	}
	return s.Status, nil
}
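// CreateCheckpoint checkpoints the container's init task through containerd and
// unpacks the resulting checkpoint data into checkpointDir. The intermediate
// checkpoint image is always deleted from containerd afterwards. If exit is
// true, the task is stopped as part of the checkpoint.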
func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
	p, err := c.getProcess(ctx, containerID, libcontainerdtypes.InitProcessName)
	if err != nil {
		return err
	}

	opts := []containerd.CheckpointTaskOpts{}
	if exit {
		opts = append(opts, func(r *containerd.CheckpointTaskInfo) error {
			if r.Options == nil {
				r.Options = &runctypes.CheckpointOptions{
					Exit: true,
				}
			} else {
				opts, _ := r.Options.(*runctypes.CheckpointOptions)
				opts.Exit = true
			}
			return nil
		})
	}
	img, err := p.(containerd.Task).Checkpoint(ctx, opts...)
	if err != nil {
		return wrapError(err)
	}
	// Whatever happens, delete the checkpoint from containerd
	defer func() {
		err := c.client.ImageService().Delete(context.Background(), img.Name())
		if err != nil {
			c.logger.WithError(err).WithField("digest", img.Target().Digest).
				Warnf("failed to delete checkpoint image")
		}
	}()

	b, err := content.ReadBlob(ctx, c.client.ContentStore(), img.Target())
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
	}
	var index v1.Index
	if err := json.Unmarshal(b, &index); err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
	}

	var cpDesc *v1.Descriptor
	for _, m := range index.Manifests {
		if m.MediaType == images.MediaTypeContainerd1Checkpoint {
			cpDesc = &m
			break
		}
	}
	if cpDesc == nil {
		// err is nil here; wrapping it would return nil and mask the failure
		return errdefs.System(errors.New("invalid checkpoint"))
	}

	rat, err := c.client.ContentStore().ReaderAt(ctx, *cpDesc)
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
	}
	defer rat.Close()
	_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
	}

	return err
}

func (c *client) getContainer(ctx context.Context, id string) (containerd.Container, error) {
	ctr, err := c.client.LoadContainer(ctx, id)
	if err != nil {
		if containerderrors.IsNotFound(err) {
			return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
		}
		return nil, wrapError(err)
	}
	return ctr, nil
}

func (c *client) getProcess(ctx context.Context, containerID, processID string) (containerd.Process, error) {
	ctr, err := c.getContainer(ctx, containerID)
	if err != nil {
		return nil, err
	}
	t, err := ctr.Task(ctx, nil)
	if err != nil {
		if containerderrors.IsNotFound(err) {
			return nil, errors.WithStack(errdefs.NotFound(errors.New("container is not running")))
		}
		return nil, wrapError(err)
	}
	if processID == libcontainerdtypes.InitProcessName {
		return t, nil
	}
	p, err := t.LoadProcess(ctx, processID, nil)
	if err != nil {
		if containerderrors.IsNotFound(err) {
			return nil, errors.WithStack(errdefs.NotFound(errors.New("no such exec")))
		}
		return nil, wrapError(err)
	}
	return p, nil
}

// createIO creates the io to be used by a process
// This needs to get a pointer to interface as upon closure the process may not have yet been registered
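// The stdin pipe is wrapped so that closing it also forwards CloseIO to the
// process, but only after stdinCloseSync has been closed by the caller (i.e.
// once the process has been registered with containerd).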
func (c *client) createIO(fifos *cio.FIFOSet, containerID, processID string, stdinCloseSync chan struct{}, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
	var (
		io  *cio.DirectIO
		err error
	)
	io, err = c.newDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		var (
			err       error
			stdinOnce sync.Once
		)
		pipe := io.Stdin
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				err = pipe.Close()
				// Do the rest in a new routine to avoid a deadlock if the
				// Exec/Start call failed.
				go func() {
					<-stdinCloseSync
					p, err := c.getProcess(context.Background(), containerID, processID)
					if err == nil {
						err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
						if err != nil && strings.Contains(err.Error(), "transport is closing") {
							err = nil
						}
					}
				}()
			})
			return err
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		io.Cancel()
		io.Close()
	}
	return rio, err
}

func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
	c.eventQ.Append(ei.ContainerID, func() {
		err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
		if err != nil {
			c.logger.WithError(err).WithFields(logrus.Fields{
				"container":  ei.ContainerID,
				"event":      et,
				"event-info": ei,
			}).Error("failed to process event")
		}

		if et == libcontainerdtypes.EventExit && ei.ProcessID != ei.ContainerID {
			p, err := c.getProcess(ctx, ei.ContainerID, ei.ProcessID)
			if err != nil {
				c.logger.WithError(errors.New("no such process")).
					WithFields(logrus.Fields{
						"error":     err,
						"container": ei.ContainerID,
						"process":   ei.ProcessID,
					}).Error("exit event")
				return
			}

			ctr, err := c.getContainer(ctx, ei.ContainerID)
			if err != nil {
				c.logger.WithFields(logrus.Fields{
					"container": ei.ContainerID,
					"error":     err,
				}).Error("failed to find container")
			} else {
				labels, err := ctr.Labels(ctx)
				if err != nil {
					c.logger.WithFields(logrus.Fields{
						"container": ei.ContainerID,
						"error":     err,
					}).Error("failed to get container labels")
					return
				}
				newFIFOSet(labels[DockerContainerBundlePath], ei.ProcessID, true, false).Close()
			}
			_, err = p.Delete(context.Background())
			if err != nil {
				c.logger.WithError(err).WithFields(logrus.Fields{
					"container": ei.ContainerID,
					"process":   ei.ProcessID,
				}).Warn("failed to delete process")
			}
		}
	})
}
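// processEventStream subscribes to containerd task events in the client's
// namespace and dispatches them to the backend via processEvent. On a
// subscription error other than context cancellation it waits one second and
// re-subscribes.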
func (c *client) processEventStream(ctx context.Context, ns string) {
	var (
		err error
		ev  *events.Envelope
		et  libcontainerdtypes.EventType
		ei  libcontainerdtypes.EventInfo
	)

	// Filter on both namespace *and* topic. To create an "and" filter,
	// this must be a single, comma-separated string
	eventStream, errC := c.client.EventService().Subscribe(ctx, "namespace=="+ns+",topic~=|^/tasks/|")

	c.logger.Debug("processing event stream")

	for {
		var oomKilled bool
		select {
		case err = <-errC:
			if err != nil {
				errStatus, ok := status.FromError(err)
				if !ok || errStatus.Code() != codes.Canceled {
					c.logger.WithError(err).Error("failed to get event")

					// rate limit
					select {
					case <-time.After(time.Second):
						go c.processEventStream(ctx, ns)
						return
					case <-ctx.Done():
					}
				}
				c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
			}
			return
		case ev = <-eventStream:
			if ev.Event == nil {
				c.logger.WithField("event", ev).Warn("invalid event")
				continue
			}

			v, err := typeurl.UnmarshalAny(ev.Event)
			if err != nil {
				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
				continue
			}

			c.logger.WithField("topic", ev.Topic).Debug("event")

			switch t := v.(type) {
			case *apievents.TaskCreate:
				et = libcontainerdtypes.EventCreate
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskStart:
				et = libcontainerdtypes.EventStart
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskExit:
				et = libcontainerdtypes.EventExit
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ID,
					Pid:         t.Pid,
					ExitCode:    t.ExitStatus,
					ExitedAt:    t.ExitedAt,
				}
			case *apievents.TaskOOM:
				et = libcontainerdtypes.EventOOM
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					OOMKilled:   true,
				}
				oomKilled = true
			case *apievents.TaskExecAdded:
				et = libcontainerdtypes.EventExecAdded
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
				}
			case *apievents.TaskExecStarted:
				et = libcontainerdtypes.EventExecStarted
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
					Pid:         t.Pid,
				}
			case *apievents.TaskPaused:
				et = libcontainerdtypes.EventPaused
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskResumed:
				et = libcontainerdtypes.EventResumed
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			default:
				c.logger.WithFields(logrus.Fields{
					"topic": ev.Topic,
					"type":  reflect.TypeOf(t),
				}).Info("ignoring event")
				continue
			}

			c.oomMu.Lock()
			if oomKilled {
				c.oom[ei.ContainerID] = true
			}
			ei.OOMKilled = c.oom[ei.ContainerID]
			c.oomMu.Unlock()

			c.processEvent(ctx, et, ei)
		}
	}
}
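// writeContent streams r into containerd's content store under ref and returns
// a descriptor (media type, digest, size) for the stored blob. The content is
// labelled as a GC root so containerd does not garbage-collect it immediately.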
func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
	writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
	if err != nil {
		return nil, err
	}
	defer writer.Close()
	size, err := io.Copy(writer, r)
	if err != nil {
		return nil, err
	}
	labels := map[string]string{
		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
	}
	if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
		return nil, err
	}
	return &types.Descriptor{
		MediaType: mediaType,
		Digest:    writer.Digest(),
		Size_:     size,
	}, nil
}

func (c *client) bundleDir(id string) string {
	return filepath.Join(c.stateDir, id)
}

func wrapError(err error) error {
	switch {
	case err == nil:
		return nil
	case containerderrors.IsNotFound(err):
		return errdefs.NotFound(err)
	}

	msg := err.Error()
	for _, s := range []string{"container does not exist", "not found", "no such container"} {
		if strings.Contains(msg, s) {
			return errdefs.NotFound(err)
		}
	}
	return err
}