github.com/moby/docker@v26.1.3+incompatible/libcontainerd/remote/client.go

package remote // import "github.com/docker/docker/libcontainerd/remote"

import (
	"context"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/containerd/containerd"
	apievents "github.com/containerd/containerd/api/events"
	"github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/archive"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/content"
	cerrdefs "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/protobuf"
	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
	"github.com/containerd/log"
	"github.com/containerd/typeurl/v2"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/libcontainerd/queue"
	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/hashicorp/go-multierror"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
)

// DockerContainerBundlePath is the label key pointing to the container's bundle path
const DockerContainerBundlePath = "com.docker/engine.bundle.path"

type client struct {
	client   *containerd.Client
	stateDir string
	logger   *log.Entry
	ns       string

	backend libcontainerdtypes.Backend
	eventQ  queue.Queue
}

type container struct {
	client *client
	c8dCtr containerd.Container

	v2runcoptions *v2runcoptions.Options
}

type task struct {
	containerd.Task
	ctr *container
}

type process struct {
	containerd.Process
}

// NewClient creates a new libcontainerd client from a containerd client
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
	c := &client{
		client:   cli,
		stateDir: stateDir,
		logger:   log.G(ctx).WithField("module", "libcontainerd").WithField("namespace", ns),
		ns:       ns,
		backend:  b,
	}

	go c.processEventStream(ctx, ns)

	return c, nil
}

func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return c.client.Version(ctx)
}

func (c *container) newTask(t containerd.Task) *task {
	return &task{Task: t, ctr: c}
}

func (c *container) AttachTask(ctx context.Context, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, err error) {
	var dio *cio.DirectIO
	defer func() {
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
	}()

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// dio must be assigned to the previously defined dio for the defer above
		// to handle cleanup
		dio, err = c.client.newDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}
	t, err := c.c8dCtr.Task(ctx, attachIO)
	if err != nil {
		return nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}
	return c.newTask(t), nil
}

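// Usage sketch (hypothetical, not part of this file): how a daemon might
// construct a libcontainerd client and re-attach to a running task after a
// restart. The socket path, state dir, namespace, container ID, myBackend,
// and attachStdio are illustrative assumptions, not values defined here.
//
//	cli, err := containerd.New("/run/containerd/containerd.sock")
//	if err != nil {
//		return err
//	}
//	lc, err := remote.NewClient(ctx, cli, "/var/run/docker/libcontainerd", "moby", myBackend)
//	if err != nil {
//		return err
//	}
//	ctr, err := lc.LoadContainer(ctx, "abc123")
//	if err != nil {
//		return err
//	}
//	// attachStdio wires the restored FIFOs back to the daemon's stream copiers.
//	tsk, err := ctr.AttachTask(ctx, attachStdio)
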
func (c *client) NewContainer(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
	bdir := c.bundleDir(id)
	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")

	newOpts := []containerd.NewContainerOpts{
		containerd.WithSpec(ociSpec),
		containerd.WithRuntime(shim, runtimeOptions),
		WithBundle(bdir, ociSpec),
	}
	opts = append(opts, newOpts...)

	ctr, err := c.client.NewContainer(ctx, id, opts...)
	if err != nil {
		if cerrdefs.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	created := container{
		client: c,
		c8dCtr: ctr,
	}
	if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
		created.v2runcoptions = x
	}
	return &created, nil
}

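// Usage sketch (hypothetical): creating a container backed by the runc v2
// shim with explicit runtime options. The shim name is containerd's standard
// runc v2 runtime; ociSpec and the option values are illustrative assumptions.
//
//	ctr, err := lc.NewContainer(ctx, "abc123", ociSpec,
//		"io.containerd.runc.v2",
//		&v2runcoptions.Options{BinaryName: "runc"},
//	)
//	if err != nil {
//		return err
//	}
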
// NewTask creates a task for the specified containerd id
func (c *container) NewTask(ctx context.Context, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	var (
		checkpoint     *types.Descriptor
		t              containerd.Task
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	if checkpointDir != "" {
		// write checkpoint to the content store
		tar := archive.Diff(ctx, "", checkpointDir)
		var err error
		checkpoint, err = c.client.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
		// remove the checkpoint when we're done
		defer func() {
			if checkpoint != nil {
				err := c.client.client.ContentStore().Delete(ctx, digest.Digest(checkpoint.Digest))
				if err != nil {
					c.client.logger.WithError(err).WithFields(log.Fields{
						"ref":    checkpointDir,
						"digest": checkpoint.Digest,
					}).Warnf("failed to delete temporary checkpoint entry")
				}
			}
		}()
		if err := tar.Close(); err != nil {
			return nil, errors.Wrap(err, "failed to close checkpoint tar stream")
		}
		if err != nil {
			return nil, errors.Wrapf(err, "failed to upload checkpoint to containerd")
		}
	}

	// Optimization: assume the relevant metadata has not changed in the
	// moment since the container was created. Elide redundant RPC requests
	// to refresh the metadata separately for spec and labels.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve metadata")
	}
	bundle := md.Labels[DockerContainerBundlePath]

	var spec specs.Spec
	if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
		return nil, errors.Wrap(err, "failed to retrieve spec")
	}
	uid, gid := getSpecUser(&spec)

	taskOpts := []containerd.NewTaskOpts{
		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			info.Checkpoint = checkpoint
			return nil
		},
	}

	if runtime.GOOS != "windows" {
		taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			if c.v2runcoptions != nil {
				opts := proto.Clone(c.v2runcoptions).(*v2runcoptions.Options)
				opts.IoUid = uint32(uid)
				opts.IoGid = uint32(gid)
				info.Options = opts
			}
			return nil
		})
	} else {
		taskOpts = append(taskOpts, withLogLevel(c.client.logger.Level))
	}

	t, err = c.c8dCtr.NewTask(ctx,
		func(id string) (cio.IO, error) {
			fifos := newFIFOSet(bundle, id, withStdin, spec.Process.Terminal)

			rio, err = c.createIO(fifos, stdinCloseSync, attachStdio)
			return rio, err
		},
		taskOpts...,
	)
	if err != nil {
		close(stdinCloseSync)
		if rio != nil {
			rio.Cancel()
			rio.Close()
		}
		return nil, errors.Wrap(wrapError(err), "failed to create task for container")
	}

	// Signal c.createIO that it can call CloseIO
	stdinCloseSync <- t

	return c.newTask(t), nil
}

func (t *task) Start(ctx context.Context) error {
	return wrapError(t.Task.Start(ctx))
}

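// Usage sketch (hypothetical): the create/start split as a caller sees it.
// NewTask registers the task and its FIFOs with the shim; Start actually runs
// the entry process. checkpointDir is empty for a normal start; withStdin and
// attachStdio are assumed to come from the caller.
//
//	tsk, err := ctr.NewTask(ctx, "", withStdin, attachStdio)
//	if err != nil {
//		return err
//	}
//	if err := tsk.Start(ctx); err != nil {
//		return err
//	}
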
// Exec creates an exec process.
//
// The containerd client calls Exec to register the exec config on the shim
// side. When the client calls Start, the shim creates the stdin fifo if one is
// needed. (For the container's main process, by contrast, the stdin fifo is
// created during Create, not Start.) The stdinCloseSync channel should
// therefore only be signalled after the exec process has been started.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
	var (
		p              containerd.Process
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := t.ctr.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, wrapError(err)
	}

	fifos := newFIFOSet(md.Labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)

	defer func() {
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		rio, err = t.ctr.createIO(fifos, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		close(stdinCloseSync)
		if cerrdefs.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO.
	//
	// The stdin fifo of the exec process is created by containerd after p.Start.
	defer func() { stdinCloseSync <- p }()

	if err = p.Start(ctx); err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		p.Delete(ctx)
		return nil, wrapError(err)
	}
	return process{p}, nil
}

func (t *task) Kill(ctx context.Context, signal syscall.Signal) error {
	return wrapError(t.Task.Kill(ctx, signal))
}

func (p process) Kill(ctx context.Context, signal syscall.Signal) error {
	return wrapError(p.Process.Kill(ctx, signal))
}

func (t *task) Pause(ctx context.Context) error {
	return wrapError(t.Task.Pause(ctx))
}

func (t *task) Resume(ctx context.Context) error {
	return wrapError(t.Task.Resume(ctx))
}

func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
	m, err := t.Metrics(ctx)
	if err != nil {
		return nil, err
	}

	v, err := typeurl.UnmarshalAny(m.Data)
	if err != nil {
		return nil, err
	}
	return libcontainerdtypes.InterfaceToStats(protobuf.FromTimestamp(m.Timestamp), v), nil
}

func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
	pis, err := t.Pids(ctx)
	if err != nil {
		return nil, err
	}

	var infos []libcontainerdtypes.Summary
	for _, pi := range pis {
		i, err := typeurl.UnmarshalAny(pi.Info)
		if err != nil {
			return nil, errors.Wrap(err, "unable to decode process details")
		}
		s, err := summaryFromInterface(i)
		if err != nil {
			return nil, err
		}
		infos = append(infos, *s)
	}

	return infos, nil
}

func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	s, err := t.Task.Delete(ctx)
	return s, wrapError(err)
}

func (p process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	s, err := p.Process.Delete(ctx)
	return s, wrapError(err)
}

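// Usage sketch (hypothetical): running an exec process inside a started task.
// The process ID and spec below are illustrative assumptions.
//
//	pspec := &specs.Process{
//		Args: []string{"/bin/sh", "-c", "echo hello"},
//		Cwd:  "/",
//	}
//	proc, err := tsk.Exec(ctx, "exec-1", pspec, false, attachStdio)
//	if err != nil {
//		return err
//	}
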
func (c *container) Delete(ctx context.Context) error {
	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return err
	}
	bundle := md.Labels[DockerContainerBundlePath]
	if err := c.c8dCtr.Delete(ctx); err != nil {
		return wrapError(err)
	}
	if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
		if err := os.RemoveAll(bundle); err != nil {
			c.client.logger.WithContext(ctx).WithError(err).WithFields(log.Fields{
				"container": c.c8dCtr.ID(),
				"bundle":    bundle,
			}).Error("failed to remove state dir")
		}
	}
	return nil
}

func (t *task) ForceDelete(ctx context.Context) error {
	_, err := t.Task.Delete(ctx, containerd.WithProcessKill)
	return wrapError(err)
}

func (t *task) Status(ctx context.Context) (containerd.Status, error) {
	s, err := t.Task.Status(ctx)
	return s, wrapError(err)
}

func (p process) Status(ctx context.Context) (containerd.Status, error) {
	s, err := p.Process.Status(ctx)
	return s, wrapError(err)
}

func (c *container) getCheckpointOptions(exit bool) containerd.CheckpointTaskOpts {
	return func(r *containerd.CheckpointTaskInfo) error {
		if r.Options == nil && c.v2runcoptions != nil {
			r.Options = &v2runcoptions.CheckpointOptions{}
		}

		switch opts := r.Options.(type) {
		case *v2runcoptions.CheckpointOptions:
			opts.Exit = exit
		}

		return nil
	}
}

func (t *task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
	img, err := t.Task.Checkpoint(ctx, t.ctr.getCheckpointOptions(exit))
	if err != nil {
		return wrapError(err)
	}
	// Whatever happens, delete the checkpoint from containerd
	defer func() {
		err := t.ctr.client.client.ImageService().Delete(ctx, img.Name())
		if err != nil {
			t.ctr.client.logger.WithError(err).WithField("digest", img.Target().Digest).
				Warnf("failed to delete checkpoint image")
		}
	}()

	b, err := content.ReadBlob(ctx, t.ctr.client.client.ContentStore(), img.Target())
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
	}
	var index ocispec.Index
	if err := json.Unmarshal(b, &index); err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
	}

	var cpDesc *ocispec.Descriptor
	for _, m := range index.Manifests {
		m := m
		if m.MediaType == images.MediaTypeContainerd1Checkpoint {
			cpDesc = &m //nolint:gosec
			break
		}
	}
	if cpDesc == nil {
		// err is nil at this point, so wrapping it would silently return a nil
		// error; construct a fresh one for the missing checkpoint descriptor.
		return errdefs.System(errors.New("invalid checkpoint"))
	}

	rat, err := t.ctr.client.client.ContentStore().ReaderAt(ctx, *cpDesc)
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
	}
	defer rat.Close()
	_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to read checkpoint data"))
	}

	return nil
}

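// Usage sketch (hypothetical): checkpoint a running task, then restore it by
// passing the same directory to NewTask. The path is an illustrative
// assumption; exit=false keeps the task running after the checkpoint.
//
//	dir := "/var/lib/docker/containers/abc123/checkpoints/cp1"
//	if err := tsk.CreateCheckpoint(ctx, dir, false); err != nil {
//		return err
//	}
//	// Later, on the same or a freshly created container:
//	restored, err := ctr.NewTask(ctx, dir, withStdin, attachStdio)
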
// LoadContainer loads the containerd container.
func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
	ctr, err := c.client.LoadContainer(ctx, id)
	if err != nil {
		if cerrdefs.IsNotFound(err) {
			return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
		}
		return nil, wrapError(err)
	}
	return &container{client: c, c8dCtr: ctr}, nil
}

func (c *container) Task(ctx context.Context) (libcontainerdtypes.Task, error) {
	t, err := c.c8dCtr.Task(ctx, nil)
	if err != nil {
		return nil, wrapError(err)
	}
	return c.newTask(t), nil
}

// createIO creates the io to be used by a process.
// The process is delivered through the stdinCloseSync channel because it may
// not have been registered yet by the time its stdin is closed.
func (c *container) createIO(fifos *cio.FIFOSet, stdinCloseSync chan containerd.Process, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
	var (
		io  *cio.DirectIO
		err error
	)
	io, err = c.client.newDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		var (
			closeErr  error
			stdinOnce sync.Once
		)
		pipe := io.Stdin
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				closeErr = pipe.Close()

				select {
				case p, ok := <-stdinCloseSync:
					if !ok {
						return
					}
					if err := closeStdin(context.Background(), p); err != nil {
						if closeErr != nil {
							closeErr = multierror.Append(closeErr, err)
						} else {
							// Avoid wrapping a single error in a multierror.
							closeErr = err
						}
					}
				default:
					// The process wasn't ready. Close its stdin asynchronously.
					go func() {
						p, ok := <-stdinCloseSync
						if !ok {
							return
						}
						if err := closeStdin(context.Background(), p); err != nil {
							c.client.logger.WithError(err).
								WithField("container", c.c8dCtr.ID()).
								Error("failed to close container stdin")
						}
					}()
				}
			})
			return closeErr
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		io.Cancel()
		io.Close()
	}
	return rio, err
}

func closeStdin(ctx context.Context, p containerd.Process) error {
	err := p.CloseIO(ctx, containerd.WithStdinCloser)
	if err != nil && strings.Contains(err.Error(), "transport is closing") {
		err = nil
	}
	return err
}

func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
	c.eventQ.Append(ei.ContainerID, func() {
		err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
		if err != nil {
			c.logger.WithContext(ctx).WithError(err).WithFields(log.Fields{
				"container":  ei.ContainerID,
				"event":      et,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
}

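// Ordering sketch (hypothetical): eventQ.Append serializes callbacks per
// container ID, so one container's events reach the backend in the order they
// were appended, while different containers are processed independently. The
// event values below are illustrative assumptions.
//
//	c.processEvent(ctx, libcontainerdtypes.EventStart,
//		libcontainerdtypes.EventInfo{ContainerID: "a", Pid: 42})
//	c.processEvent(ctx, libcontainerdtypes.EventExit,
//		libcontainerdtypes.EventInfo{ContainerID: "a", Pid: 42})
//	// The backend sees Start("a") strictly before Exit("a").
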
func (c *client) waitServe(ctx context.Context) bool {
	t := 100 * time.Millisecond
	delay := time.NewTimer(t)
	if !delay.Stop() {
		<-delay.C
	}
	defer delay.Stop()

	// `IsServing` will actually block until the service is ready.
	// However, it can return early, so we'll loop with a delay to handle it.
	for {
		serving, err := c.client.IsServing(ctx)
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				return false
			}
			log.G(ctx).WithError(err).Warn("Error while testing if containerd API is ready")
		}

		if serving {
			return true
		}

		delay.Reset(t)
		select {
		case <-ctx.Done():
			return false
		case <-delay.C:
		}
	}
}

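// Behavior sketch (hypothetical): waitServe re-checks containerd's health
// endpoint on a 100ms delay until it reports serving, giving up when the
// caller's context is cancelled or its deadline expires. The timeout below is
// an illustrative assumption.
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	if !c.waitServe(ctx) {
//		return errors.New("containerd did not become ready within 30s")
//	}
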
func (c *client) processEventStream(ctx context.Context, ns string) {
	// Create a new context specifically for this subscription.
	// The context must be cancelled to cancel the subscription.
	// In cases where we have to restart event stream processing,
	// we'll need the original context because this one will be cancelled.
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Filter on both namespace *and* topic. To create an "and" filter,
	// this must be a single, comma-separated string.
	eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")

	c.logger.Debug("processing event stream")

	for {
		select {
		case err := <-errC:
			if err != nil {
				errStatus, ok := status.FromError(err)
				if !ok || errStatus.Code() != codes.Canceled {
					c.logger.WithError(err).Error("Failed to get event")
					c.logger.Info("Waiting for containerd to be ready to restart event processing")
					if c.waitServe(ctx) {
						go c.processEventStream(ctx, ns)
						return
					}
				}
				c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
			}
			return
		case ev := <-eventStream:
			if ev.Event == nil {
				c.logger.WithField("event", ev).Warn("invalid event")
				continue
			}

			v, err := typeurl.UnmarshalAny(ev.Event)
			if err != nil {
				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
				continue
			}

			c.logger.WithField("topic", ev.Topic).Debug("event")

			switch t := v.(type) {
			case *apievents.TaskCreate:
				c.processEvent(ctx, libcontainerdtypes.EventCreate, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				})
			case *apievents.TaskStart:
				c.processEvent(ctx, libcontainerdtypes.EventStart, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				})
			case *apievents.TaskExit:
				c.processEvent(ctx, libcontainerdtypes.EventExit, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ID,
					Pid:         t.Pid,
					ExitCode:    t.ExitStatus,
					ExitedAt:    protobuf.FromTimestamp(t.ExitedAt),
				})
			case *apievents.TaskOOM:
				c.processEvent(ctx, libcontainerdtypes.EventOOM, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				})
			case *apievents.TaskExecAdded:
				c.processEvent(ctx, libcontainerdtypes.EventExecAdded, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
				})
			case *apievents.TaskExecStarted:
				c.processEvent(ctx, libcontainerdtypes.EventExecStarted, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
					Pid:         t.Pid,
				})
			case *apievents.TaskPaused:
				c.processEvent(ctx, libcontainerdtypes.EventPaused, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				})
			case *apievents.TaskResumed:
				c.processEvent(ctx, libcontainerdtypes.EventResumed, libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				})
			case *apievents.TaskDelete:
				c.logger.WithFields(log.Fields{
					"topic":     ev.Topic,
					"type":      reflect.TypeOf(t),
					"container": t.ContainerID,
				}).Info("ignoring event")
			default:
				c.logger.WithFields(log.Fields{
					"topic": ev.Topic,
					"type":  reflect.TypeOf(t),
				}).Info("ignoring event")
			}
		}
	}
}

func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
	writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
	if err != nil {
		return nil, err
	}
	defer writer.Close()
	size, err := io.Copy(writer, r)
	if err != nil {
		return nil, err
	}
	labels := map[string]string{
		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
	}
	if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
		return nil, err
	}
	return &types.Descriptor{
		MediaType: mediaType,
		Digest:    writer.Digest().String(),
		Size:      size,
	}, nil
}

func (c *client) bundleDir(id string) string {
	return filepath.Join(c.stateDir, id)
}

func wrapError(err error) error {
	switch {
	case err == nil:
		return nil
	case cerrdefs.IsNotFound(err):
		return errdefs.NotFound(err)
	}

	msg := err.Error()
	for _, s := range []string{"container does not exist", "not found", "no such container"} {
		if strings.Contains(msg, s) {
			return errdefs.NotFound(err)
		}
	}
	return err
}
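
// Mapping sketch (hypothetical): how wrapError translates containerd errors
// into Docker's errdefs classifications, as exercised by the methods above.
// someContainerdErr is an illustrative assumption.
//
//	err := wrapError(someContainerdErr)
//	if errdefs.IsNotFound(err) {
//		// Either cerrdefs.IsNotFound matched, or the message contained one of
//		// "container does not exist", "not found", or "no such container".
//	}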