github.com/Heebron/moby@v0.0.0-20221111184709-6eab4f55faf7/libcontainerd/remote/client.go

package remote // import "github.com/docker/docker/libcontainerd/remote"

import (
	"context"
	"encoding/json"
	"io"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/containerd/containerd"
	apievents "github.com/containerd/containerd/api/events"
	"github.com/containerd/containerd/api/types"
	"github.com/containerd/containerd/archive"
	"github.com/containerd/containerd/cio"
	"github.com/containerd/containerd/content"
	containerderrors "github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/events"
	"github.com/containerd/containerd/images"
	v2runcoptions "github.com/containerd/containerd/runtime/v2/runc/options"
	"github.com/containerd/typeurl"
	"github.com/docker/docker/errdefs"
	"github.com/docker/docker/libcontainerd/queue"
	libcontainerdtypes "github.com/docker/docker/libcontainerd/types"
	"github.com/docker/docker/pkg/ioutils"
	v1 "github.com/opencontainers/image-spec/specs-go/v1"
	specs "github.com/opencontainers/runtime-spec/specs-go"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// DockerContainerBundlePath is the label key pointing to the container's bundle path
const DockerContainerBundlePath = "com.docker/engine.bundle.path"

type client struct {
	client   *containerd.Client
	stateDir string
	logger   *logrus.Entry
	ns       string

	backend libcontainerdtypes.Backend
	eventQ  queue.Queue
}

type container struct {
	client *client
	c8dCtr containerd.Container

	v2runcoptions *v2runcoptions.Options
}

type task struct {
	containerd.Task
	ctr *container
}

type process struct {
	containerd.Process
}

// NewClient creates a new libcontainerd client from a containerd client
func NewClient(ctx context.Context, cli *containerd.Client, stateDir, ns string, b libcontainerdtypes.Backend) (libcontainerdtypes.Client, error) {
	c := &client{
		client:   cli,
		stateDir: stateDir,
		logger:   logrus.WithField("module", "libcontainerd").WithField("namespace", ns),
		ns:       ns,
		backend:  b,
	}

	go c.processEventStream(ctx, ns)

	return c, nil
}

func (c *client) Version(ctx context.Context) (containerd.Version, error) {
	return c.client.Version(ctx)
}

func (c *container) newTask(t containerd.Task) *task {
	return &task{Task: t, ctr: c}
}

func (c *container) AttachTask(ctx context.Context, attachStdio libcontainerdtypes.StdioCallback) (_ libcontainerdtypes.Task, err error) {
	var dio *cio.DirectIO
	defer func() {
		if err != nil && dio != nil {
			dio.Cancel()
			dio.Close()
		}
	}()

	attachIO := func(fifos *cio.FIFOSet) (cio.IO, error) {
		// dio must be assigned to the previously defined dio for the defer above
		// to handle cleanup
		dio, err = c.client.newDirectIO(ctx, fifos)
		if err != nil {
			return nil, err
		}
		return attachStdio(dio)
	}
	t, err := c.c8dCtr.Task(ctx, attachIO)
	if err != nil {
		return nil, errors.Wrap(wrapError(err), "error getting containerd task for container")
	}
	return c.newTask(t), nil
}

func (c *client) NewContainer(ctx context.Context, id string, ociSpec *specs.Spec, shim string, runtimeOptions interface{}, opts ...containerd.NewContainerOpts) (libcontainerdtypes.Container, error) {
	bdir := c.bundleDir(id)
	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")

	newOpts := []containerd.NewContainerOpts{
		containerd.WithSpec(ociSpec),
		containerd.WithRuntime(shim, runtimeOptions),
		WithBundle(bdir, ociSpec),
	}
	opts = append(opts, newOpts...)

	ctr, err := c.client.NewContainer(ctx, id, opts...)
	if err != nil {
		if containerderrors.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	created := container{
		client: c,
		c8dCtr: ctr,
	}
	if x, ok := runtimeOptions.(*v2runcoptions.Options); ok {
		created.v2runcoptions = x
	}
	return &created, nil
}
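
// The following is an illustrative sketch (not part of the upstream file) of
// how a daemon might wire this client up. The socket path, state dir,
// namespace, backend value, and container id are assumptions for the example:
//
//	cli, err := containerd.New("/run/containerd/containerd.sock")
//	if err != nil {
//		return err
//	}
//	lc, err := NewClient(ctx, cli, "/var/run/docker/libcontainerd", "moby", backend)
//	if err != nil {
//		return err
//	}
//	ctr, err := lc.NewContainer(ctx, "mycontainer", ociSpec, "io.containerd.runc.v2", nil)
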
// Start creates and starts a task for the specified containerd id.
func (c *container) Start(ctx context.Context, checkpointDir string, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Task, error) {
	var (
		cp             *types.Descriptor
		t              containerd.Task
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	if checkpointDir != "" {
		// write checkpoint to the content store
		tar := archive.Diff(ctx, "", checkpointDir)
		var err error
		// Assign to the outer cp (rather than shadowing it with :=) so the
		// descriptor is visible to the checkpoint task option below.
		cp, err = c.client.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
		// remove the checkpoint when we're done
		defer func() {
			if cp != nil {
				err := c.client.client.ContentStore().Delete(ctx, cp.Digest)
				if err != nil {
					c.client.logger.WithError(err).WithFields(logrus.Fields{
						"ref":    checkpointDir,
						"digest": cp.Digest,
					}).Warnf("failed to delete temporary checkpoint entry")
				}
			}
		}()
		if err := tar.Close(); err != nil {
			return nil, errors.Wrap(err, "failed to close checkpoint tar stream")
		}
		if err != nil {
			return nil, errors.Wrapf(err, "failed to upload checkpoint to containerd")
		}
	}

	// Optimization: assume the relevant metadata has not changed in the
	// moment since the container was created. Elide redundant RPC requests
	// to refresh the metadata separately for spec and labels.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve metadata")
	}
	bundle := md.Labels[DockerContainerBundlePath]

	var spec specs.Spec
	if err := json.Unmarshal(md.Spec.GetValue(), &spec); err != nil {
		return nil, errors.Wrap(err, "failed to retrieve spec")
	}
	uid, gid := getSpecUser(&spec)

	taskOpts := []containerd.NewTaskOpts{
		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			info.Checkpoint = cp
			return nil
		},
	}

	if runtime.GOOS != "windows" {
		taskOpts = append(taskOpts, func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
			if c.v2runcoptions != nil {
				opts := *c.v2runcoptions
				opts.IoUid = uint32(uid)
				opts.IoGid = uint32(gid)
				info.Options = &opts
			}
			return nil
		})
	} else {
		taskOpts = append(taskOpts, withLogLevel(c.client.logger.Level))
	}

	t, err = c.c8dCtr.NewTask(ctx,
		func(id string) (cio.IO, error) {
			fifos := newFIFOSet(bundle, libcontainerdtypes.InitProcessName, withStdin, spec.Process.Terminal)

			rio, err = c.createIO(fifos, libcontainerdtypes.InitProcessName, stdinCloseSync, attachStdio)
			return rio, err
		},
		taskOpts...,
	)
	if err != nil {
		close(stdinCloseSync)
		if rio != nil {
			rio.Cancel()
			rio.Close()
		}
		return nil, errors.Wrap(wrapError(err), "failed to create task for container")
	}

	// Signal c.createIO that it can call CloseIO
	stdinCloseSync <- t

	if err := t.Start(ctx); err != nil {
		// Only Stopped tasks can be deleted. Created tasks have to be
		// killed first, to transition them to Stopped.
		if _, err := t.Delete(ctx, containerd.WithProcessKill); err != nil {
			c.client.logger.WithError(err).WithField("container", c.c8dCtr.ID()).
				Error("failed to delete task after fail start")
		}
		return nil, wrapError(err)
	}

	return c.newTask(t), nil
}
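
// The stdinCloseSync channel above implements a small handshake between
// Start/Exec and createIO (a reading of the code, summarized here for
// clarity; this summary is not part of the original file):
//
//	stdin wrapper Close(): pipe.Close(), then wait on the channel in a goroutine
//	Start/Exec on success: stdinCloseSync <- task (or process)
//	goroutine:             p := <-stdinCloseSync; p.CloseIO(ctx, containerd.WithStdinCloser)
//
// On failure, Start/Exec close the channel instead; the goroutine observes
// the closed channel (!ok) and returns without calling CloseIO.
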
// Exec creates an exec process.
//
// The containerd client calls Exec to register the exec config on the shim
// side. When the client calls Start, the shim will create the stdin fifo if
// needed. But for the container's main process, the stdin fifo is created in
// Create, not in the Start call. The stdinCloseSync channel should be closed
// after starting the exec process.
func (t *task) Exec(ctx context.Context, processID string, spec *specs.Process, withStdin bool, attachStdio libcontainerdtypes.StdioCallback) (libcontainerdtypes.Process, error) {
	var (
		p              containerd.Process
		rio            cio.IO
		stdinCloseSync = make(chan containerd.Process, 1)
	)

	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := t.ctr.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return nil, wrapError(err)
	}

	fifos := newFIFOSet(md.Labels[DockerContainerBundlePath], processID, withStdin, spec.Terminal)

	defer func() {
		if err != nil {
			if rio != nil {
				rio.Cancel()
				rio.Close()
			}
		}
	}()

	p, err = t.Task.Exec(ctx, processID, spec, func(id string) (cio.IO, error) {
		rio, err = t.ctr.createIO(fifos, processID, stdinCloseSync, attachStdio)
		return rio, err
	})
	if err != nil {
		close(stdinCloseSync)
		if containerderrors.IsAlreadyExists(err) {
			return nil, errors.WithStack(errdefs.Conflict(errors.New("id already in use")))
		}
		return nil, wrapError(err)
	}

	// Signal c.createIO that it can call CloseIO
	//
	// the stdin of the exec process will be created after p.Start in containerd
	defer func() { stdinCloseSync <- p }()

	if err = p.Start(ctx); err != nil {
		// use new context for cleanup because old one may be cancelled by user, but leave a timeout to make sure
		// we are not waiting forever if containerd is unresponsive or to work around fifo cancelling issues in
		// older containerd-shim
		ctx, cancel := context.WithTimeout(context.Background(), 45*time.Second)
		defer cancel()
		p.Delete(ctx)
		return nil, wrapError(err)
	}
	return process{p}, nil
}

func (t *task) Kill(ctx context.Context, signal syscall.Signal) error {
	return wrapError(t.Task.Kill(ctx, signal))
}

func (p process) Kill(ctx context.Context, signal syscall.Signal) error {
	return wrapError(p.Process.Kill(ctx, signal))
}

func (t *task) Pause(ctx context.Context) error {
	return wrapError(t.Task.Pause(ctx))
}

func (t *task) Resume(ctx context.Context) error {
	return wrapError(t.Task.Resume(ctx))
}

func (t *task) Stats(ctx context.Context) (*libcontainerdtypes.Stats, error) {
	m, err := t.Metrics(ctx)
	if err != nil {
		return nil, err
	}

	v, err := typeurl.UnmarshalAny(m.Data)
	if err != nil {
		return nil, err
	}
	return libcontainerdtypes.InterfaceToStats(m.Timestamp, v), nil
}

func (t *task) Summary(ctx context.Context) ([]libcontainerdtypes.Summary, error) {
	pis, err := t.Pids(ctx)
	if err != nil {
		return nil, err
	}

	var infos []libcontainerdtypes.Summary
	for _, pi := range pis {
		i, err := typeurl.UnmarshalAny(pi.Info)
		if err != nil {
			return nil, errors.Wrap(err, "unable to decode process details")
		}
		s, err := summaryFromInterface(i)
		if err != nil {
			return nil, err
		}
		infos = append(infos, *s)
	}

	return infos, nil
}

func (t *task) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	s, err := t.Task.Delete(ctx)
	return s, wrapError(err)
}

func (p process) Delete(ctx context.Context) (*containerd.ExitStatus, error) {
	s, err := p.Process.Delete(ctx)
	return s, wrapError(err)
}
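
// Stats above returns a *libcontainerdtypes.Stats whose metrics payload is
// the concrete, cgroup-driver-specific struct decoded by typeurl. A sketch
// of how a caller might consume it; the cgroups v1/v2 metrics types named
// here are an assumption about the caller's environment, not something this
// file guarantees:
//
//	st, err := tsk.Stats(ctx)
//	if err != nil {
//		return err
//	}
//	switch m := st.Metrics.(type) {
//	case *v1.Metrics: // github.com/containerd/cgroups/stats/v1
//		fmt.Println(m.Memory.Usage.Usage)
//	case *v2.Metrics: // github.com/containerd/cgroups/v2/stats
//		fmt.Println(m.Memory.Usage)
//	}
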
func (c *container) Delete(ctx context.Context) error {
	// Optimization: assume the DockerContainerBundlePath label has not been
	// updated since the container metadata was last loaded/refreshed.
	md, err := c.c8dCtr.Info(ctx, containerd.WithoutRefreshedMetadata)
	if err != nil {
		return err
	}
	bundle := md.Labels[DockerContainerBundlePath]
	if err := c.c8dCtr.Delete(ctx); err != nil {
		return wrapError(err)
	}
	if os.Getenv("LIBCONTAINERD_NOCLEAN") != "1" {
		if err := os.RemoveAll(bundle); err != nil {
			c.client.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
				"container": c.c8dCtr.ID(),
				"bundle":    bundle,
			}).Error("failed to remove state dir")
		}
	}
	return nil
}

func (t *task) ForceDelete(ctx context.Context) error {
	_, err := t.Task.Delete(ctx, containerd.WithProcessKill)
	return wrapError(err)
}

func (t *task) Status(ctx context.Context) (containerd.Status, error) {
	s, err := t.Task.Status(ctx)
	return s, wrapError(err)
}

func (p process) Status(ctx context.Context) (containerd.Status, error) {
	s, err := p.Process.Status(ctx)
	return s, wrapError(err)
}

func (c *container) getCheckpointOptions(exit bool) containerd.CheckpointTaskOpts {
	return func(r *containerd.CheckpointTaskInfo) error {
		if r.Options == nil && c.v2runcoptions != nil {
			r.Options = &v2runcoptions.CheckpointOptions{}
		}

		switch opts := r.Options.(type) {
		case *v2runcoptions.CheckpointOptions:
			opts.Exit = exit
		}

		return nil
	}
}

func (t *task) CreateCheckpoint(ctx context.Context, checkpointDir string, exit bool) error {
	img, err := t.Task.Checkpoint(ctx, t.ctr.getCheckpointOptions(exit))
	if err != nil {
		return wrapError(err)
	}
	// Whatever happens, delete the checkpoint from containerd
	defer func() {
		err := t.ctr.client.client.ImageService().Delete(ctx, img.Name())
		if err != nil {
			t.ctr.client.logger.WithError(err).WithField("digest", img.Target().Digest).
				Warnf("failed to delete checkpoint image")
		}
	}()

	b, err := content.ReadBlob(ctx, t.ctr.client.client.ContentStore(), img.Target())
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to retrieve checkpoint data"))
	}
	var index v1.Index
	if err := json.Unmarshal(b, &index); err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to decode checkpoint data"))
	}

	var cpDesc *v1.Descriptor
	for _, m := range index.Manifests {
		m := m
		if m.MediaType == images.MediaTypeContainerd1Checkpoint {
			cpDesc = &m //nolint:gosec
			break
		}
	}
	if cpDesc == nil {
		// err is nil at this point, so wrapping it would return a nil error;
		// construct a fresh error instead.
		return errdefs.System(errors.New("invalid checkpoint"))
	}

	rat, err := t.ctr.client.client.ContentStore().ReaderAt(ctx, *cpDesc)
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to get checkpoint reader"))
	}
	defer rat.Close()
	_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
	if err != nil {
		return errdefs.System(errors.Wrapf(err, "failed to read checkpoint reader"))
	}

	return nil
}
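
// CreateCheckpoint above round-trips checkpoint data through the content
// store as an OCI image index. A sketch of the structure it walks (digest
// and size values illustrative only):
//
//	{
//	  "manifests": [
//	    {
//	      "mediaType": "application/vnd.containerd.container.criu.checkpoint.criu.tar",
//	      "digest": "sha256:...",
//	      "size": 1234
//	    }
//	  ]
//	}
//
// Only the manifest whose mediaType matches images.MediaTypeContainerd1Checkpoint
// is extracted into checkpointDir via archive.Apply.
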
// LoadContainer loads the containerd container.
func (c *client) LoadContainer(ctx context.Context, id string) (libcontainerdtypes.Container, error) {
	ctr, err := c.client.LoadContainer(ctx, id)
	if err != nil {
		if containerderrors.IsNotFound(err) {
			return nil, errors.WithStack(errdefs.NotFound(errors.New("no such container")))
		}
		return nil, wrapError(err)
	}
	return &container{client: c, c8dCtr: ctr}, nil
}

func (c *container) Task(ctx context.Context) (libcontainerdtypes.Task, error) {
	t, err := c.c8dCtr.Task(ctx, nil)
	if err != nil {
		return nil, wrapError(err)
	}
	return c.newTask(t), nil
}

// createIO creates the io to be used by a process.
// The process may not have been registered yet by the time the stdin closer
// runs, so the process is delivered through the stdinCloseSync channel
// rather than being captured directly.
func (c *container) createIO(fifos *cio.FIFOSet, processID string, stdinCloseSync chan containerd.Process, attachStdio libcontainerdtypes.StdioCallback) (cio.IO, error) {
	var (
		io  *cio.DirectIO
		err error
	)
	io, err = c.client.newDirectIO(context.Background(), fifos)
	if err != nil {
		return nil, err
	}

	if io.Stdin != nil {
		var (
			err       error
			stdinOnce sync.Once
		)
		pipe := io.Stdin
		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
			stdinOnce.Do(func() {
				err = pipe.Close()
				// Do the rest in a new routine to avoid a deadlock if the
				// Exec/Start call failed.
				go func() {
					p, ok := <-stdinCloseSync
					if !ok {
						return
					}
					err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
					if err != nil && strings.Contains(err.Error(), "transport is closing") {
						err = nil
					}
				}()
			})
			return err
		})
	}

	rio, err := attachStdio(io)
	if err != nil {
		io.Cancel()
		io.Close()
	}
	return rio, err
}

func (c *client) processEvent(ctx context.Context, et libcontainerdtypes.EventType, ei libcontainerdtypes.EventInfo) {
	c.eventQ.Append(ei.ContainerID, func() {
		err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
		if err != nil {
			c.logger.WithContext(ctx).WithError(err).WithFields(logrus.Fields{
				"container":  ei.ContainerID,
				"event":      et,
				"event-info": ei,
			}).Error("failed to process event")
		}
	})
}

func (c *client) waitServe(ctx context.Context) bool {
	t := 100 * time.Millisecond
	delay := time.NewTimer(t)
	if !delay.Stop() {
		<-delay.C
	}
	defer delay.Stop()

	// `IsServing` will actually block until the service is ready.
	// However it can return early, so we'll loop with a delay to handle it.
	for {
		serving, err := c.client.IsServing(ctx)
		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) {
				return false
			}
			logrus.WithError(err).Warn("Error while testing if containerd API is ready")
		}

		if serving {
			return true
		}

		delay.Reset(t)
		select {
		case <-ctx.Done():
			return false
		case <-delay.C:
		}
	}
}
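
// processEventStream below subscribes with a server-side filter that matches
// on namespace and topic together. Example envelopes that pass the filter
// (topics are containerd's task-service event topics; the namespace value is
// illustrative):
//
//	namespace: "moby"  topic: "/tasks/create"  event: *apievents.TaskCreate
//	namespace: "moby"  topic: "/tasks/exit"    event: *apievents.TaskExit
//
// A "/containers/create" event, or a task event from another namespace,
// would not be delivered.
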
func (c *client) processEventStream(ctx context.Context, ns string) {
	var (
		err error
		ev  *events.Envelope
		et  libcontainerdtypes.EventType
		ei  libcontainerdtypes.EventInfo
	)

	// Create a new context specifically for this subscription.
	// The context must be cancelled to cancel the subscription.
	// In cases where we have to restart event stream processing,
	// we'll need the original context because this one will be cancelled.
	subCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Filter on both namespace *and* topic. To create an "and" filter,
	// this must be a single, comma-separated string.
	eventStream, errC := c.client.EventService().Subscribe(subCtx, "namespace=="+ns+",topic~=|^/tasks/|")

	c.logger.Debug("processing event stream")

	for {
		select {
		case err = <-errC:
			if err != nil {
				errStatus, ok := status.FromError(err)
				if !ok || errStatus.Code() != codes.Canceled {
					c.logger.WithError(err).Error("Failed to get event")
					c.logger.Info("Waiting for containerd to be ready to restart event processing")
					if c.waitServe(ctx) {
						go c.processEventStream(ctx, ns)
						return
					}
				}
				c.logger.WithError(ctx.Err()).Info("stopping event stream following graceful shutdown")
			}
			return
		case ev = <-eventStream:
			if ev.Event == nil {
				c.logger.WithField("event", ev).Warn("invalid event")
				continue
			}

			v, err := typeurl.UnmarshalAny(ev.Event)
			if err != nil {
				c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
				continue
			}

			c.logger.WithField("topic", ev.Topic).Debug("event")

			switch t := v.(type) {
			case *apievents.TaskCreate:
				et = libcontainerdtypes.EventCreate
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskStart:
				et = libcontainerdtypes.EventStart
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ContainerID,
					Pid:         t.Pid,
				}
			case *apievents.TaskExit:
				et = libcontainerdtypes.EventExit
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ID,
					Pid:         t.Pid,
					ExitCode:    t.ExitStatus,
					ExitedAt:    t.ExitedAt,
				}
			case *apievents.TaskOOM:
				et = libcontainerdtypes.EventOOM
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskExecAdded:
				et = libcontainerdtypes.EventExecAdded
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
				}
			case *apievents.TaskExecStarted:
				et = libcontainerdtypes.EventExecStarted
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
					ProcessID:   t.ExecID,
					Pid:         t.Pid,
				}
			case *apievents.TaskPaused:
				et = libcontainerdtypes.EventPaused
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskResumed:
				et = libcontainerdtypes.EventResumed
				ei = libcontainerdtypes.EventInfo{
					ContainerID: t.ContainerID,
				}
			case *apievents.TaskDelete:
				c.logger.WithFields(logrus.Fields{
					"topic":     ev.Topic,
					"type":      reflect.TypeOf(t),
					"container": t.ContainerID},
				).Info("ignoring event")
				continue
			default:
				c.logger.WithFields(logrus.Fields{
					"topic": ev.Topic,
					"type":  reflect.TypeOf(t)},
				).Info("ignoring event")
				continue
			}

			c.processEvent(ctx, et, ei)
		}
	}
}

func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
	writer, err := c.client.ContentStore().Writer(ctx, content.WithRef(ref))
	if err != nil {
		return nil, err
	}
	defer writer.Close()
	size, err := io.Copy(writer, r)
	if err != nil {
		return nil, err
	}
	labels := map[string]string{
		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
	}
	if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
		return nil, err
	}
	return &types.Descriptor{
		MediaType: mediaType,
		Digest:    writer.Digest(),
		Size_:     size,
	}, nil
}

func (c *client) bundleDir(id string) string {
	return filepath.Join(c.stateDir, id)
}

func wrapError(err error) error {
	switch {
	case err == nil:
		return nil
	case containerderrors.IsNotFound(err):
		return errdefs.NotFound(err)
	}

	msg := err.Error()
	for _, s := range []string{"container does not exist", "not found", "no such container"} {
		if strings.Contains(msg, s) {
			return errdefs.NotFound(err)
		}
	}
	return err
}
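
// Illustrative behaviour of wrapError (a sketch, not part of the original
// file; the inputs are hypothetical):
//
//	wrapError(nil)                                   // nil
//	wrapError(containerd's ErrNotFound)              // errdefs.NotFound(err)
//	wrapError(errors.New("no such container: abc"))  // errdefs.NotFound(err), via the substring match
//	wrapError(errors.New("connection refused"))      // returned unchanged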