github.com/containerd/Containerd@v1.4.13/container_linux_test.go (about) 1 // +build linux 2 3 /* 4 Copyright The containerd Authors. 5 6 Licensed under the Apache License, Version 2.0 (the "License"); 7 you may not use this file except in compliance with the License. 8 You may obtain a copy of the License at 9 10 http://www.apache.org/licenses/LICENSE-2.0 11 12 Unless required by applicable law or agreed to in writing, software 13 distributed under the License is distributed on an "AS IS" BASIS, 14 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 See the License for the specific language governing permissions and 16 limitations under the License. 17 */ 18 19 package containerd 20 21 import ( 22 "bytes" 23 "context" 24 "fmt" 25 "io" 26 "io/ioutil" 27 "os" 28 "os/exec" 29 "path/filepath" 30 "runtime" 31 "strings" 32 "sync" 33 "syscall" 34 "testing" 35 "time" 36 37 "github.com/containerd/cgroups" 38 cgroupsv2 "github.com/containerd/cgroups/v2" 39 "github.com/containerd/containerd/cio" 40 "github.com/containerd/containerd/containers" 41 "github.com/containerd/containerd/errdefs" 42 "github.com/containerd/containerd/oci" 43 "github.com/containerd/containerd/plugin" 44 "github.com/containerd/containerd/runtime/linux/runctypes" 45 "github.com/containerd/containerd/runtime/v2/runc/options" 46 "github.com/containerd/containerd/sys" 47 specs "github.com/opencontainers/runtime-spec/specs-go" 48 "golang.org/x/sys/unix" 49 ) 50 51 func TestTaskUpdate(t *testing.T) { 52 t.Parallel() 53 54 client, err := newClient(t, address) 55 if err != nil { 56 t.Fatal(err) 57 } 58 defer client.Close() 59 60 var ( 61 ctx, cancel = testContext(t) 62 id = t.Name() 63 ) 64 defer cancel() 65 66 image, err := client.GetImage(ctx, testImage) 67 if err != nil { 68 t.Fatal(err) 69 } 70 limit := int64(32 * 1024 * 1024) 71 memory := func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { 72 s.Linux.Resources.Memory = &specs.LinuxMemory{ 73 Limit: 
&limit, 74 } 75 return nil 76 } 77 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), 78 WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"), memory)) 79 if err != nil { 80 t.Fatal(err) 81 } 82 defer container.Delete(ctx, WithSnapshotCleanup) 83 84 task, err := container.NewTask(ctx, empty()) 85 if err != nil { 86 t.Fatal(err) 87 } 88 defer task.Delete(ctx) 89 90 statusC, err := task.Wait(ctx) 91 if err != nil { 92 t.Fatal(err) 93 } 94 95 var ( 96 cgroup cgroups.Cgroup 97 cgroup2 *cgroupsv2.Manager 98 ) 99 // check that the task has a limit of 32mb 100 if cgroups.Mode() == cgroups.Unified { 101 groupPath, err := cgroupsv2.PidGroupPath(int(task.Pid())) 102 if err != nil { 103 t.Fatal(err) 104 } 105 cgroup2, err = cgroupsv2.LoadManager("/sys/fs/cgroup", groupPath) 106 if err != nil { 107 t.Fatal(err) 108 } 109 stat, err := cgroup2.Stat() 110 if err != nil { 111 t.Fatal(err) 112 } 113 if int64(stat.Memory.UsageLimit) != limit { 114 t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.UsageLimit) 115 } 116 } else { 117 cgroup, err = cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid()))) 118 if err != nil { 119 t.Fatal(err) 120 } 121 stat, err := cgroup.Stat(cgroups.IgnoreNotExist) 122 if err != nil { 123 t.Fatal(err) 124 } 125 if int64(stat.Memory.Usage.Limit) != limit { 126 t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit) 127 } 128 } 129 limit = 64 * 1024 * 1024 130 if err := task.Update(ctx, WithResources(&specs.LinuxResources{ 131 Memory: &specs.LinuxMemory{ 132 Limit: &limit, 133 }, 134 })); err != nil { 135 t.Error(err) 136 } 137 // check that the task has a limit of 64mb 138 if cgroups.Mode() == cgroups.Unified { 139 stat, err := cgroup2.Stat() 140 if err != nil { 141 t.Fatal(err) 142 } 143 if int64(stat.Memory.UsageLimit) != limit { 144 t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.UsageLimit) 145 
} 146 } else { 147 stat, err := cgroup.Stat(cgroups.IgnoreNotExist) 148 if err != nil { 149 t.Fatal(err) 150 } 151 if int64(stat.Memory.Usage.Limit) != limit { 152 t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit) 153 } 154 } 155 if err := task.Kill(ctx, unix.SIGKILL); err != nil { 156 t.Fatal(err) 157 } 158 159 <-statusC 160 } 161 162 func TestShimInCgroup(t *testing.T) { 163 t.Parallel() 164 165 client, err := newClient(t, address) 166 if err != nil { 167 t.Fatal(err) 168 } 169 defer client.Close() 170 var ( 171 ctx, cancel = testContext(t) 172 id = t.Name() 173 ) 174 defer cancel() 175 176 image, err := client.GetImage(ctx, testImage) 177 if err != nil { 178 t.Fatal(err) 179 } 180 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "30"))) 181 if err != nil { 182 t.Fatal(err) 183 } 184 defer container.Delete(ctx, WithSnapshotCleanup) 185 // create a cgroup for the shim to use 186 path := "/containerd/shim" 187 var ( 188 cg cgroups.Cgroup 189 cg2 *cgroupsv2.Manager 190 ) 191 if cgroups.Mode() == cgroups.Unified { 192 cg2, err = cgroupsv2.NewManager("/sys/fs/cgroup", path, &cgroupsv2.Resources{}) 193 if err != nil { 194 t.Fatal(err) 195 } 196 defer cg2.Delete() 197 } else { 198 cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{}) 199 if err != nil { 200 t.Fatal(err) 201 } 202 defer cg.Delete() 203 } 204 205 task, err := container.NewTask(ctx, empty(), WithShimCgroup(path)) 206 if err != nil { 207 t.Fatal(err) 208 } 209 defer task.Delete(ctx) 210 211 statusC, err := task.Wait(ctx) 212 if err != nil { 213 t.Fatal(err) 214 } 215 216 // check to see if the shim is inside the cgroup 217 if cgroups.Mode() == cgroups.Unified { 218 processes, err := cg2.Procs(false) 219 if err != nil { 220 t.Fatal(err) 221 } 222 if len(processes) == 0 { 223 t.Errorf("created cgroup should have at least one process 
inside: %d", len(processes)) 224 } 225 } else { 226 processes, err := cg.Processes(cgroups.Devices, false) 227 if err != nil { 228 t.Fatal(err) 229 } 230 if len(processes) == 0 { 231 t.Errorf("created cgroup should have at least one process inside: %d", len(processes)) 232 } 233 } 234 if err := task.Kill(ctx, unix.SIGKILL); err != nil { 235 t.Fatal(err) 236 } 237 238 <-statusC 239 } 240 241 func TestDaemonRestart(t *testing.T) { 242 client, err := newClient(t, address) 243 if err != nil { 244 t.Fatal(err) 245 } 246 defer client.Close() 247 248 var ( 249 image Image 250 ctx, cancel = testContext(t) 251 id = t.Name() 252 ) 253 defer cancel() 254 255 image, err = client.GetImage(ctx, testImage) 256 if err != nil { 257 t.Fatal(err) 258 } 259 260 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) 261 if err != nil { 262 t.Fatal(err) 263 } 264 defer container.Delete(ctx, WithSnapshotCleanup) 265 266 task, err := container.NewTask(ctx, empty()) 267 if err != nil { 268 t.Fatal(err) 269 } 270 defer task.Delete(ctx) 271 272 statusC, err := task.Wait(ctx) 273 if err != nil { 274 t.Fatal(err) 275 } 276 277 if err := task.Start(ctx); err != nil { 278 t.Fatal(err) 279 } 280 281 var exitStatus ExitStatus 282 if err := ctrd.Restart(func() { 283 exitStatus = <-statusC 284 }); err != nil { 285 t.Fatal(err) 286 } 287 288 if exitStatus.Error() == nil { 289 t.Errorf(`first task.Wait() should have failed with "transport is closing"`) 290 } 291 292 waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) 293 serving, err := client.IsServing(waitCtx) 294 waitCancel() 295 if !serving { 296 t.Fatalf("containerd did not start within 2s: %v", err) 297 } 298 299 statusC, err = task.Wait(ctx) 300 if err != nil { 301 t.Fatal(err) 302 } 303 304 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 305 t.Fatal(err) 306 } 307 308 <-statusC 309 } 310 311 func TestShimDoesNotLeakPipes(t 
*testing.T) { 312 containerdPid := ctrd.cmd.Process.Pid 313 initialPipes, err := numPipes(containerdPid) 314 if err != nil { 315 t.Fatal(err) 316 } 317 318 client, err := newClient(t, address) 319 if err != nil { 320 t.Fatal(err) 321 } 322 defer client.Close() 323 324 var ( 325 image Image 326 ctx, cancel = testContext(t) 327 id = t.Name() 328 ) 329 defer cancel() 330 331 image, err = client.GetImage(ctx, testImage) 332 if err != nil { 333 t.Fatal(err) 334 } 335 336 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) 337 if err != nil { 338 t.Fatal(err) 339 } 340 341 task, err := container.NewTask(ctx, empty()) 342 if err != nil { 343 t.Fatal(err) 344 } 345 346 exitChannel, err := task.Wait(ctx) 347 if err != nil { 348 t.Fatal(err) 349 } 350 351 if err := task.Start(ctx); err != nil { 352 t.Fatal(err) 353 } 354 355 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 356 t.Fatal(err) 357 } 358 359 <-exitChannel 360 361 if _, err := task.Delete(ctx); err != nil { 362 t.Fatal(err) 363 } 364 365 if err := container.Delete(ctx, WithSnapshotCleanup); err != nil { 366 t.Fatal(err) 367 } 368 369 currentPipes, err := numPipes(containerdPid) 370 if err != nil { 371 t.Fatal(err) 372 } 373 374 if initialPipes != currentPipes { 375 t.Errorf("Pipes have leaked after container has been deleted. 
Initially there were %d pipes, after container deletion there were %d pipes", initialPipes, currentPipes) 376 } 377 } 378 379 func numPipes(pid int) (int, error) { 380 cmd := exec.Command("sh", "-c", fmt.Sprintf("lsof -p %d | grep FIFO", pid)) 381 382 var stdout bytes.Buffer 383 cmd.Stdout = &stdout 384 if err := cmd.Run(); err != nil { 385 return 0, err 386 } 387 return strings.Count(stdout.String(), "\n"), nil 388 } 389 390 func TestDaemonReconnectsToShimIOPipesOnRestart(t *testing.T) { 391 client, err := newClient(t, address) 392 if err != nil { 393 t.Fatal(err) 394 } 395 defer client.Close() 396 397 var ( 398 image Image 399 ctx, cancel = testContext(t) 400 id = t.Name() 401 ) 402 defer cancel() 403 404 image, err = client.GetImage(ctx, testImage) 405 if err != nil { 406 t.Fatal(err) 407 } 408 409 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) 410 if err != nil { 411 t.Fatal(err) 412 } 413 defer container.Delete(ctx, WithSnapshotCleanup) 414 415 task, err := container.NewTask(ctx, empty()) 416 if err != nil { 417 t.Fatal(err) 418 } 419 defer task.Delete(ctx) 420 421 _, err = task.Wait(ctx) 422 if err != nil { 423 t.Fatal(err) 424 } 425 426 if err := task.Start(ctx); err != nil { 427 t.Fatal(err) 428 } 429 430 if err := ctrd.Restart(nil); err != nil { 431 t.Fatal(err) 432 } 433 434 waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) 435 serving, err := client.IsServing(waitCtx) 436 waitCancel() 437 if !serving { 438 t.Fatalf("containerd did not start within 2s: %v", err) 439 } 440 441 // After we restared containerd we write some messages to the log pipes, simulating shim writing stuff there. 
442 // Then we make sure that these messages are available on the containerd log thus proving that the server reconnected to the log pipes 443 runtimeVersion := getRuntimeVersion() 444 logDirPath := getLogDirPath(runtimeVersion, id) 445 446 switch runtimeVersion { 447 case "v1": 448 writeToFile(t, filepath.Join(logDirPath, "shim.stdout.log"), fmt.Sprintf("%s writing to stdout\n", id)) 449 writeToFile(t, filepath.Join(logDirPath, "shim.stderr.log"), fmt.Sprintf("%s writing to stderr\n", id)) 450 case "v2": 451 writeToFile(t, filepath.Join(logDirPath, "log"), fmt.Sprintf("%s writing to log\n", id)) 452 } 453 454 statusC, err := task.Wait(ctx) 455 if err != nil { 456 t.Fatal(err) 457 } 458 459 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 460 t.Fatal(err) 461 } 462 463 <-statusC 464 465 stdioContents, err := ioutil.ReadFile(ctrdStdioFilePath) 466 if err != nil { 467 t.Fatal(err) 468 } 469 470 switch runtimeVersion { 471 case "v1": 472 if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to stdout", id)) { 473 t.Fatal("containerd did not connect to the shim stdout pipe") 474 } 475 if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to stderr", id)) { 476 t.Fatal("containerd did not connect to the shim stderr pipe") 477 } 478 case "v2": 479 if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to log", id)) { 480 t.Fatal("containerd did not connect to the shim log pipe") 481 } 482 } 483 } 484 485 func writeToFile(t *testing.T, filePath, message string) { 486 writer, err := os.OpenFile(filePath, os.O_WRONLY, 0600) 487 if err != nil { 488 t.Fatal(err) 489 } 490 if _, err := writer.WriteString(message); err != nil { 491 t.Fatal(err) 492 } 493 if err := writer.Close(); err != nil { 494 t.Fatal(err) 495 } 496 } 497 498 func getLogDirPath(runtimeVersion, id string) string { 499 switch runtimeVersion { 500 case "v1": 501 return filepath.Join(defaultRoot, plugin.RuntimeLinuxV1, testNamespace, id) 502 case "v2": 503 return 
filepath.Join(defaultState, "io.containerd.runtime.v2.task", testNamespace, id) 504 default: 505 panic(fmt.Errorf("Unsupported runtime version %s", runtimeVersion)) 506 } 507 } 508 509 func getRuntimeVersion() string { 510 switch rt := os.Getenv("TEST_RUNTIME"); rt { 511 case plugin.RuntimeLinuxV1: 512 return "v1" 513 default: 514 return "v2" 515 } 516 } 517 518 func TestContainerPTY(t *testing.T) { 519 t.Parallel() 520 521 client, err := newClient(t, address) 522 if err != nil { 523 t.Fatal(err) 524 } 525 defer client.Close() 526 527 var ( 528 image Image 529 ctx, cancel = testContext(t) 530 id = t.Name() 531 ) 532 defer cancel() 533 534 image, err = client.GetImage(ctx, testImage) 535 if err != nil { 536 t.Fatal(err) 537 } 538 539 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithTTY, withProcessArgs("echo", "hello"))) 540 if err != nil { 541 t.Fatal(err) 542 } 543 defer container.Delete(ctx, WithSnapshotCleanup) 544 545 direct, err := newDirectIO(ctx, true) 546 if err != nil { 547 t.Fatal(err) 548 } 549 defer direct.Delete() 550 var ( 551 wg sync.WaitGroup 552 buf = bytes.NewBuffer(nil) 553 ) 554 wg.Add(1) 555 go func() { 556 defer wg.Done() 557 io.Copy(buf, direct.Stdout) 558 }() 559 560 task, err := container.NewTask(ctx, direct.IOCreate) 561 if err != nil { 562 t.Fatal(err) 563 } 564 defer task.Delete(ctx) 565 566 status, err := task.Wait(ctx) 567 if err != nil { 568 t.Error(err) 569 } 570 571 if err := task.Start(ctx); err != nil { 572 t.Fatal(err) 573 } 574 575 <-status 576 wg.Wait() 577 578 if err := direct.Close(); err != nil { 579 t.Error(err) 580 } 581 582 out := buf.String() 583 if !strings.ContainsAny(fmt.Sprintf("%#q", out), `\x00`) { 584 t.Fatal(`expected \x00 in output`) 585 } 586 } 587 588 func TestContainerAttach(t *testing.T) { 589 t.Parallel() 590 591 if runtime.GOOS == "windows" { 592 // On windows, closing the write side of the pipe closes the read 593 // side, sending 
an EOF to it and preventing reopening it. 594 // Hence this test will always fails on windows 595 t.Skip("invalid logic on windows") 596 } 597 598 client, err := newClient(t, address) 599 if err != nil { 600 t.Fatal(err) 601 } 602 defer client.Close() 603 604 var ( 605 image Image 606 ctx, cancel = testContext(t) 607 id = t.Name() 608 ) 609 defer cancel() 610 611 image, err = client.GetImage(ctx, testImage) 612 if err != nil { 613 t.Fatal(err) 614 } 615 616 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withCat())) 617 if err != nil { 618 t.Fatal(err) 619 } 620 defer container.Delete(ctx, WithSnapshotCleanup) 621 622 expected := "hello" + newLine 623 624 direct, err := newDirectIO(ctx, false) 625 if err != nil { 626 t.Fatal(err) 627 } 628 defer direct.Delete() 629 var ( 630 wg sync.WaitGroup 631 buf = bytes.NewBuffer(nil) 632 ) 633 wg.Add(1) 634 go func() { 635 defer wg.Done() 636 io.Copy(buf, direct.Stdout) 637 }() 638 639 task, err := container.NewTask(ctx, direct.IOCreate) 640 if err != nil { 641 t.Fatal(err) 642 } 643 defer task.Delete(ctx) 644 645 status, err := task.Wait(ctx) 646 if err != nil { 647 t.Error(err) 648 } 649 650 if err := task.Start(ctx); err != nil { 651 t.Fatal(err) 652 } 653 654 if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { 655 t.Error(err) 656 } 657 658 // load the container and re-load the task 659 if container, err = client.LoadContainer(ctx, id); err != nil { 660 t.Fatal(err) 661 } 662 663 if task, err = container.Task(ctx, direct.IOAttach); err != nil { 664 t.Fatal(err) 665 } 666 667 if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { 668 t.Error(err) 669 } 670 671 direct.Stdin.Close() 672 673 if err := task.CloseIO(ctx, WithStdinCloser); err != nil { 674 t.Error(err) 675 } 676 677 <-status 678 679 wg.Wait() 680 if _, err := task.Delete(ctx); err != nil { 681 t.Error(err) 682 } 683 684 output := buf.String() 685 686 // we wrote the same thing 
after attach 687 expected = expected + expected 688 if output != expected { 689 t.Errorf("expected output %q but received %q", expected, output) 690 } 691 } 692 693 func newDirectIO(ctx context.Context, terminal bool) (*directIO, error) { 694 fifos, err := cio.NewFIFOSetInDir("", "", terminal) 695 if err != nil { 696 return nil, err 697 } 698 dio, err := cio.NewDirectIO(ctx, fifos) 699 if err != nil { 700 return nil, err 701 } 702 return &directIO{DirectIO: *dio}, nil 703 } 704 705 type directIO struct { 706 cio.DirectIO 707 } 708 709 // ioCreate returns IO available for use with task creation 710 func (f *directIO) IOCreate(id string) (cio.IO, error) { 711 return f, nil 712 } 713 714 // ioAttach returns IO available for use with task attachment 715 func (f *directIO) IOAttach(set *cio.FIFOSet) (cio.IO, error) { 716 return f, nil 717 } 718 719 func (f *directIO) Cancel() { 720 // nothing to cancel as all operations are handled externally 721 } 722 723 // Close closes all open fds 724 func (f *directIO) Close() error { 725 err := f.Stdin.Close() 726 if f.Stdout != nil { 727 if err2 := f.Stdout.Close(); err == nil { 728 err = err2 729 } 730 } 731 if f.Stderr != nil { 732 if err2 := f.Stderr.Close(); err == nil { 733 err = err2 734 } 735 } 736 return err 737 } 738 739 // Delete removes the underlying directory containing fifos 740 func (f *directIO) Delete() error { 741 return f.DirectIO.Close() 742 } 743 744 func TestContainerUsername(t *testing.T) { 745 t.Parallel() 746 747 client, err := newClient(t, address) 748 if err != nil { 749 t.Fatal(err) 750 } 751 defer client.Close() 752 753 var ( 754 image Image 755 ctx, cancel = testContext(t) 756 id = t.Name() 757 ) 758 defer cancel() 759 760 image, err = client.GetImage(ctx, testImage) 761 if err != nil { 762 t.Fatal(err) 763 } 764 direct, err := newDirectIO(ctx, false) 765 if err != nil { 766 t.Fatal(err) 767 } 768 defer direct.Delete() 769 var ( 770 wg sync.WaitGroup 771 buf = bytes.NewBuffer(nil) 772 ) 773 wg.Add(1) 
774 go func() { 775 defer wg.Done() 776 io.Copy(buf, direct.Stdout) 777 }() 778 779 // squid user in the alpine image has a uid of 31 780 container, err := client.NewContainer(ctx, id, 781 WithNewSnapshot(id, image), 782 WithNewSpec(oci.WithImageConfig(image), oci.WithUsername("squid"), oci.WithProcessArgs("id", "-u")), 783 ) 784 if err != nil { 785 t.Fatal(err) 786 } 787 defer container.Delete(ctx, WithSnapshotCleanup) 788 789 task, err := container.NewTask(ctx, direct.IOCreate) 790 if err != nil { 791 t.Fatal(err) 792 } 793 defer task.Delete(ctx) 794 795 statusC, err := task.Wait(ctx) 796 if err != nil { 797 t.Fatal(err) 798 } 799 800 if err := task.Start(ctx); err != nil { 801 t.Fatal(err) 802 } 803 <-statusC 804 805 wg.Wait() 806 807 output := strings.TrimSuffix(buf.String(), "\n") 808 if output != "31" { 809 t.Errorf("expected squid uid to be 31 but received %q", output) 810 } 811 } 812 813 func TestContainerUser(t *testing.T) { 814 t.Parallel() 815 t.Run("UserNameAndGroupName", func(t *testing.T) { testContainerUser(t, "squid:squid", "31:31") }) 816 t.Run("UserIDAndGroupName", func(t *testing.T) { testContainerUser(t, "1001:squid", "1001:31") }) 817 t.Run("UserNameAndGroupID", func(t *testing.T) { testContainerUser(t, "squid:1002", "31:1002") }) 818 t.Run("UserIDAndGroupID", func(t *testing.T) { testContainerUser(t, "1001:1002", "1001:1002") }) 819 } 820 821 func testContainerUser(t *testing.T, userstr, expectedOutput string) { 822 client, err := newClient(t, address) 823 if err != nil { 824 t.Fatal(err) 825 } 826 defer client.Close() 827 828 var ( 829 image Image 830 ctx, cancel = testContext(t) 831 id = strings.Replace(t.Name(), "/", "_", -1) 832 ) 833 defer cancel() 834 835 image, err = client.GetImage(ctx, testImage) 836 if err != nil { 837 t.Fatal(err) 838 } 839 direct, err := newDirectIO(ctx, false) 840 if err != nil { 841 t.Fatal(err) 842 } 843 defer direct.Delete() 844 var ( 845 wg sync.WaitGroup 846 buf = bytes.NewBuffer(nil) 847 ) 848 wg.Add(1) 849 
go func() { 850 defer wg.Done() 851 io.Copy(buf, direct.Stdout) 852 }() 853 854 container, err := client.NewContainer(ctx, id, 855 WithNewSnapshot(id, image), 856 WithNewSpec(oci.WithImageConfig(image), oci.WithUser(userstr), oci.WithProcessArgs("sh", "-c", "echo $(id -u):$(id -g)")), 857 ) 858 if err != nil { 859 t.Fatal(err) 860 } 861 defer container.Delete(ctx, WithSnapshotCleanup) 862 863 task, err := container.NewTask(ctx, direct.IOCreate) 864 if err != nil { 865 t.Fatal(err) 866 } 867 defer task.Delete(ctx) 868 869 statusC, err := task.Wait(ctx) 870 if err != nil { 871 t.Fatal(err) 872 } 873 874 if err := task.Start(ctx); err != nil { 875 t.Fatal(err) 876 } 877 <-statusC 878 879 wg.Wait() 880 881 output := strings.TrimSuffix(buf.String(), "\n") 882 if output != expectedOutput { 883 t.Errorf("expected uid:gid to be %q, but received %q", expectedOutput, output) 884 } 885 } 886 887 func TestContainerAttachProcess(t *testing.T) { 888 t.Parallel() 889 890 if runtime.GOOS == "windows" { 891 // On windows, closing the write side of the pipe closes the read 892 // side, sending an EOF to it and preventing reopening it. 
893 // Hence this test will always fails on windows 894 t.Skip("invalid logic on windows") 895 } 896 897 client, err := newClient(t, address) 898 if err != nil { 899 t.Fatal(err) 900 } 901 defer client.Close() 902 903 var ( 904 image Image 905 ctx, cancel = testContext(t) 906 id = t.Name() 907 ) 908 defer cancel() 909 910 image, err = client.GetImage(ctx, testImage) 911 if err != nil { 912 t.Fatal(err) 913 } 914 915 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "100"))) 916 if err != nil { 917 t.Fatal(err) 918 } 919 defer container.Delete(ctx, WithSnapshotCleanup) 920 921 expected := "hello" + newLine 922 923 // creating IO early for easy resource cleanup 924 direct, err := newDirectIO(ctx, false) 925 if err != nil { 926 t.Fatal(err) 927 } 928 defer direct.Delete() 929 var ( 930 wg sync.WaitGroup 931 buf = bytes.NewBuffer(nil) 932 ) 933 wg.Add(1) 934 go func() { 935 defer wg.Done() 936 io.Copy(buf, direct.Stdout) 937 }() 938 939 task, err := container.NewTask(ctx, empty()) 940 if err != nil { 941 t.Fatal(err) 942 } 943 defer task.Delete(ctx) 944 945 status, err := task.Wait(ctx) 946 if err != nil { 947 t.Error(err) 948 } 949 950 if err := task.Start(ctx); err != nil { 951 t.Fatal(err) 952 } 953 954 spec, err := container.Spec(ctx) 955 if err != nil { 956 t.Fatal(err) 957 } 958 959 processSpec := spec.Process 960 processSpec.Args = []string{"cat"} 961 execID := t.Name() + "_exec" 962 process, err := task.Exec(ctx, execID, processSpec, direct.IOCreate) 963 if err != nil { 964 t.Fatal(err) 965 } 966 processStatusC, err := process.Wait(ctx) 967 if err != nil { 968 t.Fatal(err) 969 } 970 971 if err := process.Start(ctx); err != nil { 972 t.Fatal(err) 973 } 974 975 if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { 976 t.Error(err) 977 } 978 979 if process, err = task.LoadProcess(ctx, execID, direct.IOAttach); err != nil { 980 t.Fatal(err) 981 } 982 983 if _, err := 
fmt.Fprint(direct.Stdin, expected); err != nil { 984 t.Error(err) 985 } 986 987 direct.Stdin.Close() 988 989 if err := process.CloseIO(ctx, WithStdinCloser); err != nil { 990 t.Error(err) 991 } 992 993 <-processStatusC 994 995 wg.Wait() 996 997 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 998 t.Error(err) 999 } 1000 1001 output := buf.String() 1002 1003 // we wrote the same thing after attach 1004 expected = expected + expected 1005 if output != expected { 1006 t.Errorf("expected output %q but received %q", expected, output) 1007 } 1008 <-status 1009 } 1010 1011 func TestContainerLoadUnexistingProcess(t *testing.T) { 1012 t.Parallel() 1013 1014 if runtime.GOOS == "windows" { 1015 // On windows, closing the write side of the pipe closes the read 1016 // side, sending an EOF to it and preventing reopening it. 1017 // Hence this test will always fails on windows 1018 t.Skip("invalid logic on windows") 1019 } 1020 1021 client, err := newClient(t, address) 1022 if err != nil { 1023 t.Fatal(err) 1024 } 1025 defer client.Close() 1026 1027 var ( 1028 image Image 1029 ctx, cancel = testContext(t) 1030 id = t.Name() 1031 ) 1032 defer cancel() 1033 1034 image, err = client.GetImage(ctx, testImage) 1035 if err != nil { 1036 t.Fatal(err) 1037 } 1038 1039 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "100"))) 1040 if err != nil { 1041 t.Fatal(err) 1042 } 1043 defer container.Delete(ctx, WithSnapshotCleanup) 1044 1045 // creating IO early for easy resource cleanup 1046 direct, err := newDirectIO(ctx, false) 1047 if err != nil { 1048 t.Fatal(err) 1049 } 1050 defer direct.Delete() 1051 1052 task, err := container.NewTask(ctx, empty()) 1053 if err != nil { 1054 t.Fatal(err) 1055 } 1056 defer task.Delete(ctx) 1057 1058 status, err := task.Wait(ctx) 1059 if err != nil { 1060 t.Error(err) 1061 } 1062 1063 if err := task.Start(ctx); err != nil { 1064 t.Fatal(err) 1065 } 1066 1067 if 
_, err = task.LoadProcess(ctx, "this-process-does-not-exist", direct.IOAttach); err == nil { 1068 t.Fatal("an error should have occurred when loading a process that does not exist") 1069 } 1070 1071 if !errdefs.IsNotFound(err) { 1072 t.Fatalf("an error of type NotFound should have been returned when loading a process that does not exist, got %#v instead ", err) 1073 } 1074 1075 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 1076 t.Error(err) 1077 } 1078 1079 <-status 1080 } 1081 1082 func TestContainerUserID(t *testing.T) { 1083 t.Parallel() 1084 1085 client, err := newClient(t, address) 1086 if err != nil { 1087 t.Fatal(err) 1088 } 1089 defer client.Close() 1090 1091 var ( 1092 image Image 1093 ctx, cancel = testContext(t) 1094 id = t.Name() 1095 ) 1096 defer cancel() 1097 1098 image, err = client.GetImage(ctx, testImage) 1099 if err != nil { 1100 t.Fatal(err) 1101 } 1102 direct, err := newDirectIO(ctx, false) 1103 if err != nil { 1104 t.Fatal(err) 1105 } 1106 defer direct.Delete() 1107 var ( 1108 wg sync.WaitGroup 1109 buf = bytes.NewBuffer(nil) 1110 ) 1111 wg.Add(1) 1112 go func() { 1113 defer wg.Done() 1114 io.Copy(buf, direct.Stdout) 1115 }() 1116 1117 // adm user in the alpine image has a uid of 3 and gid of 4. 
1118 container, err := client.NewContainer(ctx, id, 1119 WithNewSnapshot(id, image), 1120 WithNewSpec(oci.WithImageConfig(image), oci.WithUserID(3), oci.WithProcessArgs("sh", "-c", "echo $(id -u):$(id -g)")), 1121 ) 1122 if err != nil { 1123 t.Fatal(err) 1124 } 1125 defer container.Delete(ctx, WithSnapshotCleanup) 1126 1127 task, err := container.NewTask(ctx, direct.IOCreate) 1128 if err != nil { 1129 t.Fatal(err) 1130 } 1131 defer task.Delete(ctx) 1132 1133 statusC, err := task.Wait(ctx) 1134 if err != nil { 1135 t.Fatal(err) 1136 } 1137 1138 if err := task.Start(ctx); err != nil { 1139 t.Fatal(err) 1140 } 1141 <-statusC 1142 1143 wg.Wait() 1144 1145 output := strings.TrimSuffix(buf.String(), "\n") 1146 if output != "3:4" { 1147 t.Errorf("expected uid:gid to be 3:4, but received %q", output) 1148 } 1149 } 1150 1151 func TestContainerKillAll(t *testing.T) { 1152 t.Parallel() 1153 1154 client, err := newClient(t, address) 1155 if err != nil { 1156 t.Fatal(err) 1157 } 1158 defer client.Close() 1159 1160 var ( 1161 image Image 1162 ctx, cancel = testContext(t) 1163 id = t.Name() 1164 ) 1165 defer cancel() 1166 1167 image, err = client.GetImage(ctx, testImage) 1168 if err != nil { 1169 t.Fatal(err) 1170 } 1171 1172 container, err := client.NewContainer(ctx, id, 1173 WithNewSnapshot(id, image), 1174 WithNewSpec(oci.WithImageConfig(image), 1175 withProcessArgs("sh", "-c", "top"), 1176 oci.WithHostNamespace(specs.PIDNamespace), 1177 ), 1178 ) 1179 if err != nil { 1180 t.Fatal(err) 1181 } 1182 defer container.Delete(ctx, WithSnapshotCleanup) 1183 1184 stdout := bytes.NewBuffer(nil) 1185 task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(stdout))) 1186 if err != nil { 1187 t.Fatal(err) 1188 } 1189 defer task.Delete(ctx) 1190 1191 statusC, err := task.Wait(ctx) 1192 if err != nil { 1193 t.Fatal(err) 1194 } 1195 1196 if err := task.Start(ctx); err != nil { 1197 t.Fatal(err) 1198 } 1199 1200 if err := task.Kill(ctx, syscall.SIGKILL, WithKillAll); err != nil { 
		t.Error(err)
	}

	<-statusC
	if _, err := task.Delete(ctx); err != nil {
		t.Fatal(err)
	}
}

// TestDaemonRestartWithRunningShim restarts the containerd daemon while a
// task's shim keeps running, then verifies the task can be re-waited on,
// killed, and that its process is actually gone afterwards.
func TestDaemonRestartWithRunningShim(t *testing.T) {
	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100")))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Error(err)
	}

	pid := task.Pid()
	if pid < 1 {
		t.Fatalf("invalid task pid %d", pid)
	}

	if err := task.Start(ctx); err != nil {
		t.Fatal(err)
	}

	// Restart the daemon; the in-flight Wait is expected to fail when the
	// client's connection to the daemon is torn down.
	var exitStatus ExitStatus
	if err := ctrd.Restart(func() {
		exitStatus = <-statusC
	}); err != nil {
		t.Fatal(err)
	}

	if exitStatus.Error() == nil {
		t.Errorf(`first task.Wait() should have failed with "transport is closing"`)
	}

	// Wait (bounded to one second) for the daemon to come back up.
	waitCtx, cancel := context.WithTimeout(ctx, 1*time.Second)
	c, err := ctrd.waitForStart(waitCtx)
	cancel()
	if err != nil {
		t.Fatal(err)
	}
	c.Close()

	// A fresh Wait against the restarted daemon must succeed.
	statusC, err = task.Wait(ctx)
	if err != nil {
		t.Error(err)
	}

	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
		t.Fatal(err)
	}

	<-statusC

	// Signal 0 probes for existence; ESRCH means the process is gone.
	if err := unix.Kill(int(pid), 0); err != unix.ESRCH {
		t.Errorf("pid %d still exists", pid)
	}
}

// TestContainerRuntimeOptionsv1 checks that v1 (linux runtime) RuncOptions are
// propagated to task creation: a bogus runtime binary name must make
// NewTask fail with an error mentioning that name.
func TestContainerRuntimeOptionsv1(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(
		ctx, id,
		WithNewSnapshot(id, image),
		WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)),
		WithRuntime(plugin.RuntimeLinuxV1, &runctypes.RuncOptions{Runtime: "no-runc"}),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err == nil {
		t.Errorf("task creation should have failed")
		task.Delete(ctx)
		return
	}
	if !strings.Contains(err.Error(), `"no-runc"`) {
		t.Errorf("task creation should have failed because of lack of executable. Instead failed with: %v", err.Error())
	}
}

// TestContainerRuntimeOptionsv2 is the v2 (runc shim) analogue of the v1 test
// above, using options.Options.BinaryName instead of RuncOptions.Runtime.
func TestContainerRuntimeOptionsv2(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(
		ctx, id,
		WithNewSnapshot(id, image),
		WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)),
		WithRuntime(plugin.RuntimeRuncV1, &options.Options{BinaryName: "no-runc"}),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err == nil {
		t.Errorf("task creation should have failed")
		task.Delete(ctx)
		return
	}
	if !strings.Contains(err.Error(), `"no-runc"`) {
		t.Errorf("task creation should have failed because of lack of executable. Instead failed with: %v", err.Error())
	}
}

// initContainerAndCheckChildrenDieOnKill starts a container whose init runs
// `sh -c "sleep 42; echo hi"`, SIGKILLs init, and asserts (via `ps ax`) that
// the child "sleep 42" process did not survive.
func initContainerAndCheckChildrenDieOnKill(t *testing.T, opts ...oci.SpecOpts) {
	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	opts = append(opts, oci.WithImageConfig(image))
	opts = append(opts, withProcessArgs("sh", "-c", "sleep 42; echo hi"))

	container, err := client.NewContainer(ctx, id,
		WithNewSnapshot(id, image),
		WithNewSpec(opts...),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	stdout := bytes.NewBuffer(nil)
	task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(stdout)))
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := task.Start(ctx); err != nil {
		t.Fatal(err)
	}

	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
		t.Error(err)
	}

	// Give the shim time to reap the init process and kill the orphans
	select {
	case <-statusC:
	case <-time.After(100 * time.Millisecond):
	}

	b, err := exec.Command("ps", "ax").CombinedOutput()
	if err != nil {
		t.Fatal(err)
	}

	if strings.Contains(string(b), "sleep 42") {
		t.Fatalf("killing init didn't kill all its children:\n%v", string(b))
	}

	if _, err := task.Delete(ctx, WithProcessKill); err != nil {
		t.Error(err)
	}
}

// TestContainerKillInitPidHost runs the kill-children check with the host PID
// namespace shared into the container.
func TestContainerKillInitPidHost(t *testing.T) {
	initContainerAndCheckChildrenDieOnKill(t, oci.WithHostNamespace(specs.PIDNamespace))
}

// TestContainerKillInitKillsChildWhenNotHostPid runs the same check with the
// container's own PID namespace (the default spec).
func TestContainerKillInitKillsChildWhenNotHostPid(t *testing.T) {
	initContainerAndCheckChildrenDieOnKill(t)
}

// TestUserNamespaces exercises user-namespaced containers with both writable
// and read-only root filesystems.
func TestUserNamespaces(t *testing.T) {
	t.Parallel()
	t.Run("WritableRootFS", func(t *testing.T) { testUserNamespaces(t, false) })
	// see #1373 and runc#1572
	t.Run("ReadonlyRootFS", func(t *testing.T) { testUserNamespaces(t, true) })
}

// checkUserNS skips the calling test when the kernel refuses to clone a
// process into a new user namespace.
func checkUserNS(t *testing.T) {
	cmd := exec.Command("true")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWUSER,
	}

	if err := cmd.Run(); err != nil {
		t.Skip("User namespaces are unavailable")
	}
}

// testUserNamespaces runs a container with UID/GID remapping (container root
// mapped to host 1000/2000) and verifies the task's exit status (7) is
// reported both by Wait and by Delete.
func testUserNamespaces(t *testing.T, readonlyRootFS bool) {
	checkUserNS(t)

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		// subtest names contain "/", which is not valid in a container ID
		id = strings.Replace(t.Name(), "/", "-", -1)
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	opts := []NewContainerOpts{WithNewSpec(oci.WithImageConfig(image),
		withExitStatus(7),
		oci.WithUserNamespace([]specs.LinuxIDMapping{
			{
				ContainerID: 0,
				HostID:      1000,
				Size:        10000,
			},
		}, []specs.LinuxIDMapping{
			{
				ContainerID: 0,
				HostID:      2000,
				Size:        10000,
			},
		}),
	)}
	if readonlyRootFS {
		opts = append([]NewContainerOpts{WithRemappedSnapshotView(id, image, 1000, 2000)}, opts...)
	} else {
		opts = append([]NewContainerOpts{WithRemappedSnapshot(id, image, 1000, 2000)}, opts...)
	}

	container, err := client.NewContainer(ctx, id, opts...)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	// Pick the create-options type matching the configured runtime (v2 runc
	// shim vs. legacy runtime).
	var copts interface{}
	if CheckRuntime(client.runtime, "io.containerd.runc") {
		copts = &options.Options{
			IoUid: 1000,
			IoGid: 2000,
		}
	} else {
		copts = &runctypes.CreateOptions{
			IoUid: 1000,
			IoGid: 2000,
		}
	}

	task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio), func(_ context.Context, client *Client, r *TaskInfo) error {
		r.Options = copts
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if pid := task.Pid(); pid < 1 {
		t.Errorf("invalid task pid %d", pid)
	}
	if err := task.Start(ctx); err != nil {
		t.Error(err)
		task.Delete(ctx)
		return
	}
	status := <-statusC
	code, _, err := status.Result()
	if err != nil {
		t.Fatal(err)
	}
	if code != 7 {
		t.Errorf("expected status 7 from wait but received %d", code)
	}
	deleteStatus, err := task.Delete(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if ec := deleteStatus.ExitCode(); ec != 7 {
		t.Errorf("expected status 7 from delete but received %d", ec)
	}
}

// TestTaskResize verifies that resizing a task's console to 32x32 succeeds.
func TestTaskResize(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if err := task.Resize(ctx, 32, 32); err != nil {
		t.Fatal(err)
	}
	task.Kill(ctx, syscall.SIGKILL)
	<-statusC
}

// TestContainerImage checks that Container.Image returns the image the
// container was created with.
func TestContainerImage(t *testing.T) {
	t.Parallel()

	ctx, cancel := testContext(t)
	defer cancel()
	id := t.Name()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(ctx, id, WithNewSpec(), WithImage(image))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx)

	i, err := container.Image(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if i.Name() != image.Name() {
		t.Fatalf("expected container image name %s but received %s", image.Name(), i.Name())
	}
}

// TestContainerNoImage checks that Container.Image on an image-less container
// fails with a NotFound error.
func TestContainerNoImage(t *testing.T) {
	t.Parallel()

	ctx, cancel := testContext(t)
	defer cancel()
	id := t.Name()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	container, err := client.NewContainer(ctx, id, WithNewSpec())
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx)

	_, err = container.Image(ctx)
	if err == nil {
		t.Fatal("error should not be nil when container is created without an image")
	}
	if !errdefs.IsNotFound(err) {
		t.Fatalf("expected error to be %s but received %s", errdefs.ErrNotFound, err)
	}
}

// TestUIDNoGID verifies that oci.WithUserID(1000) sets only the UID in the
// generated spec, leaving the GID at its zero value.
func TestUIDNoGID(t *testing.T) {
	t.Parallel()

	ctx, cancel := testContext(t)
	defer cancel()
	id := t.Name()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithUserID(1000)))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx)

	spec, err := container.Spec(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if uid := spec.Process.User.UID; uid != 1000 {
		t.Fatalf("expected uid 1000 but received %d", uid)
	}
	if gid := spec.Process.User.GID; gid != 0 {
		t.Fatalf("expected gid 0 but received %d", gid)
	}
}

// TestBindLowPortNonRoot checks that a non-root (uid/gid 1000) process cannot
// bind a privileged port: `nc -l -p 80` must exit with status 1.
func TestBindLowPortNonRoot(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	container, err := client.NewContainer(ctx, id,
		WithNewSnapshot(id, image),
		WithNewSpec(oci.WithImageConfig(image), withProcessArgs("nc", "-l", "-p", "80"), oci.WithUIDGID(1000, 1000)),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := task.Start(ctx); err != nil {
		t.Fatal(err)
	}
	status := <-statusC
	code, _, err := status.Result()
	if err != nil {
		t.Fatal(err)
	}
	if code != 1 {
		t.Errorf("expected status 1 from wait but received %d", code)
	}
	if _, err := task.Delete(ctx); err != nil {
		t.Fatal(err)
	}
}

// TestBindLowPortNonOpt checks that with the ambient CAP_NET_BIND_SERVICE
// capability, a non-root process CAN bind port 80: nc keeps listening until
// it is SIGTERMed, so the expected exit status is 143 (128 + SIGTERM).
func TestBindLowPortNonOpt(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	container, err := client.NewContainer(ctx, id,
		WithNewSnapshot(id, image),
		WithNewSpec(oci.WithImageConfig(image), withProcessArgs("nc", "-l", "-p", "80"), oci.WithUIDGID(1000, 1000), oci.WithAmbientCapabilities([]string{"CAP_NET_BIND_SERVICE"})),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := task.Start(ctx); err != nil {
		t.Fatal(err)
	}
	// Let nc run for a bit to prove the bind succeeded, then terminate it.
	go func() {
		time.Sleep(2 * time.Second)
		task.Kill(ctx, unix.SIGTERM)
	}()
	status := <-statusC
	code, _, err := status.Result()
	if err != nil {
		t.Fatal(err)
	}
	// 128 + sigterm
	if code != 143 {
		t.Errorf("expected status 143 from wait but received %d", code)
	}
	if _, err := task.Delete(ctx); err != nil {
		t.Fatal(err)
	}
}

// TestContainerNoSTDIN runs a task with a nil stdin stream and verifies it
// still starts and exits cleanly with status 0.
func TestContainerNoSTDIN(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(0)))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(nil, ioutil.Discard, ioutil.Discard)))
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if err := task.Start(ctx); err != nil {
		t.Fatal(err)
	}
	status := <-statusC
	code, _, err := status.Result()
	if err != nil {
		t.Fatal(err)
	}
	if code != 0 {
		t.Errorf("expected status 0 from wait but received %d", code)
	}
}

// TestShimOOMScore places the shim in a dedicated cgroup and verifies every
// process in it has oom_score_adj = containerd's score + 1 (capped at
// sys.OOMScoreAdjMax). Handles both cgroup v1 and unified (v2) hierarchies.
func TestShimOOMScore(t *testing.T) {
	containerdPid := ctrd.cmd.Process.Pid
	containerdScore, err := sys.GetOOMScoreAdj(containerdPid)
	if err != nil {
		t.Fatal(err)
	}

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	path := "/containerd/oomshim"
	var (
		cg  cgroups.Cgroup
		cg2 *cgroupsv2.Manager
	)
	if cgroups.Mode() == cgroups.Unified {
		cg2, err = cgroupsv2.NewManager("/sys/fs/cgroup", path, &cgroupsv2.Resources{})
		if err != nil {
			t.Fatal(err)
		}
		defer cg2.Delete()
	} else {
		cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
		if err != nil {
			t.Fatal(err)
		}
		defer cg.Delete()
	}

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30")))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty(), WithShimCgroup(path))
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	expectedScore := containerdScore + 1
	if expectedScore > sys.OOMScoreAdjMax {
		expectedScore = sys.OOMScoreAdjMax
	}

	// find the shim's pid
	if cgroups.Mode() == cgroups.Unified {
		processes, err := cg2.Procs(false)
		if err != nil {
			t.Fatal(err)
		}
		for _, pid := range processes {
			score, err := sys.GetOOMScoreAdj(int(pid))
			if err != nil {
				t.Fatal(err)
			}
			if score != expectedScore {
				t.Errorf("expected score %d but got %d for shim process", expectedScore, score)
			}
		}
	} else {
		processes, err := cg.Processes(cgroups.Devices, false)
		if err != nil {
			t.Fatal(err)
		}
		for _, p := range processes {
			score, err := sys.GetOOMScoreAdj(p.Pid)
			if err != nil {
				t.Fatal(err)
			}
			if score != expectedScore {
				t.Errorf("expected score %d but got %d for shim process", expectedScore, score)
			}
		}
	}

	if err := task.Kill(ctx, unix.SIGKILL); err != nil {
		t.Fatal(err)
	}

	<-statusC
}

// TestTaskSpec verifies Task.Spec returns a non-nil spec both for a freshly
// created task and for the same task re-loaded via container.Task.
func TestTaskSpec(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), longCommand))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	spec, err := task.Spec(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if spec == nil {
		t.Fatal("spec from task is nil")
	}
	direct, err := newDirectIO(ctx, false)
	if err != nil {
		t.Fatal(err)
	}
	defer direct.Delete()

	// Re-load the running task and check its spec is retrievable too.
	lt, err := container.Task(ctx, direct.IOAttach)
	if err != nil {
		t.Fatal(err)
	}

	spec, err = lt.Spec(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if spec == nil {
		t.Fatal("spec from loaded task is nil")
	}

	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
		t.Fatal(err)
	}
	<-statusC
}