github.com/containerd/containerd@v22.0.0-20200918172823-438c87b8e050+incompatible/container_linux_test.go (about) 1 // +build linux 2 3 /* 4 Copyright The containerd Authors. 5 6 Licensed under the Apache License, Version 2.0 (the "License"); 7 you may not use this file except in compliance with the License. 8 You may obtain a copy of the License at 9 10 http://www.apache.org/licenses/LICENSE-2.0 11 12 Unless required by applicable law or agreed to in writing, software 13 distributed under the License is distributed on an "AS IS" BASIS, 14 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 See the License for the specific language governing permissions and 16 limitations under the License. 17 */ 18 19 package containerd 20 21 import ( 22 "bytes" 23 "context" 24 "fmt" 25 "io" 26 "io/ioutil" 27 "os" 28 "os/exec" 29 "path/filepath" 30 "runtime" 31 "strings" 32 "sync" 33 "syscall" 34 "testing" 35 "time" 36 37 "github.com/containerd/cgroups" 38 cgroupsv2 "github.com/containerd/cgroups/v2" 39 "github.com/containerd/containerd/cio" 40 "github.com/containerd/containerd/containers" 41 "github.com/containerd/containerd/errdefs" 42 "github.com/containerd/containerd/oci" 43 "github.com/containerd/containerd/plugin" 44 "github.com/containerd/containerd/runtime/linux/runctypes" 45 "github.com/containerd/containerd/runtime/v2/runc/options" 46 "github.com/containerd/containerd/sys" 47 specs "github.com/opencontainers/runtime-spec/specs-go" 48 "golang.org/x/sys/unix" 49 ) 50 51 func TestTaskUpdate(t *testing.T) { 52 t.Parallel() 53 54 client, err := newClient(t, address) 55 if err != nil { 56 t.Fatal(err) 57 } 58 defer client.Close() 59 60 var ( 61 ctx, cancel = testContext(t) 62 id = t.Name() 63 ) 64 defer cancel() 65 66 image, err := client.GetImage(ctx, testImage) 67 if err != nil { 68 t.Fatal(err) 69 } 70 limit := int64(32 * 1024 * 1024) 71 memory := func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { 72 
s.Linux.Resources.Memory = &specs.LinuxMemory{ 73 Limit: &limit, 74 } 75 return nil 76 } 77 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), 78 WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"), memory)) 79 if err != nil { 80 t.Fatal(err) 81 } 82 defer container.Delete(ctx, WithSnapshotCleanup) 83 84 task, err := container.NewTask(ctx, empty()) 85 if err != nil { 86 t.Fatal(err) 87 } 88 defer task.Delete(ctx) 89 90 statusC, err := task.Wait(ctx) 91 if err != nil { 92 t.Fatal(err) 93 } 94 95 var ( 96 cgroup cgroups.Cgroup 97 cgroup2 *cgroupsv2.Manager 98 ) 99 // check that the task has a limit of 32mb 100 if cgroups.Mode() == cgroups.Unified { 101 groupPath, err := cgroupsv2.PidGroupPath(int(task.Pid())) 102 if err != nil { 103 t.Fatal(err) 104 } 105 cgroup2, err = cgroupsv2.LoadManager("/sys/fs/cgroup", groupPath) 106 if err != nil { 107 t.Fatal(err) 108 } 109 stat, err := cgroup2.Stat() 110 if err != nil { 111 t.Fatal(err) 112 } 113 if int64(stat.Memory.UsageLimit) != limit { 114 t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.UsageLimit) 115 } 116 } else { 117 cgroup, err = cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid()))) 118 if err != nil { 119 t.Fatal(err) 120 } 121 stat, err := cgroup.Stat(cgroups.IgnoreNotExist) 122 if err != nil { 123 t.Fatal(err) 124 } 125 if int64(stat.Memory.Usage.Limit) != limit { 126 t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit) 127 } 128 } 129 limit = 64 * 1024 * 1024 130 if err := task.Update(ctx, WithResources(&specs.LinuxResources{ 131 Memory: &specs.LinuxMemory{ 132 Limit: &limit, 133 }, 134 })); err != nil { 135 t.Error(err) 136 } 137 // check that the task has a limit of 64mb 138 if cgroups.Mode() == cgroups.Unified { 139 stat, err := cgroup2.Stat() 140 if err != nil { 141 t.Fatal(err) 142 } 143 if int64(stat.Memory.UsageLimit) != limit { 144 t.Errorf("expected memory limit to be set to 
%d but received %d", limit, stat.Memory.UsageLimit) 145 } 146 } else { 147 stat, err := cgroup.Stat(cgroups.IgnoreNotExist) 148 if err != nil { 149 t.Fatal(err) 150 } 151 if int64(stat.Memory.Usage.Limit) != limit { 152 t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit) 153 } 154 } 155 if err := task.Kill(ctx, unix.SIGKILL); err != nil { 156 t.Fatal(err) 157 } 158 159 <-statusC 160 } 161 162 func TestShimInCgroup(t *testing.T) { 163 t.Parallel() 164 165 client, err := newClient(t, address) 166 if err != nil { 167 t.Fatal(err) 168 } 169 defer client.Close() 170 var ( 171 ctx, cancel = testContext(t) 172 id = t.Name() 173 ) 174 defer cancel() 175 176 image, err := client.GetImage(ctx, testImage) 177 if err != nil { 178 t.Fatal(err) 179 } 180 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "30"))) 181 if err != nil { 182 t.Fatal(err) 183 } 184 defer container.Delete(ctx, WithSnapshotCleanup) 185 // create a cgroup for the shim to use 186 path := "/containerd/shim" 187 var ( 188 cg cgroups.Cgroup 189 cg2 *cgroupsv2.Manager 190 ) 191 if cgroups.Mode() == cgroups.Unified { 192 cg2, err = cgroupsv2.NewManager("/sys/fs/cgroup", path, &cgroupsv2.Resources{}) 193 if err != nil { 194 t.Fatal(err) 195 } 196 defer cg2.Delete() 197 } else { 198 cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{}) 199 if err != nil { 200 t.Fatal(err) 201 } 202 defer cg.Delete() 203 } 204 205 task, err := container.NewTask(ctx, empty(), WithShimCgroup(path)) 206 if err != nil { 207 t.Fatal(err) 208 } 209 defer task.Delete(ctx) 210 211 statusC, err := task.Wait(ctx) 212 if err != nil { 213 t.Fatal(err) 214 } 215 216 // check to see if the shim is inside the cgroup 217 if cgroups.Mode() == cgroups.Unified { 218 processes, err := cg2.Procs(false) 219 if err != nil { 220 t.Fatal(err) 221 } 222 if len(processes) == 0 { 223 
t.Errorf("created cgroup should have at least one process inside: %d", len(processes)) 224 } 225 } else { 226 processes, err := cg.Processes(cgroups.Devices, false) 227 if err != nil { 228 t.Fatal(err) 229 } 230 if len(processes) == 0 { 231 t.Errorf("created cgroup should have at least one process inside: %d", len(processes)) 232 } 233 } 234 if err := task.Kill(ctx, unix.SIGKILL); err != nil { 235 t.Fatal(err) 236 } 237 238 <-statusC 239 } 240 241 func TestDaemonRestart(t *testing.T) { 242 client, err := newClient(t, address) 243 if err != nil { 244 t.Fatal(err) 245 } 246 defer client.Close() 247 248 var ( 249 image Image 250 ctx, cancel = testContext(t) 251 id = t.Name() 252 ) 253 defer cancel() 254 255 image, err = client.GetImage(ctx, testImage) 256 if err != nil { 257 t.Fatal(err) 258 } 259 260 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) 261 if err != nil { 262 t.Fatal(err) 263 } 264 defer container.Delete(ctx, WithSnapshotCleanup) 265 266 task, err := container.NewTask(ctx, empty()) 267 if err != nil { 268 t.Fatal(err) 269 } 270 defer task.Delete(ctx) 271 272 statusC, err := task.Wait(ctx) 273 if err != nil { 274 t.Fatal(err) 275 } 276 277 if err := task.Start(ctx); err != nil { 278 t.Fatal(err) 279 } 280 281 var exitStatus ExitStatus 282 if err := ctrd.Restart(func() { 283 exitStatus = <-statusC 284 }); err != nil { 285 t.Fatal(err) 286 } 287 288 if exitStatus.Error() == nil { 289 t.Errorf(`first task.Wait() should have failed with "transport is closing"`) 290 } 291 292 waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) 293 serving, err := client.IsServing(waitCtx) 294 waitCancel() 295 if !serving { 296 t.Fatalf("containerd did not start within 2s: %v", err) 297 } 298 299 statusC, err = task.Wait(ctx) 300 if err != nil { 301 t.Fatal(err) 302 } 303 304 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 305 t.Fatal(err) 306 } 307 308 
<-statusC 309 } 310 311 func TestShimDoesNotLeakPipes(t *testing.T) { 312 containerdPid := ctrd.cmd.Process.Pid 313 initialPipes, err := numPipes(containerdPid) 314 if err != nil { 315 t.Fatal(err) 316 } 317 318 client, err := newClient(t, address) 319 if err != nil { 320 t.Fatal(err) 321 } 322 defer client.Close() 323 324 var ( 325 image Image 326 ctx, cancel = testContext(t) 327 id = t.Name() 328 ) 329 defer cancel() 330 331 image, err = client.GetImage(ctx, testImage) 332 if err != nil { 333 t.Fatal(err) 334 } 335 336 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) 337 if err != nil { 338 t.Fatal(err) 339 } 340 341 task, err := container.NewTask(ctx, empty()) 342 if err != nil { 343 t.Fatal(err) 344 } 345 346 exitChannel, err := task.Wait(ctx) 347 if err != nil { 348 t.Fatal(err) 349 } 350 351 if err := task.Start(ctx); err != nil { 352 t.Fatal(err) 353 } 354 355 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 356 t.Fatal(err) 357 } 358 359 <-exitChannel 360 361 if _, err := task.Delete(ctx); err != nil { 362 t.Fatal(err) 363 } 364 365 if err := container.Delete(ctx, WithSnapshotCleanup); err != nil { 366 t.Fatal(err) 367 } 368 369 currentPipes, err := numPipes(containerdPid) 370 if err != nil { 371 t.Fatal(err) 372 } 373 374 if initialPipes != currentPipes { 375 t.Errorf("Pipes have leaked after container has been deleted. 
Initially there were %d pipes, after container deletion there were %d pipes", initialPipes, currentPipes) 376 } 377 } 378 379 func numPipes(pid int) (int, error) { 380 cmd := exec.Command("sh", "-c", fmt.Sprintf("lsof -p %d | grep FIFO", pid)) 381 382 var stdout bytes.Buffer 383 cmd.Stdout = &stdout 384 if err := cmd.Run(); err != nil { 385 return 0, err 386 } 387 return strings.Count(stdout.String(), "\n"), nil 388 } 389 390 func TestDaemonReconnectsToShimIOPipesOnRestart(t *testing.T) { 391 client, err := newClient(t, address) 392 if err != nil { 393 t.Fatal(err) 394 } 395 defer client.Close() 396 397 var ( 398 image Image 399 ctx, cancel = testContext(t) 400 id = t.Name() 401 ) 402 defer cancel() 403 404 image, err = client.GetImage(ctx, testImage) 405 if err != nil { 406 t.Fatal(err) 407 } 408 409 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"))) 410 if err != nil { 411 t.Fatal(err) 412 } 413 defer container.Delete(ctx, WithSnapshotCleanup) 414 415 task, err := container.NewTask(ctx, empty()) 416 if err != nil { 417 t.Fatal(err) 418 } 419 defer task.Delete(ctx) 420 421 _, err = task.Wait(ctx) 422 if err != nil { 423 t.Fatal(err) 424 } 425 426 if err := task.Start(ctx); err != nil { 427 t.Fatal(err) 428 } 429 430 if err := ctrd.Restart(nil); err != nil { 431 t.Fatal(err) 432 } 433 434 waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second) 435 serving, err := client.IsServing(waitCtx) 436 waitCancel() 437 if !serving { 438 t.Fatalf("containerd did not start within 2s: %v", err) 439 } 440 441 // After we restared containerd we write some messages to the log pipes, simulating shim writing stuff there. 
442 // Then we make sure that these messages are available on the containerd log thus proving that the server reconnected to the log pipes 443 runtimeVersion := getRuntimeVersion() 444 logDirPath := getLogDirPath(runtimeVersion, id) 445 446 switch runtimeVersion { 447 case "v1": 448 writeToFile(t, filepath.Join(logDirPath, "shim.stdout.log"), fmt.Sprintf("%s writing to stdout\n", id)) 449 writeToFile(t, filepath.Join(logDirPath, "shim.stderr.log"), fmt.Sprintf("%s writing to stderr\n", id)) 450 case "v2": 451 writeToFile(t, filepath.Join(logDirPath, "log"), fmt.Sprintf("%s writing to log\n", id)) 452 } 453 454 statusC, err := task.Wait(ctx) 455 if err != nil { 456 t.Fatal(err) 457 } 458 459 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 460 t.Fatal(err) 461 } 462 463 <-statusC 464 465 stdioContents, err := ioutil.ReadFile(ctrdStdioFilePath) 466 if err != nil { 467 t.Fatal(err) 468 } 469 470 switch runtimeVersion { 471 case "v1": 472 if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to stdout", id)) { 473 t.Fatal("containerd did not connect to the shim stdout pipe") 474 } 475 if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to stderr", id)) { 476 t.Fatal("containerd did not connect to the shim stderr pipe") 477 } 478 case "v2": 479 if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to log", id)) { 480 t.Fatal("containerd did not connect to the shim log pipe") 481 } 482 } 483 } 484 485 func writeToFile(t *testing.T, filePath, message string) { 486 writer, err := os.OpenFile(filePath, os.O_WRONLY, 0600) 487 if err != nil { 488 t.Fatal(err) 489 } 490 if _, err := writer.WriteString(message); err != nil { 491 t.Fatal(err) 492 } 493 if err := writer.Close(); err != nil { 494 t.Fatal(err) 495 } 496 } 497 498 func getLogDirPath(runtimeVersion, id string) string { 499 switch runtimeVersion { 500 case "v1": 501 return filepath.Join(defaultRoot, plugin.RuntimeLinuxV1, testNamespace, id) 502 case "v2": 503 return 
filepath.Join(defaultState, "io.containerd.runtime.v2.task", testNamespace, id) 504 default: 505 panic(fmt.Errorf("Unsupported runtime version %s", runtimeVersion)) 506 } 507 } 508 509 func getRuntimeVersion() string { 510 switch rt := os.Getenv("TEST_RUNTIME"); rt { 511 case plugin.RuntimeLinuxV1: 512 return "v1" 513 default: 514 return "v2" 515 } 516 } 517 518 func TestContainerPTY(t *testing.T) { 519 t.Parallel() 520 521 client, err := newClient(t, address) 522 if err != nil { 523 t.Fatal(err) 524 } 525 defer client.Close() 526 527 var ( 528 image Image 529 ctx, cancel = testContext(t) 530 id = t.Name() 531 ) 532 defer cancel() 533 534 image, err = client.GetImage(ctx, testImage) 535 if err != nil { 536 t.Fatal(err) 537 } 538 539 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithTTY, withProcessArgs("echo", "hello"))) 540 if err != nil { 541 t.Fatal(err) 542 } 543 defer container.Delete(ctx, WithSnapshotCleanup) 544 545 direct, err := newDirectIO(ctx, true) 546 if err != nil { 547 t.Fatal(err) 548 } 549 defer direct.Delete() 550 var ( 551 wg sync.WaitGroup 552 buf = bytes.NewBuffer(nil) 553 ) 554 wg.Add(1) 555 go func() { 556 defer wg.Done() 557 io.Copy(buf, direct.Stdout) 558 }() 559 560 task, err := container.NewTask(ctx, direct.IOCreate) 561 if err != nil { 562 t.Fatal(err) 563 } 564 defer task.Delete(ctx) 565 566 status, err := task.Wait(ctx) 567 if err != nil { 568 t.Error(err) 569 } 570 571 if err := task.Start(ctx); err != nil { 572 t.Fatal(err) 573 } 574 575 <-status 576 wg.Wait() 577 578 if err := direct.Close(); err != nil { 579 t.Error(err) 580 } 581 582 out := buf.String() 583 if !strings.ContainsAny(fmt.Sprintf("%#q", out), `\x00`) { 584 t.Fatal(`expected \x00 in output`) 585 } 586 } 587 588 func TestContainerAttach(t *testing.T) { 589 t.Parallel() 590 591 if runtime.GOOS == "windows" { 592 // On windows, closing the write side of the pipe closes the read 593 // side, sending 
an EOF to it and preventing reopening it. 594 // Hence this test will always fails on windows 595 t.Skip("invalid logic on windows") 596 } 597 598 client, err := newClient(t, address) 599 if err != nil { 600 t.Fatal(err) 601 } 602 defer client.Close() 603 604 var ( 605 image Image 606 ctx, cancel = testContext(t) 607 id = t.Name() 608 ) 609 defer cancel() 610 611 image, err = client.GetImage(ctx, testImage) 612 if err != nil { 613 t.Fatal(err) 614 } 615 616 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withCat())) 617 if err != nil { 618 t.Fatal(err) 619 } 620 defer container.Delete(ctx, WithSnapshotCleanup) 621 622 expected := "hello" + newLine 623 624 direct, err := newDirectIO(ctx, false) 625 if err != nil { 626 t.Fatal(err) 627 } 628 defer direct.Delete() 629 var ( 630 wg sync.WaitGroup 631 buf = bytes.NewBuffer(nil) 632 ) 633 wg.Add(1) 634 go func() { 635 defer wg.Done() 636 io.Copy(buf, direct.Stdout) 637 }() 638 639 task, err := container.NewTask(ctx, direct.IOCreate) 640 if err != nil { 641 t.Fatal(err) 642 } 643 defer task.Delete(ctx) 644 645 status, err := task.Wait(ctx) 646 if err != nil { 647 t.Error(err) 648 } 649 650 if err := task.Start(ctx); err != nil { 651 t.Fatal(err) 652 } 653 654 if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { 655 t.Error(err) 656 } 657 658 // load the container and re-load the task 659 if container, err = client.LoadContainer(ctx, id); err != nil { 660 t.Fatal(err) 661 } 662 663 if task, err = container.Task(ctx, direct.IOAttach); err != nil { 664 t.Fatal(err) 665 } 666 667 if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { 668 t.Error(err) 669 } 670 671 direct.Stdin.Close() 672 673 if err := task.CloseIO(ctx, WithStdinCloser); err != nil { 674 t.Error(err) 675 } 676 677 <-status 678 679 wg.Wait() 680 if _, err := task.Delete(ctx); err != nil { 681 t.Error(err) 682 } 683 684 output := buf.String() 685 686 // we wrote the same thing 
after attach 687 expected = expected + expected 688 if output != expected { 689 t.Errorf("expected output %q but received %q", expected, output) 690 } 691 } 692 693 func newDirectIO(ctx context.Context, terminal bool) (*directIO, error) { 694 fifos, err := cio.NewFIFOSetInDir("", "", terminal) 695 if err != nil { 696 return nil, err 697 } 698 dio, err := cio.NewDirectIO(ctx, fifos) 699 if err != nil { 700 return nil, err 701 } 702 return &directIO{DirectIO: *dio}, nil 703 } 704 705 type directIO struct { 706 cio.DirectIO 707 } 708 709 // ioCreate returns IO available for use with task creation 710 func (f *directIO) IOCreate(id string) (cio.IO, error) { 711 return f, nil 712 } 713 714 // ioAttach returns IO available for use with task attachment 715 func (f *directIO) IOAttach(set *cio.FIFOSet) (cio.IO, error) { 716 return f, nil 717 } 718 719 func (f *directIO) Cancel() { 720 // nothing to cancel as all operations are handled externally 721 } 722 723 // Close closes all open fds 724 func (f *directIO) Close() error { 725 err := f.Stdin.Close() 726 if f.Stdout != nil { 727 if err2 := f.Stdout.Close(); err == nil { 728 err = err2 729 } 730 } 731 if f.Stderr != nil { 732 if err2 := f.Stderr.Close(); err == nil { 733 err = err2 734 } 735 } 736 return err 737 } 738 739 // Delete removes the underlying directory containing fifos 740 func (f *directIO) Delete() error { 741 return f.DirectIO.Close() 742 } 743 744 func TestContainerUsername(t *testing.T) { 745 t.Parallel() 746 747 client, err := newClient(t, address) 748 if err != nil { 749 t.Fatal(err) 750 } 751 defer client.Close() 752 753 var ( 754 image Image 755 ctx, cancel = testContext(t) 756 id = t.Name() 757 ) 758 defer cancel() 759 760 image, err = client.GetImage(ctx, testImage) 761 if err != nil { 762 t.Fatal(err) 763 } 764 direct, err := newDirectIO(ctx, false) 765 if err != nil { 766 t.Fatal(err) 767 } 768 defer direct.Delete() 769 var ( 770 wg sync.WaitGroup 771 buf = bytes.NewBuffer(nil) 772 ) 773 wg.Add(1) 
774 go func() { 775 defer wg.Done() 776 io.Copy(buf, direct.Stdout) 777 }() 778 779 // squid user in the alpine image has a uid of 31 780 container, err := client.NewContainer(ctx, id, 781 WithNewSnapshot(id, image), 782 WithNewSpec(oci.WithImageConfig(image), oci.WithUsername("squid"), oci.WithProcessArgs("id", "-u")), 783 ) 784 if err != nil { 785 t.Fatal(err) 786 } 787 defer container.Delete(ctx, WithSnapshotCleanup) 788 789 task, err := container.NewTask(ctx, direct.IOCreate) 790 if err != nil { 791 t.Fatal(err) 792 } 793 defer task.Delete(ctx) 794 795 statusC, err := task.Wait(ctx) 796 if err != nil { 797 t.Fatal(err) 798 } 799 800 if err := task.Start(ctx); err != nil { 801 t.Fatal(err) 802 } 803 <-statusC 804 805 wg.Wait() 806 807 output := strings.TrimSuffix(buf.String(), "\n") 808 if output != "31" { 809 t.Errorf("expected squid uid to be 31 but received %q", output) 810 } 811 } 812 813 func TestContainerUser(t *testing.T) { 814 t.Parallel() 815 t.Run("UserNameAndGroupName", func(t *testing.T) { testContainerUser(t, "squid:squid", "31:31") }) 816 t.Run("UserIDAndGroupName", func(t *testing.T) { testContainerUser(t, "1001:squid", "1001:31") }) 817 t.Run("UserNameAndGroupID", func(t *testing.T) { testContainerUser(t, "squid:1002", "31:1002") }) 818 t.Run("UserIDAndGroupID", func(t *testing.T) { testContainerUser(t, "1001:1002", "1001:1002") }) 819 } 820 821 func testContainerUser(t *testing.T, userstr, expectedOutput string) { 822 client, err := newClient(t, address) 823 if err != nil { 824 t.Fatal(err) 825 } 826 defer client.Close() 827 828 var ( 829 image Image 830 ctx, cancel = testContext(t) 831 id = strings.Replace(t.Name(), "/", "_", -1) 832 ) 833 defer cancel() 834 835 image, err = client.GetImage(ctx, testImage) 836 if err != nil { 837 t.Fatal(err) 838 } 839 direct, err := newDirectIO(ctx, false) 840 if err != nil { 841 t.Fatal(err) 842 } 843 defer direct.Delete() 844 var ( 845 wg sync.WaitGroup 846 buf = bytes.NewBuffer(nil) 847 ) 848 wg.Add(1) 849 
go func() { 850 defer wg.Done() 851 io.Copy(buf, direct.Stdout) 852 }() 853 854 container, err := client.NewContainer(ctx, id, 855 WithNewSnapshot(id, image), 856 WithNewSpec(oci.WithImageConfig(image), oci.WithUser(userstr), oci.WithProcessArgs("sh", "-c", "echo $(id -u):$(id -g)")), 857 ) 858 if err != nil { 859 t.Fatal(err) 860 } 861 defer container.Delete(ctx, WithSnapshotCleanup) 862 863 task, err := container.NewTask(ctx, direct.IOCreate) 864 if err != nil { 865 t.Fatal(err) 866 } 867 defer task.Delete(ctx) 868 869 statusC, err := task.Wait(ctx) 870 if err != nil { 871 t.Fatal(err) 872 } 873 874 if err := task.Start(ctx); err != nil { 875 t.Fatal(err) 876 } 877 <-statusC 878 879 wg.Wait() 880 881 output := strings.TrimSuffix(buf.String(), "\n") 882 if output != expectedOutput { 883 t.Errorf("expected uid:gid to be %q, but received %q", expectedOutput, output) 884 } 885 } 886 887 func TestContainerAttachProcess(t *testing.T) { 888 t.Parallel() 889 890 if runtime.GOOS == "windows" { 891 // On windows, closing the write side of the pipe closes the read 892 // side, sending an EOF to it and preventing reopening it. 
893 // Hence this test will always fails on windows 894 t.Skip("invalid logic on windows") 895 } 896 897 client, err := newClient(t, address) 898 if err != nil { 899 t.Fatal(err) 900 } 901 defer client.Close() 902 903 var ( 904 image Image 905 ctx, cancel = testContext(t) 906 id = t.Name() 907 ) 908 defer cancel() 909 910 image, err = client.GetImage(ctx, testImage) 911 if err != nil { 912 t.Fatal(err) 913 } 914 915 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "100"))) 916 if err != nil { 917 t.Fatal(err) 918 } 919 defer container.Delete(ctx, WithSnapshotCleanup) 920 921 expected := "hello" + newLine 922 923 // creating IO early for easy resource cleanup 924 direct, err := newDirectIO(ctx, false) 925 if err != nil { 926 t.Fatal(err) 927 } 928 defer direct.Delete() 929 var ( 930 wg sync.WaitGroup 931 buf = bytes.NewBuffer(nil) 932 ) 933 wg.Add(1) 934 go func() { 935 defer wg.Done() 936 io.Copy(buf, direct.Stdout) 937 }() 938 939 task, err := container.NewTask(ctx, empty()) 940 if err != nil { 941 t.Fatal(err) 942 } 943 defer task.Delete(ctx) 944 945 status, err := task.Wait(ctx) 946 if err != nil { 947 t.Error(err) 948 } 949 950 if err := task.Start(ctx); err != nil { 951 t.Fatal(err) 952 } 953 954 spec, err := container.Spec(ctx) 955 if err != nil { 956 t.Fatal(err) 957 } 958 959 processSpec := spec.Process 960 processSpec.Args = []string{"cat"} 961 execID := t.Name() + "_exec" 962 process, err := task.Exec(ctx, execID, processSpec, direct.IOCreate) 963 if err != nil { 964 t.Fatal(err) 965 } 966 processStatusC, err := process.Wait(ctx) 967 if err != nil { 968 t.Fatal(err) 969 } 970 971 if err := process.Start(ctx); err != nil { 972 t.Fatal(err) 973 } 974 975 if _, err := fmt.Fprint(direct.Stdin, expected); err != nil { 976 t.Error(err) 977 } 978 979 if process, err = task.LoadProcess(ctx, execID, direct.IOAttach); err != nil { 980 t.Fatal(err) 981 } 982 983 if _, err := 
fmt.Fprint(direct.Stdin, expected); err != nil { 984 t.Error(err) 985 } 986 987 direct.Stdin.Close() 988 989 if err := process.CloseIO(ctx, WithStdinCloser); err != nil { 990 t.Error(err) 991 } 992 993 <-processStatusC 994 995 wg.Wait() 996 997 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 998 t.Error(err) 999 } 1000 1001 output := buf.String() 1002 1003 // we wrote the same thing after attach 1004 expected = expected + expected 1005 if output != expected { 1006 t.Errorf("expected output %q but received %q", expected, output) 1007 } 1008 <-status 1009 } 1010 1011 func TestContainerUserID(t *testing.T) { 1012 t.Parallel() 1013 1014 client, err := newClient(t, address) 1015 if err != nil { 1016 t.Fatal(err) 1017 } 1018 defer client.Close() 1019 1020 var ( 1021 image Image 1022 ctx, cancel = testContext(t) 1023 id = t.Name() 1024 ) 1025 defer cancel() 1026 1027 image, err = client.GetImage(ctx, testImage) 1028 if err != nil { 1029 t.Fatal(err) 1030 } 1031 direct, err := newDirectIO(ctx, false) 1032 if err != nil { 1033 t.Fatal(err) 1034 } 1035 defer direct.Delete() 1036 var ( 1037 wg sync.WaitGroup 1038 buf = bytes.NewBuffer(nil) 1039 ) 1040 wg.Add(1) 1041 go func() { 1042 defer wg.Done() 1043 io.Copy(buf, direct.Stdout) 1044 }() 1045 1046 // adm user in the alpine image has a uid of 3 and gid of 4. 
1047 container, err := client.NewContainer(ctx, id, 1048 WithNewSnapshot(id, image), 1049 WithNewSpec(oci.WithImageConfig(image), oci.WithUserID(3), oci.WithProcessArgs("sh", "-c", "echo $(id -u):$(id -g)")), 1050 ) 1051 if err != nil { 1052 t.Fatal(err) 1053 } 1054 defer container.Delete(ctx, WithSnapshotCleanup) 1055 1056 task, err := container.NewTask(ctx, direct.IOCreate) 1057 if err != nil { 1058 t.Fatal(err) 1059 } 1060 defer task.Delete(ctx) 1061 1062 statusC, err := task.Wait(ctx) 1063 if err != nil { 1064 t.Fatal(err) 1065 } 1066 1067 if err := task.Start(ctx); err != nil { 1068 t.Fatal(err) 1069 } 1070 <-statusC 1071 1072 wg.Wait() 1073 1074 output := strings.TrimSuffix(buf.String(), "\n") 1075 if output != "3:4" { 1076 t.Errorf("expected uid:gid to be 3:4, but received %q", output) 1077 } 1078 } 1079 1080 func TestContainerKillAll(t *testing.T) { 1081 t.Parallel() 1082 1083 client, err := newClient(t, address) 1084 if err != nil { 1085 t.Fatal(err) 1086 } 1087 defer client.Close() 1088 1089 var ( 1090 image Image 1091 ctx, cancel = testContext(t) 1092 id = t.Name() 1093 ) 1094 defer cancel() 1095 1096 image, err = client.GetImage(ctx, testImage) 1097 if err != nil { 1098 t.Fatal(err) 1099 } 1100 1101 container, err := client.NewContainer(ctx, id, 1102 WithNewSnapshot(id, image), 1103 WithNewSpec(oci.WithImageConfig(image), 1104 withProcessArgs("sh", "-c", "top"), 1105 oci.WithHostNamespace(specs.PIDNamespace), 1106 ), 1107 ) 1108 if err != nil { 1109 t.Fatal(err) 1110 } 1111 defer container.Delete(ctx, WithSnapshotCleanup) 1112 1113 stdout := bytes.NewBuffer(nil) 1114 task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(stdout))) 1115 if err != nil { 1116 t.Fatal(err) 1117 } 1118 defer task.Delete(ctx) 1119 1120 statusC, err := task.Wait(ctx) 1121 if err != nil { 1122 t.Fatal(err) 1123 } 1124 1125 if err := task.Start(ctx); err != nil { 1126 t.Fatal(err) 1127 } 1128 1129 if err := task.Kill(ctx, syscall.SIGKILL, WithKillAll); err != nil { 
1130 t.Error(err) 1131 } 1132 1133 <-statusC 1134 if _, err := task.Delete(ctx); err != nil { 1135 t.Fatal(err) 1136 } 1137 } 1138 1139 func TestDaemonRestartWithRunningShim(t *testing.T) { 1140 client, err := newClient(t, address) 1141 if err != nil { 1142 t.Fatal(err) 1143 } 1144 defer client.Close() 1145 1146 var ( 1147 image Image 1148 ctx, cancel = testContext(t) 1149 id = t.Name() 1150 ) 1151 defer cancel() 1152 1153 image, err = client.GetImage(ctx, testImage) 1154 if err != nil { 1155 t.Fatal(err) 1156 } 1157 container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100"))) 1158 if err != nil { 1159 t.Fatal(err) 1160 } 1161 defer container.Delete(ctx, WithSnapshotCleanup) 1162 1163 task, err := container.NewTask(ctx, empty()) 1164 if err != nil { 1165 t.Fatal(err) 1166 } 1167 defer task.Delete(ctx) 1168 1169 statusC, err := task.Wait(ctx) 1170 if err != nil { 1171 t.Error(err) 1172 } 1173 1174 pid := task.Pid() 1175 if pid < 1 { 1176 t.Fatalf("invalid task pid %d", pid) 1177 } 1178 1179 if err := task.Start(ctx); err != nil { 1180 t.Fatal(err) 1181 } 1182 1183 var exitStatus ExitStatus 1184 if err := ctrd.Restart(func() { 1185 exitStatus = <-statusC 1186 }); err != nil { 1187 t.Fatal(err) 1188 } 1189 1190 if exitStatus.Error() == nil { 1191 t.Errorf(`first task.Wait() should have failed with "transport is closing"`) 1192 } 1193 1194 waitCtx, cancel := context.WithTimeout(ctx, 1*time.Second) 1195 c, err := ctrd.waitForStart(waitCtx) 1196 cancel() 1197 if err != nil { 1198 t.Fatal(err) 1199 } 1200 c.Close() 1201 1202 statusC, err = task.Wait(ctx) 1203 if err != nil { 1204 t.Error(err) 1205 } 1206 1207 if err := task.Kill(ctx, syscall.SIGKILL); err != nil { 1208 t.Fatal(err) 1209 } 1210 1211 <-statusC 1212 1213 if err := unix.Kill(int(pid), 0); err != unix.ESRCH { 1214 t.Errorf("pid %d still exists", pid) 1215 } 1216 } 1217 1218 func TestContainerRuntimeOptionsv1(t 
*testing.T) { 1219 t.Parallel() 1220 1221 client, err := newClient(t, address) 1222 if err != nil { 1223 t.Fatal(err) 1224 } 1225 defer client.Close() 1226 1227 var ( 1228 image Image 1229 ctx, cancel = testContext(t) 1230 id = t.Name() 1231 ) 1232 defer cancel() 1233 1234 image, err = client.GetImage(ctx, testImage) 1235 if err != nil { 1236 t.Fatal(err) 1237 } 1238 1239 container, err := client.NewContainer( 1240 ctx, id, 1241 WithNewSnapshot(id, image), 1242 WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)), 1243 WithRuntime(plugin.RuntimeLinuxV1, &runctypes.RuncOptions{Runtime: "no-runc"}), 1244 ) 1245 if err != nil { 1246 t.Fatal(err) 1247 } 1248 defer container.Delete(ctx, WithSnapshotCleanup) 1249 1250 task, err := container.NewTask(ctx, empty()) 1251 if err == nil { 1252 t.Errorf("task creation should have failed") 1253 task.Delete(ctx) 1254 return 1255 } 1256 if !strings.Contains(err.Error(), `"no-runc"`) { 1257 t.Errorf("task creation should have failed because of lack of executable. 
Instead failed with: %v", err.Error())
	}
}

// TestContainerRuntimeOptionsv2 verifies that runtime options passed via
// WithRuntime reach the v2 runc shim: task creation is configured with a
// nonexistent runc binary ("no-runc") and must fail with an error that
// mentions that binary name.
func TestContainerRuntimeOptionsv2(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(
		ctx, id,
		WithNewSnapshot(id, image),
		WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)),
		// BinaryName points at a binary that does not exist, so NewTask
		// below is expected to fail.
		WithRuntime(plugin.RuntimeRuncV1, &options.Options{BinaryName: "no-runc"}),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err == nil {
		t.Errorf("task creation should have failed")
		task.Delete(ctx)
		return
	}
	if !strings.Contains(err.Error(), `"no-runc"`) {
		t.Errorf("task creation should have failed because of lack of executable. Instead failed with: %v", err.Error())
	}
}

// initContainerAndCheckChildrenDieOnKill starts a container whose init
// process spawns a child ("sleep 42"), SIGKILLs init, then scans the host's
// `ps ax` output to assert the child did not survive. Callers supply extra
// spec options (e.g. a host PID namespace) to vary the namespace setup.
func initContainerAndCheckChildrenDieOnKill(t *testing.T, opts ...oci.SpecOpts) {
	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	// "sleep 42" is the child process we later grep for on the host;
	// it must be gone once init is killed.
	opts = append(opts, oci.WithImageConfig(image))
	opts = append(opts, withProcessArgs("sh", "-c", "sleep 42; echo hi"))

	container, err := client.NewContainer(ctx, id,
		WithNewSnapshot(id, image),
		WithNewSpec(opts...),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	stdout := bytes.NewBuffer(nil)
	task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(stdout)))
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := task.Start(ctx); err != nil {
		t.Fatal(err)
	}

	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
		t.Error(err)
	}

	// Give the shim time to reap the init process and kill the orphans
	select {
	case <-statusC:
	case <-time.After(100 * time.Millisecond):
	}

	b, err := exec.Command("ps", "ax").CombinedOutput()
	if err != nil {
		t.Fatal(err)
	}

	if strings.Contains(string(b), "sleep 42") {
		t.Fatalf("killing init didn't kill all its children:\n%v", string(b))
	}

	if _, err := task.Delete(ctx, WithProcessKill); err != nil {
		t.Error(err)
	}
}

// TestContainerKillInitPidHost runs the children-die-on-kill check with the
// container sharing the host PID namespace.
func TestContainerKillInitPidHost(t *testing.T) {
	initContainerAndCheckChildrenDieOnKill(t, oci.WithHostNamespace(specs.PIDNamespace))
}

// TestContainerKillInitKillsChildWhenNotHostPid runs the same check with the
// default (non-host) PID namespace.
func TestContainerKillInitKillsChildWhenNotHostPid(t *testing.T) {
	initContainerAndCheckChildrenDieOnKill(t)
}

// TestUserNamespaces exercises user-namespaced containers with both a
// writable and a read-only (view) rootfs snapshot.
func TestUserNamespaces(t *testing.T) {
	t.Parallel()
	t.Run("WritableRootFS", func(t *testing.T) { testUserNamespaces(t, false) })
	// see #1373 and runc#1572
	t.Run("ReadonlyRootFS", func(t *testing.T) { testUserNamespaces(t, true) })
}

// checkUserNS skips the calling test when the kernel refuses to create a new
// user namespace (probed by cloning a trivial process with CLONE_NEWUSER).
func checkUserNS(t *testing.T) {
	cmd := exec.Command("true")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWUSER,
	}

	if err := cmd.Run(); err != nil {
		t.Skip("User namespaces are unavailable")
	}
}

// testUserNamespaces runs a container with UID/GID mappings (container root
// mapped to host 1000/2000) on a remapped snapshot and checks the expected
// exit status (7) from both Wait and Delete. readonlyRootFS selects a
// read-only snapshot view instead of a writable snapshot.
func testUserNamespaces(t *testing.T, readonlyRootFS bool) {
	checkUserNS(t)

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		// Subtest names contain "/" which is not valid in a container ID.
		id = strings.Replace(t.Name(), "/", "-", -1)
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	opts := []NewContainerOpts{WithNewSpec(oci.WithImageConfig(image),
		withExitStatus(7),
		oci.WithUserNamespace([]specs.LinuxIDMapping{
			{
				ContainerID: 0,
				HostID:      1000,
				Size:        10000,
			},
		}, []specs.LinuxIDMapping{
			{
				ContainerID: 0,
				HostID:      2000,
				Size:        10000,
			},
		}),
	)}
	// The snapshot must be chown-remapped to the mapped host IDs; prepend so
	// the snapshot opt runs before the spec opt.
	if readonlyRootFS {
		opts = append([]NewContainerOpts{WithRemappedSnapshotView(id, image, 1000, 2000)}, opts...)
	} else {
		opts = append([]NewContainerOpts{WithRemappedSnapshot(id, image, 1000, 2000)}, opts...)
	}

	container, err := client.NewContainer(ctx, id, opts...)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	// Pick the IO uid/gid option type matching the runtime in use
	// (runc v2 shim options vs legacy runctypes).
	var copts interface{}
	if CheckRuntime(client.runtime, "io.containerd.runc") {
		copts = &options.Options{
			IoUid: 1000,
			IoGid: 2000,
		}
	} else {
		copts = &runctypes.CreateOptions{
			IoUid: 1000,
			IoGid: 2000,
		}
	}

	task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio), func(_ context.Context, client *Client, r *TaskInfo) error {
		r.Options = copts
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if pid := task.Pid(); pid < 1 {
		t.Errorf("invalid task pid %d", pid)
	}
	if err := task.Start(ctx); err != nil {
		t.Error(err)
		task.Delete(ctx)
		return
	}
	status := <-statusC
	code, _, err := status.Result()
	if err != nil {
		t.Fatal(err)
	}
	if code != 7 {
		t.Errorf("expected status 7 from wait but received %d", code)
	}
	// Delete must report the same exit status as Wait did.
	deleteStatus, err := task.Delete(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if ec := deleteStatus.ExitCode(); ec != 7 {
		t.Errorf("expected status 7 from delete but received %d", ec)
	}
}

// TestTaskResize checks that a console resize request on a running task
// succeeds.
func TestTaskResize(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if err := task.Resize(ctx, 32, 32); err != nil {
		t.Fatal(err)
	}
	task.Kill(ctx, syscall.SIGKILL)
	<-statusC
}

// TestContainerImage verifies Container.Image returns the image the
// container was created with.
func TestContainerImage(t *testing.T) {
	t.Parallel()

	ctx, cancel := testContext(t)
	defer cancel()
	id := t.Name()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(ctx, id, WithNewSpec(), WithImage(image))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx)

	i, err := container.Image(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if i.Name() != image.Name() {
		t.Fatalf("expected container image name %s but received %s", image.Name(), i.Name())
	}
}

// TestContainerNoImage verifies Container.Image returns a NotFound error for
// a container created without an image.
func TestContainerNoImage(t *testing.T) {
	t.Parallel()

	ctx, cancel := testContext(t)
	defer cancel()
	id := t.Name()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	container, err := client.NewContainer(ctx, id, WithNewSpec())
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx)

	_, err = container.Image(ctx)
	if err == nil {
		t.Fatal("error should not be nil when container is created without an image")
	}
	if !errdefs.IsNotFound(err) {
		t.Fatalf("expected error to be %s but received %s", errdefs.ErrNotFound, err)
	}
}

// TestUIDNoGID checks that oci.WithUserID(1000) sets the process UID and
// leaves the GID at 0 in the generated spec.
func TestUIDNoGID(t *testing.T) {
	t.Parallel()

	ctx, cancel := testContext(t)
	defer cancel()
	id := t.Name()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
	image, err := client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithUserID(1000)))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx)

	spec, err := container.Spec(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if uid := spec.Process.User.UID; uid != 1000 {
		t.Fatalf("expected uid 1000 but received %d", uid)
	}
	if gid := spec.Process.User.GID; gid != 0 {
		t.Fatalf("expected gid 0 but received %d", gid)
	}
}

// TestBindLowPortNonRoot runs `nc -l -p 80` as uid/gid 1000 without extra
// capabilities and expects exit status 1 (binding a privileged port fails).
func TestBindLowPortNonRoot(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	container, err := client.NewContainer(ctx, id,
		WithNewSnapshot(id, image),
		WithNewSpec(oci.WithImageConfig(image), withProcessArgs("nc", "-l", "-p", "80"), oci.WithUIDGID(1000, 1000)),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := task.Start(ctx); err != nil {
		t.Fatal(err)
	}
	status := <-statusC
	code, _, err := status.Result()
	if err != nil {
		t.Fatal(err)
	}
	if code != 1 {
		t.Errorf("expected status 1 from wait but received %d", code)
	}
	if _, err := task.Delete(ctx); err != nil {
		t.Fatal(err)
	}
}

// TestBindLowPortNonOpt runs the same non-root `nc -l -p 80` but grants
// CAP_NET_BIND_SERVICE as an ambient capability, so the bind succeeds and
// nc keeps listening until we SIGTERM it (exit status 143 = 128 + SIGTERM).
func TestBindLowPortNonOpt(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	container, err := client.NewContainer(ctx, id,
		WithNewSnapshot(id, image),
		WithNewSpec(oci.WithImageConfig(image), withProcessArgs("nc", "-l", "-p", "80"), oci.WithUIDGID(1000, 1000), oci.WithAmbientCapabilities([]string{"CAP_NET_BIND_SERVICE"})),
	)
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	if err := task.Start(ctx); err != nil {
		t.Fatal(err)
	}
	// nc should still be listening; terminate it after a grace period.
	go func() {
		time.Sleep(2 * time.Second)
		task.Kill(ctx, unix.SIGTERM)
	}()
	status := <-statusC
	code, _, err := status.Result()
	if err != nil {
		t.Fatal(err)
	}
	// 128 + sigterm
	if code != 143 {
		t.Errorf("expected status 143 from wait but received %d", code)
	}
	if _, err := task.Delete(ctx); err != nil {
		t.Fatal(err)
	}
}

// TestContainerNoSTDIN creates a task with a nil stdin stream and checks it
// still runs to completion with exit status 0.
func TestContainerNoSTDIN(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}
	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(0)))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(nil, ioutil.Discard, ioutil.Discard)))
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if err := task.Start(ctx); err != nil {
		t.Fatal(err)
	}
	status := <-statusC
	code, _, err := status.Result()
	if err != nil {
		t.Fatal(err)
	}
	if code != 0 {
		t.Errorf("expected status 0 from wait but received %d", code)
	}
}

// TestShimOOMScore places the shim in a dedicated cgroup (via
// WithShimCgroup) and verifies every process in that cgroup has an OOM score
// adjustment of containerd's score + 1, on both cgroup v1 and v2.
func TestShimOOMScore(t *testing.T) {
	// ctrd is the containerd daemon started by the test harness; its OOM
	// score is the baseline the shim's score is derived from.
	containerdPid := ctrd.cmd.Process.Pid
	containerdScore, err := sys.GetOOMScoreAdj(containerdPid)
	if err != nil {
		t.Fatal(err)
	}

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	path := "/containerd/oomshim"
	var (
		cg  cgroups.Cgroup
		cg2 *cgroupsv2.Manager
	)
	if cgroups.Mode() == cgroups.Unified {
		cg2, err = cgroupsv2.NewManager("/sys/fs/cgroup", path, &cgroupsv2.Resources{})
		if err != nil {
			t.Fatal(err)
		}
		defer cg2.Delete()
	} else {
		cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
		if err != nil {
			t.Fatal(err)
		}
		defer cg.Delete()
	}

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30")))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty(), WithShimCgroup(path))
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	expectedScore := containerdScore + 1
	// find the shim's pid
	if cgroups.Mode() == cgroups.Unified {
		processes, err := cg2.Procs(false)
		if err != nil {
			t.Fatal(err)
		}
		for _, pid := range processes {
			score, err := sys.GetOOMScoreAdj(int(pid))
			if err != nil {
				t.Fatal(err)
			}
			if score != expectedScore {
				t.Errorf("expected score %d but got %d for shim process", expectedScore, score)
			}
		}
	} else {
		processes, err := cg.Processes(cgroups.Devices, false)
		if err != nil {
			t.Fatal(err)
		}
		for _, p := range processes {
			score, err := sys.GetOOMScoreAdj(p.Pid)
			if err != nil {
				t.Fatal(err)
			}
			if score != expectedScore {
				t.Errorf("expected score %d but got %d for shim process", expectedScore, score)
			}
		}
	}

	if err := task.Kill(ctx, unix.SIGKILL); err != nil {
		t.Fatal(err)
	}

	<-statusC
}

// TestTaskSpec checks that the OCI spec is retrievable both from a freshly
// created task and from the same task re-loaded via container.Task.
func TestTaskSpec(t *testing.T) {
	t.Parallel()

	client, err := newClient(t, address)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()

	var (
		image       Image
		ctx, cancel = testContext(t)
		id          = t.Name()
	)
	defer cancel()

	image, err = client.GetImage(ctx, testImage)
	if err != nil {
		t.Fatal(err)
	}

	container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), longCommand))
	if err != nil {
		t.Fatal(err)
	}
	defer container.Delete(ctx, WithSnapshotCleanup)

	task, err := container.NewTask(ctx, empty())
	if err != nil {
		t.Fatal(err)
	}
	defer task.Delete(ctx)

	statusC, err := task.Wait(ctx)
	if err != nil {
		t.Fatal(err)
	}

	spec, err := task.Spec(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if spec == nil {
		t.Fatal("spec from task is nil")
	}
	direct, err := newDirectIO(ctx, false)
	if err != nil {
		t.Fatal(err)
	}
	defer direct.Delete()

	// Re-load the same task (attach) and make sure the spec survives the
	// round trip through the shim.
	lt, err := container.Task(ctx, direct.IOAttach)
	if err != nil {
		t.Fatal(err)
	}

	spec, err = lt.Spec(ctx)
	if err != nil {
		t.Fatal(err)
	}
	if spec == nil {
		t.Fatal("spec from loaded task is nil")
	}

	if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
		t.Fatal(err)
	}
	<-statusC
}