github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/drivers/shared/executor/executor_linux_test.go (about) 1 package executor 2 3 import ( 4 "context" 5 "fmt" 6 "io/ioutil" 7 "os" 8 "path/filepath" 9 "regexp" 10 "strconv" 11 "strings" 12 "testing" 13 "time" 14 15 "github.com/hashicorp/nomad/ci" 16 "github.com/hashicorp/nomad/client/allocdir" 17 "github.com/hashicorp/nomad/client/lib/cgutil" 18 "github.com/hashicorp/nomad/client/taskenv" 19 "github.com/hashicorp/nomad/client/testutil" 20 "github.com/hashicorp/nomad/drivers/shared/capabilities" 21 "github.com/hashicorp/nomad/helper/testlog" 22 "github.com/hashicorp/nomad/nomad/mock" 23 "github.com/hashicorp/nomad/plugins/drivers" 24 tu "github.com/hashicorp/nomad/testutil" 25 "github.com/opencontainers/runc/libcontainer/cgroups" 26 lconfigs "github.com/opencontainers/runc/libcontainer/configs" 27 "github.com/opencontainers/runc/libcontainer/devices" 28 "github.com/shoenig/test" 29 "github.com/shoenig/test/must" 30 "github.com/stretchr/testify/require" 31 "golang.org/x/sys/unix" 32 ) 33 34 func init() { 35 executorFactories["LibcontainerExecutor"] = libcontainerFactory 36 } 37 38 var libcontainerFactory = executorFactory{ 39 new: NewExecutorWithIsolation, 40 configureExecCmd: func(t *testing.T, cmd *ExecCommand) { 41 cmd.ResourceLimits = true 42 setupRootfs(t, cmd.TaskDir) 43 }, 44 } 45 46 // testExecutorContextWithChroot returns an ExecutorContext and AllocDir with 47 // chroot. Use testExecutorContext if you don't need a chroot. 48 // 49 // The caller is responsible for calling AllocDir.Destroy() to cleanup. 50 func testExecutorCommandWithChroot(t *testing.T) *testExecCmd { 51 chrootEnv := map[string]string{ 52 "/etc/ld.so.cache": "/etc/ld.so.cache", 53 "/etc/ld.so.conf": "/etc/ld.so.conf", 54 "/etc/ld.so.conf.d": "/etc/ld.so.conf.d", 55 "/etc/passwd": "/etc/passwd", 56 "/lib": "/lib", 57 "/lib64": "/lib64", 58 "/usr/lib": "/usr/lib", 59 "/bin/ls": "/bin/ls", 60 "/bin/cat": "/bin/cat", 61 "/bin/echo": "/bin/echo", 62 "/bin/bash": "/bin/bash", 63 "/bin/sleep": "/bin/sleep", 64 "/foobar": "/does/not/exist", 65 } 66 67 alloc := mock.Alloc() 68 task := alloc.Job.TaskGroups[0].Tasks[0] 69 taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, "global").Build() 70 71 allocDir := allocdir.NewAllocDir(testlog.HCLogger(t), os.TempDir(), alloc.ID) 72 if err := allocDir.Build(); err != nil { 73 t.Fatalf("AllocDir.Build() failed: %v", err) 74 } 75 if err := allocDir.NewTaskDir(task.Name).Build(true, chrootEnv); err != nil { 76 allocDir.Destroy() 77 t.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err) 78 } 79 td := allocDir.TaskDirs[task.Name] 80 cmd := &ExecCommand{ 81 Env: taskEnv.List(), 82 TaskDir: td.Dir, 83 Resources: &drivers.Resources{ 84 NomadResources: alloc.AllocatedResources.Tasks[task.Name], 85 }, 86 } 87 88 if cgutil.UseV2 { 89 cmd.Resources.LinuxResources = &drivers.LinuxResources{ 90 CpusetCgroupPath: filepath.Join(cgutil.CgroupRoot, "testing.scope", cgutil.CgroupScope(alloc.ID, task.Name)), 91 } 92 } 93 94 testCmd := &testExecCmd{ 95 command: cmd, 96 allocDir: allocDir, 97 } 98 configureTLogging(t, testCmd) 99 return testCmd 100 } 101 102 func TestExecutor_configureNamespaces(t *testing.T) { 103 ci.Parallel(t) 104 t.Run("host host", func(t *testing.T) { 105 require.Equal(t, lconfigs.Namespaces{ 106 {Type: lconfigs.NEWNS}, 107 }, configureNamespaces("host", "host")) 108 }) 109 110 t.Run("host private", func(t *testing.T) { 111 require.Equal(t, lconfigs.Namespaces{ 112 {Type: lconfigs.NEWNS}, 113 {Type: lconfigs.NEWIPC}, 114 }, configureNamespaces("host", "private")) 115 }) 116 117 t.Run("private host", func(t *testing.T) { 118 require.Equal(t, lconfigs.Namespaces{ 119 {Type: lconfigs.NEWNS}, 120 {Type: lconfigs.NEWPID}, 121 }, configureNamespaces("private", "host")) 122 }) 123 124 t.Run("private private", func(t *testing.T) { 125 require.Equal(t, lconfigs.Namespaces{ 126 {Type: lconfigs.NEWNS}, 127 {Type: lconfigs.NEWPID}, 128 {Type: lconfigs.NEWIPC}, 129 }, configureNamespaces("private", "private")) 130 }) 131 } 132 133 func TestExecutor_Isolation_PID_and_IPC_hostMode(t *testing.T) { 134 ci.Parallel(t) 135 r := require.New(t) 136 testutil.ExecCompatible(t) 137 138 testExecCmd := testExecutorCommandWithChroot(t) 139 execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir 140 execCmd.Cmd = "/bin/ls" 141 execCmd.Args = []string{"-F", "/", "/etc/"} 142 defer allocDir.Destroy() 143 144 execCmd.ResourceLimits = true 145 execCmd.ModePID = "host" // disable PID namespace 146 execCmd.ModeIPC = "host" // disable IPC namespace 147 148 executor := NewExecutorWithIsolation(testlog.HCLogger(t)) 149 defer executor.Shutdown("SIGKILL", 0) 150 151 ps, err := executor.Launch(execCmd) 152 r.NoError(err) 153 r.NotZero(ps.Pid) 154 155 estate, err := executor.Wait(context.Background()) 156 r.NoError(err) 157 r.Zero(estate.ExitCode) 158 159 lexec, ok := executor.(*LibcontainerExecutor) 160 r.True(ok) 161 162 // Check that namespaces were applied to the container config 163 config := lexec.container.Config() 164 165 r.Contains(config.Namespaces, lconfigs.Namespace{Type: lconfigs.NEWNS}) 166 r.NotContains(config.Namespaces, lconfigs.Namespace{Type: lconfigs.NEWPID}) 167 r.NotContains(config.Namespaces, lconfigs.Namespace{Type: lconfigs.NEWIPC}) 168 169 // Shut down executor 170 r.NoError(executor.Shutdown("", 0)) 171 executor.Wait(context.Background()) 172 } 173 174 func TestExecutor_IsolationAndConstraints(t *testing.T) { 175 ci.Parallel(t) 176 testutil.ExecCompatible(t) 177 testutil.CgroupsCompatibleV1(t) // todo(shoenig): hard codes cgroups v1 lookup 178 179 r := require.New(t) 180 181 testExecCmd := testExecutorCommandWithChroot(t) 182 execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir 183 execCmd.Cmd = "/bin/ls" 184 execCmd.Args = []string{"-F", "/", "/etc/"} 185 defer allocDir.Destroy() 186 187 execCmd.ResourceLimits = true 188 execCmd.ModePID = "private" 189 execCmd.ModeIPC = "private" 190 191 executor := NewExecutorWithIsolation(testlog.HCLogger(t)) 192 defer executor.Shutdown("SIGKILL", 0) 193 194 ps, err := executor.Launch(execCmd) 195 r.NoError(err) 196 r.NotZero(ps.Pid) 197 198 estate, err := executor.Wait(context.Background()) 199 r.NoError(err) 200 r.Zero(estate.ExitCode) 201 202 lexec, ok := executor.(*LibcontainerExecutor) 203 r.True(ok) 204 205 // Check if the resource constraints were applied 206 state, err := lexec.container.State() 207 r.NoError(err) 208 209 memLimits := filepath.Join(state.CgroupPaths["memory"], "memory.limit_in_bytes") 210 data, err := ioutil.ReadFile(memLimits) 211 r.NoError(err) 212 213 expectedMemLim := strconv.Itoa(int(execCmd.Resources.NomadResources.Memory.MemoryMB * 1024 * 1024)) 214 actualMemLim := strings.TrimSpace(string(data)) 215 r.Equal(actualMemLim, expectedMemLim) 216 217 // Check that namespaces were applied to the container config 218 config := lexec.container.Config() 219 220 r.Contains(config.Namespaces, lconfigs.Namespace{Type: lconfigs.NEWNS}) 221 r.Contains(config.Namespaces, lconfigs.Namespace{Type: lconfigs.NEWPID}) 222 r.Contains(config.Namespaces, lconfigs.Namespace{Type: lconfigs.NEWIPC}) 223 224 // Shut down executor 225 r.NoError(executor.Shutdown("", 0)) 226 executor.Wait(context.Background()) 227 228 // Check if Nomad has actually removed the cgroups 229 tu.WaitForResult(func() (bool, error) { 230 _, err = os.Stat(memLimits) 231 if err == nil { 232 return false, fmt.Errorf("expected an error from os.Stat %s", memLimits) 233 } 234 return true, nil 235 }, func(err error) { t.Error(err) }) 236 237 expected := `/: 238 alloc/ 239 bin/ 240 dev/ 241 etc/ 242 lib/ 243 lib64/ 244 local/ 245 proc/ 246 secrets/ 247 sys/ 248 tmp/ 249 usr/ 250 251 /etc/: 252 ld.so.cache 253 ld.so.conf 254 ld.so.conf.d/ 255 passwd` 256 tu.WaitForResult(func() (bool, error) { 257 output := testExecCmd.stdout.String() 258 act := strings.TrimSpace(string(output)) 259 if act != expected { 260 return false, fmt.Errorf("Command output incorrectly: want %v; got %v", expected, act) 261 } 262 return true, nil 263 }, func(err error) { t.Error(err) }) 264 } 265 266 // TestExecutor_CgroupPaths asserts that process starts with independent cgroups 267 // hierarchy created for this process 268 func TestExecutor_CgroupPaths(t *testing.T) { 269 ci.Parallel(t) 270 testutil.ExecCompatible(t) 271 272 require := require.New(t) 273 274 testExecCmd := testExecutorCommandWithChroot(t) 275 execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir 276 execCmd.Cmd = "/bin/bash" 277 execCmd.Args = []string{"-c", "sleep 0.2; cat /proc/self/cgroup"} 278 defer allocDir.Destroy() 279 280 execCmd.ResourceLimits = true 281 282 executor := NewExecutorWithIsolation(testlog.HCLogger(t)) 283 defer executor.Shutdown("SIGKILL", 0) 284 285 ps, err := executor.Launch(execCmd) 286 require.NoError(err) 287 require.NotZero(ps.Pid) 288 289 state, err := executor.Wait(context.Background()) 290 require.NoError(err) 291 require.Zero(state.ExitCode) 292 293 tu.WaitForResult(func() (bool, error) { 294 output := strings.TrimSpace(testExecCmd.stdout.String()) 295 switch cgutil.UseV2 { 296 case true: 297 isScope := strings.HasSuffix(output, ".scope") 298 require.True(isScope) 299 case false: 300 // Verify that we got some cgroups 301 if !strings.Contains(output, ":devices:") { 302 return false, fmt.Errorf("was expected cgroup files but found:\n%v", output) 303 } 304 lines := strings.Split(output, "\n") 305 for _, line := range lines { 306 // Every cgroup entry should be /nomad/$ALLOC_ID 307 if line == "" { 308 continue 309 } 310 311 // Skip rdma & misc subsystem; rdma was added in most recent kernels and libcontainer/docker 312 // don't isolate it by default. 313 // :: filters out odd empty cgroup found in latest Ubuntu lines, e.g. 0::/user.slice/user-1000.slice/session-17.scope 314 // that is also not used for isolation 315 if strings.Contains(line, ":rdma:") || strings.Contains(line, ":misc:") || strings.Contains(line, "::") { 316 continue 317 } 318 if !strings.Contains(line, ":/nomad/") { 319 return false, fmt.Errorf("Not a member of the alloc's cgroup: expected=...:/nomad/... -- found=%q", line) 320 } 321 322 } 323 } 324 return true, nil 325 }, func(err error) { t.Error(err) }) 326 } 327 328 // TestExecutor_CgroupPaths asserts that all cgroups created for a task 329 // are destroyed on shutdown 330 func TestExecutor_CgroupPathsAreDestroyed(t *testing.T) { 331 ci.Parallel(t) 332 testutil.ExecCompatible(t) 333 334 require := require.New(t) 335 336 testExecCmd := testExecutorCommandWithChroot(t) 337 execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir 338 execCmd.Cmd = "/bin/bash" 339 execCmd.Args = []string{"-c", "sleep 0.2; cat /proc/self/cgroup"} 340 defer allocDir.Destroy() 341 342 execCmd.ResourceLimits = true 343 344 executor := NewExecutorWithIsolation(testlog.HCLogger(t)) 345 defer executor.Shutdown("SIGKILL", 0) 346 347 ps, err := executor.Launch(execCmd) 348 require.NoError(err) 349 require.NotZero(ps.Pid) 350 351 state, err := executor.Wait(context.Background()) 352 require.NoError(err) 353 require.Zero(state.ExitCode) 354 355 var cgroupsPaths string 356 tu.WaitForResult(func() (bool, error) { 357 output := strings.TrimSpace(testExecCmd.stdout.String()) 358 359 switch cgutil.UseV2 { 360 case true: 361 isScope := strings.HasSuffix(output, ".scope") 362 require.True(isScope) 363 case false: 364 // Verify that we got some cgroups 365 if !strings.Contains(output, ":devices:") { 366 return false, fmt.Errorf("was expected cgroup files but found:\n%v", output) 367 } 368 lines := strings.Split(output, "\n") 369 for _, line := range lines { 370 // Every cgroup entry should be /nomad/$ALLOC_ID 371 if line == "" { 372 continue 373 } 374 375 // Skip rdma subsystem; rdma was added in most recent kernels and libcontainer/docker 376 // don't isolate it by default. And also misc. 377 if strings.Contains(line, ":rdma:") || strings.Contains(line, "::") || strings.Contains(line, ":misc:") { 378 continue 379 } 380 381 if !strings.Contains(line, ":/nomad/") { 382 return false, fmt.Errorf("Not a member of the alloc's cgroup: expected=...:/nomad/... -- found=%q", line) 383 } 384 } 385 } 386 cgroupsPaths = output 387 return true, nil 388 }, func(err error) { t.Error(err) }) 389 390 // shutdown executor and test that cgroups are destroyed 391 executor.Shutdown("SIGKILL", 0) 392 393 // test that the cgroup paths are not visible 394 tmpFile, err := ioutil.TempFile("", "") 395 require.NoError(err) 396 defer os.Remove(tmpFile.Name()) 397 398 _, err = tmpFile.WriteString(cgroupsPaths) 399 require.NoError(err) 400 tmpFile.Close() 401 402 subsystems, err := cgroups.ParseCgroupFile(tmpFile.Name()) 403 require.NoError(err) 404 405 for subsystem, cgroup := range subsystems { 406 if subsystem == "" || !strings.Contains(cgroup, "nomad/") { 407 continue 408 } 409 p, err := cgutil.GetCgroupPathHelperV1(subsystem, cgroup) 410 require.NoError(err) 411 require.Falsef(cgroups.PathExists(p), "cgroup for %s %s still exists", subsystem, cgroup) 412 } 413 } 414 415 func TestExecutor_LookupTaskBin(t *testing.T) { 416 ci.Parallel(t) 417 418 // Create a temp dir 419 taskDir := t.TempDir() 420 mountDir := t.TempDir() 421 422 // Create the command with mounts 423 cmd := &ExecCommand{ 424 Env: []string{"PATH=/bin"}, 425 TaskDir: taskDir, 426 Mounts: []*drivers.MountConfig{{TaskPath: "/srv", HostPath: mountDir}}, 427 } 428 429 // Make a /foo /local/foo and /usr/local/bin subdirs under task dir 430 // and /bar under mountdir 431 must.NoError(t, os.MkdirAll(filepath.Join(taskDir, "foo"), 0700)) 432 must.NoError(t, os.MkdirAll(filepath.Join(taskDir, "local/foo"), 0700)) 433 must.NoError(t, os.MkdirAll(filepath.Join(taskDir, "usr/local/bin"), 0700)) 434 must.NoError(t, os.MkdirAll(filepath.Join(mountDir, "bar"), 0700)) 435 436 writeFile := func(paths ...string) { 437 t.Helper() 438 path := filepath.Join(paths...) 439 must.NoError(t, os.WriteFile(path, []byte("hello"), 0o700)) 440 } 441 442 // Write some files 443 writeFile(taskDir, "usr/local/bin", "tmp0.txt") // under /usr/local/bin in taskdir 444 writeFile(taskDir, "foo", "tmp1.txt") // under foo in taskdir 445 writeFile(taskDir, "local", "tmp2.txt") // under root of task-local dir 446 writeFile(taskDir, "local/foo", "tmp3.txt") // under foo in task-local dir 447 writeFile(mountDir, "tmp4.txt") // under root of mount dir 448 writeFile(mountDir, "bar/tmp5.txt") // under bar in mount dir 449 450 testCases := []struct { 451 name string 452 cmd string 453 expectErr string 454 expectTaskPath string 455 expectHostPath string 456 }{ 457 { 458 name: "lookup with file name in PATH", 459 cmd: "tmp0.txt", 460 expectTaskPath: "/usr/local/bin/tmp0.txt", 461 expectHostPath: filepath.Join(taskDir, "usr/local/bin/tmp0.txt"), 462 }, 463 { 464 name: "lookup with absolute path to binary", 465 cmd: "/foo/tmp1.txt", 466 expectTaskPath: "/foo/tmp1.txt", 467 expectHostPath: filepath.Join(taskDir, "foo/tmp1.txt"), 468 }, 469 { 470 name: "lookup in task local dir with absolute path to binary", 471 cmd: "/local/tmp2.txt", 472 expectTaskPath: "/local/tmp2.txt", 473 expectHostPath: filepath.Join(taskDir, "local/tmp2.txt"), 474 }, 475 { 476 name: "lookup in task local dir with relative path to binary", 477 cmd: "local/tmp2.txt", 478 expectTaskPath: "/local/tmp2.txt", 479 expectHostPath: filepath.Join(taskDir, "local/tmp2.txt"), 480 }, 481 { 482 name: "lookup in task local dir with file name", 483 cmd: "tmp2.txt", 484 expectTaskPath: "/local/tmp2.txt", 485 expectHostPath: filepath.Join(taskDir, "local/tmp2.txt"), 486 }, 487 { 488 name: "lookup in task local subdir with absolute path to binary", 489 cmd: "/local/foo/tmp3.txt", 490 expectTaskPath: "/local/foo/tmp3.txt", 491 expectHostPath: filepath.Join(taskDir, "local/foo/tmp3.txt"), 492 }, 493 { 494 name: "lookup host absolute path outside taskdir", 495 cmd: "/bin/sh", 496 expectErr: "file /bin/sh not found under path " + taskDir, 497 }, 498 { 499 name: "lookup file from mount with absolute path", 500 cmd: "/srv/tmp4.txt", 501 expectTaskPath: "/srv/tmp4.txt", 502 expectHostPath: filepath.Join(mountDir, "tmp4.txt"), 503 }, 504 { 505 name: "lookup file from mount with file name fails", 506 cmd: "tmp4.txt", 507 expectErr: "file tmp4.txt not found under path", 508 }, 509 { 510 name: "lookup file from mount with subdir", 511 cmd: "/srv/bar/tmp5.txt", 512 expectTaskPath: "/srv/bar/tmp5.txt", 513 expectHostPath: filepath.Join(mountDir, "bar/tmp5.txt"), 514 }, 515 } 516 517 for _, tc := range testCases { 518 t.Run(tc.name, func(t *testing.T) { 519 cmd.Cmd = tc.cmd 520 taskPath, hostPath, err := lookupTaskBin(cmd) 521 if tc.expectErr == "" { 522 must.NoError(t, err) 523 test.Eq(t, tc.expectTaskPath, taskPath) 524 test.Eq(t, tc.expectHostPath, hostPath) 525 } else { 526 test.EqError(t, err, tc.expectErr) 527 } 528 }) 529 } 530 } 531 532 // Exec Launch looks for the binary only inside the chroot 533 func TestExecutor_EscapeContainer(t *testing.T) { 534 ci.Parallel(t) 535 testutil.ExecCompatible(t) 536 testutil.CgroupsCompatibleV1(t) // todo(shoenig) kills the terminal, probably defaulting to / 537 538 require := require.New(t) 539 540 testExecCmd := testExecutorCommandWithChroot(t) 541 execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir 542 execCmd.Cmd = "/bin/kill" // missing from the chroot container 543 defer allocDir.Destroy() 544 545 execCmd.ResourceLimits = true 546 547 executor := NewExecutorWithIsolation(testlog.HCLogger(t)) 548 defer executor.Shutdown("SIGKILL", 0) 549 550 _, err := executor.Launch(execCmd) 551 require.Error(err) 552 require.Regexp("^file /bin/kill not found under path", err) 553 554 // Bare files are looked up using the system path, inside the container 555 allocDir.Destroy() 556 testExecCmd = testExecutorCommandWithChroot(t) 557 execCmd, allocDir = testExecCmd.command, testExecCmd.allocDir 558 execCmd.Cmd = "kill" 559 _, err = executor.Launch(execCmd) 560 require.Error(err) 561 require.Regexp("^file kill not found under path", err) 562 563 allocDir.Destroy() 564 testExecCmd = testExecutorCommandWithChroot(t) 565 execCmd, allocDir = testExecCmd.command, testExecCmd.allocDir 566 execCmd.Cmd = "echo" 567 _, err = executor.Launch(execCmd) 568 require.NoError(err) 569 } 570 571 // TestExecutor_DoesNotInheritOomScoreAdj asserts that the exec processes do not 572 // inherit the oom_score_adj value of Nomad agent/executor process 573 func TestExecutor_DoesNotInheritOomScoreAdj(t *testing.T) { 574 ci.Parallel(t) 575 testutil.ExecCompatible(t) 576 577 oomPath := "/proc/self/oom_score_adj" 578 origValue, err := os.ReadFile(oomPath) 579 require.NoError(t, err, "reading oom_score_adj") 580 581 err = os.WriteFile(oomPath, []byte("-100"), 0644) 582 require.NoError(t, err, "setting temporary oom_score_adj") 583 584 defer func() { 585 err := os.WriteFile(oomPath, origValue, 0644) 586 require.NoError(t, err, "restoring oom_score_adj") 587 }() 588 589 testExecCmd := testExecutorCommandWithChroot(t) 590 execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir 591 defer allocDir.Destroy() 592 593 execCmd.ResourceLimits = true 594 execCmd.Cmd = "/bin/bash" 595 execCmd.Args = []string{"-c", "cat /proc/self/oom_score_adj"} 596 597 executor := NewExecutorWithIsolation(testlog.HCLogger(t)) 598 defer executor.Shutdown("SIGKILL", 0) 599 600 _, err = executor.Launch(execCmd) 601 require.NoError(t, err) 602 603 ch := make(chan interface{}) 604 go func() { 605 executor.Wait(context.Background()) 606 close(ch) 607 }() 608 609 select { 610 case <-ch: 611 // all good 612 case <-time.After(5 * time.Second): 613 require.Fail(t, "timeout waiting for exec to shutdown") 614 } 615 616 expected := "0" 617 tu.WaitForResult(func() (bool, error) { 618 output := strings.TrimSpace(testExecCmd.stdout.String()) 619 if output != expected { 620 return false, fmt.Errorf("oom_score_adj didn't match: want\n%v\n; got:\n%v\n", expected, output) 621 } 622 return true, nil 623 }, func(err error) { require.NoError(t, err) }) 624 625 } 626 627 func TestExecutor_Capabilities(t *testing.T) { 628 ci.Parallel(t) 629 testutil.ExecCompatible(t) 630 631 cases := []struct { 632 user string 633 caps string 634 }{ 635 { 636 user: "nobody", 637 caps: ` 638 CapInh: 0000000000000000 639 CapPrm: 0000000000000000 640 CapEff: 0000000000000000 641 CapBnd: 00000000a80405fb 642 CapAmb: 0000000000000000`, 643 }, 644 { 645 user: "root", 646 caps: ` 647 CapInh: 0000000000000000 648 CapPrm: 0000003fffffffff 649 CapEff: 0000003fffffffff 650 CapBnd: 0000003fffffffff 651 CapAmb: 0000000000000000`, 652 }, 653 } 654 655 for _, c := range cases { 656 t.Run(c.user, func(t *testing.T) { 657 658 testExecCmd := testExecutorCommandWithChroot(t) 659 execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir 660 defer allocDir.Destroy() 661 662 execCmd.User = c.user 663 execCmd.ResourceLimits = true 664 execCmd.Cmd = "/bin/bash" 665 execCmd.Args = []string{"-c", "cat /proc/$$/status"} 666 execCmd.Capabilities = capabilities.NomadDefaults().Slice(true) 667 668 executor := NewExecutorWithIsolation(testlog.HCLogger(t)) 669 defer executor.Shutdown("SIGKILL", 0) 670 671 _, err := executor.Launch(execCmd) 672 require.NoError(t, err) 673 674 ch := make(chan interface{}) 675 go func() { 676 executor.Wait(context.Background()) 677 close(ch) 678 }() 679 680 select { 681 case <-ch: 682 // all good 683 case <-time.After(5 * time.Second): 684 require.Fail(t, "timeout waiting for exec to shutdown") 685 } 686 687 canonical := func(s string) string { 688 s = strings.TrimSpace(s) 689 s = regexp.MustCompile("[ \t]+").ReplaceAllString(s, " ") 690 s = regexp.MustCompile("[\n\r]+").ReplaceAllString(s, "\n") 691 return s 692 } 693 694 expected := canonical(c.caps) 695 tu.WaitForResult(func() (bool, error) { 696 output := canonical(testExecCmd.stdout.String()) 697 if !strings.Contains(output, expected) { 698 return false, fmt.Errorf("capabilities didn't match: want\n%v\n; got:\n%v\n", expected, output) 699 } 700 return true, nil 701 }, func(err error) { require.NoError(t, err) }) 702 }) 703 } 704 705 } 706 707 func TestExecutor_ClientCleanup(t *testing.T) { 708 ci.Parallel(t) 709 testutil.ExecCompatible(t) 710 require := require.New(t) 711 712 testExecCmd := testExecutorCommandWithChroot(t) 713 execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir 714 defer allocDir.Destroy() 715 716 executor := NewExecutorWithIsolation(testlog.HCLogger(t)) 717 defer executor.Shutdown("", 0) 718 719 // Need to run a command which will produce continuous output but not 720 // too quickly to ensure executor.Exit() stops the process. 721 execCmd.Cmd = "/bin/bash" 722 execCmd.Args = []string{"-c", "while true; do /bin/echo X; /bin/sleep 1; done"} 723 execCmd.ResourceLimits = true 724 725 ps, err := executor.Launch(execCmd) 726 727 require.NoError(err) 728 require.NotZero(ps.Pid) 729 time.Sleep(500 * time.Millisecond) 730 require.NoError(executor.Shutdown("SIGINT", 100*time.Millisecond)) 731 732 ch := make(chan interface{}) 733 go func() { 734 executor.Wait(context.Background()) 735 close(ch) 736 }() 737 738 select { 739 case <-ch: 740 // all good 741 case <-time.After(5 * time.Second): 742 require.Fail("timeout waiting for exec to shutdown") 743 } 744 745 output := testExecCmd.stdout.String() 746 require.NotZero(len(output)) 747 time.Sleep(2 * time.Second) 748 output1 := testExecCmd.stdout.String() 749 require.Equal(len(output), len(output1)) 750 } 751 752 func TestExecutor_cmdDevices(t *testing.T) { 753 ci.Parallel(t) 754 input := []*drivers.DeviceConfig{ 755 { 756 HostPath: "/dev/null", 757 TaskPath: "/task/dev/null", 758 Permissions: "rwm", 759 }, 760 } 761 762 expected := &devices.Device{ 763 Rule: devices.Rule{ 764 Type: 99, 765 Major: 1, 766 Minor: 3, 767 Permissions: "rwm", 768 }, 769 Path: "/task/dev/null", 770 } 771 772 found, err := cmdDevices(input) 773 require.NoError(t, err) 774 require.Len(t, found, 1) 775 776 // ignore file permission and ownership 777 // as they are host specific potentially 778 d := found[0] 779 d.FileMode = 0 780 d.Uid = 0 781 d.Gid = 0 782 783 require.EqualValues(t, expected, d) 784 } 785 786 func TestExecutor_cmdMounts(t *testing.T) { 787 ci.Parallel(t) 788 input := []*drivers.MountConfig{ 789 { 790 HostPath: "/host/path-ro", 791 TaskPath: "/task/path-ro", 792 Readonly: true, 793 }, 794 { 795 HostPath: "/host/path-rw", 796 TaskPath: "/task/path-rw", 797 Readonly: false, 798 }, 799 } 800 801 expected := []*lconfigs.Mount{ 802 { 803 Source: "/host/path-ro", 804 Destination: "/task/path-ro", 805 Flags: unix.MS_BIND | unix.MS_RDONLY, 806 Device: "bind", 807 PropagationFlags: []int{unix.MS_PRIVATE | unix.MS_REC}, 808 }, 809 { 810 Source: "/host/path-rw", 811 Destination: "/task/path-rw", 812 Flags: unix.MS_BIND, 813 Device: "bind", 814 PropagationFlags: []int{unix.MS_PRIVATE | unix.MS_REC}, 815 }, 816 } 817 818 require.EqualValues(t, expected, cmdMounts(input)) 819 } 820 821 // TestUniversalExecutor_NoCgroup asserts that commands are executed in the 822 // same cgroup as parent process 823 func TestUniversalExecutor_NoCgroup(t *testing.T) { 824 ci.Parallel(t) 825 testutil.ExecCompatible(t) 826 827 expectedBytes, err := ioutil.ReadFile("/proc/self/cgroup") 828 require.NoError(t, err) 829 830 expected := strings.TrimSpace(string(expectedBytes)) 831 832 testExecCmd := testExecutorCommand(t) 833 execCmd, allocDir := testExecCmd.command, testExecCmd.allocDir 834 execCmd.Cmd = "/bin/cat" 835 execCmd.Args = []string{"/proc/self/cgroup"} 836 defer allocDir.Destroy() 837 838 execCmd.BasicProcessCgroup = false 839 execCmd.ResourceLimits = false 840 841 executor := NewExecutor(testlog.HCLogger(t)) 842 defer executor.Shutdown("SIGKILL", 0) 843 844 _, err = executor.Launch(execCmd) 845 require.NoError(t, err) 846 847 _, err = executor.Wait(context.Background()) 848 require.NoError(t, err) 849 850 tu.WaitForResult(func() (bool, error) { 851 act := strings.TrimSpace(string(testExecCmd.stdout.String())) 852 if expected != act { 853 return false, fmt.Errorf("expected:\n%s actual:\n%s", expected, act) 854 } 855 return true, nil 856 }, func(err error) { 857 stderr := strings.TrimSpace(string(testExecCmd.stderr.String())) 858 t.Logf("stderr: %v", stderr) 859 require.NoError(t, err) 860 }) 861 862 }