// drivers/exec/driver_test.go (nomad v0.11.3)

package exec

import (
	"bytes"
	"context"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"syscall"
	"testing"
	"time"

	ctestutils "github.com/hashicorp/nomad/client/testutil"
	"github.com/hashicorp/nomad/helper/pluginutils/hclutils"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/testtask"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/structs"
	basePlug "github.com/hashicorp/nomad/plugins/base"
	"github.com/hashicorp/nomad/plugins/drivers"
	dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/require"
)

// TestMain runs the regular test suite unless testtask.Run() reports that
// this process was re-invoked as a test task helper, in which case the
// helper has already done its work and we must not run the tests again.
func TestMain(m *testing.M) {
	if !testtask.Run() {
		os.Exit(m.Run())
	}
}

// testResources is a minimal resource allocation shared by the driver tests
// below; both the Nomad-level and Linux-level views describe the same
// 128 MiB / 100 CPU-share budget.
var testResources = &drivers.Resources{
	NomadResources: &structs.AllocatedTaskResources{
		Memory: structs.AllocatedMemoryResources{
			MemoryMB: 128,
		},
		Cpu: structs.AllocatedCpuResources{
			CpuShares: 100,
		},
	},
	LinuxResources: &drivers.LinuxResources{
		MemoryLimitBytes: 134217728, // 128 MiB, mirrors MemoryMB above
		CPUShares:        100,
	},
}

// TestExecDriver_Fingerprint_NonLinux asserts the exec driver reports itself
// as undetected on any platform other than Linux.
func TestExecDriver_Fingerprint_NonLinux(t *testing.T) {
	if !testutil.IsCI() {
		t.Parallel()
	}
	require := require.New(t)
	if runtime.GOOS == "linux" {
		t.Skip("Test only available not on Linux")
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	d := NewExecDriver(ctx, testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	fingerCh, err := harness.Fingerprint(context.Background())
	require.NoError(err)
	select {
	case finger := <-fingerCh:
		require.Equal(drivers.HealthStateUndetected, finger.Health)
	case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
		require.Fail("timeout receiving fingerprint")
	}
}
// TestExecDriver_Fingerprint asserts that on a compatible (Linux, root)
// host the driver fingerprints as healthy and advertises the
// "driver.exec" attribute.
func TestExecDriver_Fingerprint(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	ctestutils.ExecCompatible(t)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	d := NewExecDriver(ctx, testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)

	fingerCh, err := harness.Fingerprint(context.Background())
	require.NoError(err)
	select {
	case finger := <-fingerCh:
		require.Equal(drivers.HealthStateHealthy, finger.Health)
		require.True(finger.Attributes["driver.exec"].GetBool())
	case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second):
		require.Fail("timeout receiving fingerprint")
	}
}

// TestExecDriver_StartWait starts a short-lived task and waits for it to
// exit successfully.
func TestExecDriver_StartWait(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	ctestutils.ExecCompatible(t)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	d := NewExecDriver(ctx, testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "test",
		Resources: testResources,
	}

	tc := &TaskConfig{
		Command: "cat",
		Args:    []string{"/proc/self/cgroup"},
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))

	cleanup := harness.MkAllocDir(task, false)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)
	result := <-ch
	require.Zero(result.ExitCode)
	require.NoError(harness.DestroyTask(task.ID, true))
}

// TestExecDriver_StartWaitStopKill starts a long-running task, stops it
// with SIGINT, and asserts the task result is unsuccessful and the task is
// eventually reported as exited.
func TestExecDriver_StartWaitStopKill(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	ctestutils.ExecCompatible(t)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	d := NewExecDriver(ctx, testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "test",
		Resources: testResources,
	}

	tc := &TaskConfig{
		Command: "/bin/bash",
		Args:    []string{"-c", "echo hi; sleep 600"},
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))

	cleanup := harness.MkAllocDir(task, false)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)
	defer harness.DestroyTask(task.ID, true)

	ch, err := harness.WaitTask(context.Background(), handle.Config.ID)
	require.NoError(err)

	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))

	// Stop concurrently so the WaitTask channel below observes the kill.
	go func() {
		harness.StopTask(task.ID, 2*time.Second, "SIGINT")
	}()

	select {
	case result := <-ch:
		require.False(result.Successful())
	case <-time.After(10 * time.Second):
		require.Fail("timeout waiting for task to shutdown")
	}

	// Ensure that the task is marked as dead, but account
	// for WaitTask() closing channel before internal state is updated
	testutil.WaitForResult(func() (bool, error) {
		status, err := harness.InspectTask(task.ID)
		if err != nil {
			return false, fmt.Errorf("inspecting task failed: %v", err)
		}
		if status.State != drivers.TaskStateExited {
			return false, fmt.Errorf("task hasn't exited yet; status: %v", status.State)
		}

		return true, nil
	}, func(err error) {
		require.NoError(err)
	})

	require.NoError(harness.DestroyTask(task.ID, true))
}

// TestExecDriver_StartWaitRecover asserts that after the driver "loses"
// a running task its handle can be recovered with RecoverTask and the task
// is still reported as running.
func TestExecDriver_StartWaitRecover(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	ctestutils.ExecCompatible(t)

	dctx, dcancel := context.WithCancel(context.Background())
	defer dcancel()

	d := NewExecDriver(dctx, testlog.HCLogger(t))
	harness := dtestutil.NewDriverHarness(t, d)
	task := &drivers.TaskConfig{
		ID:        uuid.Generate(),
		Name:      "test",
		Resources: testResources,
	}

	tc := &TaskConfig{
		Command: "/bin/sleep",
		Args:    []string{"5"},
	}
	require.NoError(task.EncodeConcreteDriverConfig(&tc))

	cleanup := harness.MkAllocDir(task, false)
	defer cleanup()

	handle, _, err := harness.StartTask(task)
	require.NoError(err)

	// Separate context for WaitTask so it can be canceled without
	// stopping the task itself.
	ctx, cancel := context.WithCancel(context.Background())

	ch, err := harness.WaitTask(ctx, handle.Config.ID)
	require.NoError(err)

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		// Canceling the wait context should surface an error result,
		// not a normal exit.
		result := <-ch
		require.Error(result.Err)
	}()

	require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second))
	cancel()

	waitCh := make(chan struct{})
	go func() {
		defer close(waitCh)
		wg.Wait()
	}()

	select {
	case <-waitCh:
		status, err := harness.InspectTask(task.ID)
		require.NoError(err)
		require.Equal(drivers.TaskStateRunning, status.State)
	case <-time.After(1 * time.Second):
		require.Fail("timeout waiting for task wait to cancel")
	}

	// Lose the task from the driver's in-memory state, then recover it.
	d.(*Driver).tasks.Delete(task.ID)
	_, err = harness.InspectTask(task.ID)
	require.Error(err)

	require.NoError(harness.RecoverTask(handle))
	status, err := harness.InspectTask(task.ID)
	require.NoError(err)
	require.Equal(drivers.TaskStateRunning, status.State)

	require.NoError(harness.StopTask(task.ID, 0, ""))
	require.NoError(harness.DestroyTask(task.ID, true))
}

// TestExecDriver_DestroyKillsAll asserts that when TaskDestroy is called all
// task processes are cleaned up.
275 func TestExecDriver_DestroyKillsAll(t *testing.T) { 276 t.Parallel() 277 require := require.New(t) 278 ctestutils.ExecCompatible(t) 279 280 ctx, cancel := context.WithCancel(context.Background()) 281 defer cancel() 282 283 d := NewExecDriver(ctx, testlog.HCLogger(t)) 284 harness := dtestutil.NewDriverHarness(t, d) 285 defer harness.Kill() 286 287 task := &drivers.TaskConfig{ 288 ID: uuid.Generate(), 289 Name: "test", 290 } 291 292 cleanup := harness.MkAllocDir(task, true) 293 defer cleanup() 294 295 taskConfig := map[string]interface{}{} 296 taskConfig["command"] = "/bin/sh" 297 taskConfig["args"] = []string{"-c", fmt.Sprintf(`sleep 3600 & echo "SLEEP_PID=$!"`)} 298 299 require.NoError(task.EncodeConcreteDriverConfig(&taskConfig)) 300 301 handle, _, err := harness.StartTask(task) 302 require.NoError(err) 303 defer harness.DestroyTask(task.ID, true) 304 305 ch, err := harness.WaitTask(context.Background(), handle.Config.ID) 306 require.NoError(err) 307 308 select { 309 case result := <-ch: 310 require.True(result.Successful(), "command failed: %#v", result) 311 case <-time.After(10 * time.Second): 312 require.Fail("timeout waiting for task to shutdown") 313 } 314 315 sleepPid := 0 316 317 // Ensure that the task is marked as dead, but account 318 // for WaitTask() closing channel before internal state is updated 319 testutil.WaitForResult(func() (bool, error) { 320 stdout, err := ioutil.ReadFile(filepath.Join(task.TaskDir().LogDir, "test.stdout.0")) 321 if err != nil { 322 return false, fmt.Errorf("failed to output pid file: %v", err) 323 } 324 325 pidMatch := regexp.MustCompile(`SLEEP_PID=(\d+)`).FindStringSubmatch(string(stdout)) 326 if len(pidMatch) != 2 { 327 return false, fmt.Errorf("failed to find pid in %s", string(stdout)) 328 } 329 330 pid, err := strconv.Atoi(pidMatch[1]) 331 if err != nil { 332 return false, fmt.Errorf("pid parts aren't int: %s", pidMatch[1]) 333 } 334 335 sleepPid = pid 336 return true, nil 337 }, func(err error) { 338 
require.NoError(err) 339 }) 340 341 // isProcessRunning returns an error if process is not running 342 isProcessRunning := func(pid int) error { 343 process, err := os.FindProcess(pid) 344 if err != nil { 345 return fmt.Errorf("failed to find process: %s", err) 346 } 347 348 err = process.Signal(syscall.Signal(0)) 349 if err != nil { 350 return fmt.Errorf("failed to signal process: %s", err) 351 } 352 353 return nil 354 } 355 356 require.NoError(isProcessRunning(sleepPid)) 357 358 require.NoError(harness.DestroyTask(task.ID, true)) 359 360 testutil.WaitForResult(func() (bool, error) { 361 err := isProcessRunning(sleepPid) 362 if err == nil { 363 return false, fmt.Errorf("child process is still running") 364 } 365 366 if !strings.Contains(err.Error(), "failed to signal process") { 367 return false, fmt.Errorf("unexpected error: %v", err) 368 } 369 370 return true, nil 371 }, func(err error) { 372 require.NoError(err) 373 }) 374 } 375 376 func TestExecDriver_Stats(t *testing.T) { 377 t.Parallel() 378 require := require.New(t) 379 ctestutils.ExecCompatible(t) 380 381 dctx, dcancel := context.WithCancel(context.Background()) 382 defer dcancel() 383 384 d := NewExecDriver(dctx, testlog.HCLogger(t)) 385 harness := dtestutil.NewDriverHarness(t, d) 386 task := &drivers.TaskConfig{ 387 ID: uuid.Generate(), 388 Name: "test", 389 Resources: testResources, 390 } 391 392 tc := &TaskConfig{ 393 Command: "/bin/sleep", 394 Args: []string{"5"}, 395 } 396 require.NoError(task.EncodeConcreteDriverConfig(&tc)) 397 398 cleanup := harness.MkAllocDir(task, false) 399 defer cleanup() 400 401 handle, _, err := harness.StartTask(task) 402 require.NoError(err) 403 require.NotNil(handle) 404 405 require.NoError(harness.WaitUntilStarted(task.ID, 1*time.Second)) 406 ctx, cancel := context.WithCancel(context.Background()) 407 defer cancel() 408 statsCh, err := harness.TaskStats(ctx, task.ID, time.Second*10) 409 require.NoError(err) 410 select { 411 case stats := <-statsCh: 412 
require.NotZero(stats.ResourceUsage.MemoryStats.RSS) 413 require.NotZero(stats.Timestamp) 414 require.WithinDuration(time.Now(), time.Unix(0, stats.Timestamp), time.Second) 415 case <-time.After(time.Second): 416 require.Fail("timeout receiving from channel") 417 } 418 419 require.NoError(harness.DestroyTask(task.ID, true)) 420 } 421 422 func TestExecDriver_Start_Wait_AllocDir(t *testing.T) { 423 t.Parallel() 424 require := require.New(t) 425 ctestutils.ExecCompatible(t) 426 427 ctx, cancel := context.WithCancel(context.Background()) 428 defer cancel() 429 430 d := NewExecDriver(ctx, testlog.HCLogger(t)) 431 harness := dtestutil.NewDriverHarness(t, d) 432 task := &drivers.TaskConfig{ 433 ID: uuid.Generate(), 434 Name: "sleep", 435 Resources: testResources, 436 } 437 cleanup := harness.MkAllocDir(task, false) 438 defer cleanup() 439 440 exp := []byte{'w', 'i', 'n'} 441 file := "output.txt" 442 tc := &TaskConfig{ 443 Command: "/bin/bash", 444 Args: []string{ 445 "-c", 446 fmt.Sprintf(`sleep 1; echo -n %s > /alloc/%s`, string(exp), file), 447 }, 448 } 449 require.NoError(task.EncodeConcreteDriverConfig(&tc)) 450 451 handle, _, err := harness.StartTask(task) 452 require.NoError(err) 453 require.NotNil(handle) 454 455 // Task should terminate quickly 456 waitCh, err := harness.WaitTask(context.Background(), task.ID) 457 require.NoError(err) 458 select { 459 case res := <-waitCh: 460 require.True(res.Successful(), "task should have exited successfully: %v", res) 461 case <-time.After(time.Duration(testutil.TestMultiplier()*5) * time.Second): 462 require.Fail("timeout waiting for task") 463 } 464 465 // Check that data was written to the shared alloc directory. 
466 outputFile := filepath.Join(task.TaskDir().SharedAllocDir, file) 467 act, err := ioutil.ReadFile(outputFile) 468 require.NoError(err) 469 require.Exactly(exp, act) 470 471 require.NoError(harness.DestroyTask(task.ID, true)) 472 } 473 474 func TestExecDriver_User(t *testing.T) { 475 t.Parallel() 476 require := require.New(t) 477 ctestutils.ExecCompatible(t) 478 479 ctx, cancel := context.WithCancel(context.Background()) 480 defer cancel() 481 482 d := NewExecDriver(ctx, testlog.HCLogger(t)) 483 harness := dtestutil.NewDriverHarness(t, d) 484 task := &drivers.TaskConfig{ 485 ID: uuid.Generate(), 486 Name: "sleep", 487 User: "alice", 488 Resources: testResources, 489 } 490 cleanup := harness.MkAllocDir(task, false) 491 defer cleanup() 492 493 tc := &TaskConfig{ 494 Command: "/bin/sleep", 495 Args: []string{"100"}, 496 } 497 require.NoError(task.EncodeConcreteDriverConfig(&tc)) 498 499 handle, _, err := harness.StartTask(task) 500 require.Error(err) 501 require.Nil(handle) 502 503 msg := "user alice" 504 if !strings.Contains(err.Error(), msg) { 505 t.Fatalf("Expecting '%v' in '%v'", msg, err) 506 } 507 } 508 509 // TestExecDriver_HandlerExec ensures the exec driver's handle properly 510 // executes commands inside the container. 
511 func TestExecDriver_HandlerExec(t *testing.T) { 512 t.Parallel() 513 require := require.New(t) 514 ctestutils.ExecCompatible(t) 515 516 ctx, cancel := context.WithCancel(context.Background()) 517 defer cancel() 518 519 d := NewExecDriver(ctx, testlog.HCLogger(t)) 520 harness := dtestutil.NewDriverHarness(t, d) 521 task := &drivers.TaskConfig{ 522 ID: uuid.Generate(), 523 Name: "sleep", 524 Resources: testResources, 525 } 526 cleanup := harness.MkAllocDir(task, false) 527 defer cleanup() 528 529 tc := &TaskConfig{ 530 Command: "/bin/sleep", 531 Args: []string{"9000"}, 532 } 533 require.NoError(task.EncodeConcreteDriverConfig(&tc)) 534 535 handle, _, err := harness.StartTask(task) 536 require.NoError(err) 537 require.NotNil(handle) 538 539 // Exec a command that should work and dump the environment 540 // TODO: enable section when exec env is fully loaded 541 /*res, err := harness.ExecTask(task.ID, []string{"/bin/sh", "-c", "env | grep ^NOMAD"}, time.Second) 542 require.NoError(err) 543 require.True(res.ExitResult.Successful()) 544 545 // Assert exec'd commands are run in a task-like environment 546 scriptEnv := make(map[string]string) 547 for _, line := range strings.Split(string(res.Stdout), "\n") { 548 if line == "" { 549 continue 550 } 551 parts := strings.SplitN(string(line), "=", 2) 552 if len(parts) != 2 { 553 t.Fatalf("Invalid env var: %q", line) 554 } 555 scriptEnv[parts[0]] = parts[1] 556 } 557 if v, ok := scriptEnv["NOMAD_SECRETS_DIR"]; !ok || v != "/secrets" { 558 t.Errorf("Expected NOMAD_SECRETS_DIR=/secrets but found=%t value=%q", ok, v) 559 }*/ 560 561 // Assert cgroup membership 562 res, err := harness.ExecTask(task.ID, []string{"/bin/cat", "/proc/self/cgroup"}, time.Second) 563 require.NoError(err) 564 require.True(res.ExitResult.Successful()) 565 found := false 566 for _, line := range strings.Split(string(res.Stdout), "\n") { 567 // Every cgroup entry should be /nomad/$ALLOC_ID 568 if line == "" { 569 continue 570 } 571 // Skip rdma subsystem; 
rdma was added in most recent kernels and libcontainer/docker 572 // don't isolate it by default. 573 if strings.Contains(line, ":rdma:") { 574 continue 575 } 576 if !strings.Contains(line, ":/nomad/") { 577 t.Errorf("Not a member of the alloc's cgroup: expected=...:/nomad/... -- found=%q", line) 578 continue 579 } 580 found = true 581 } 582 require.True(found, "exec'd command isn't in the task's cgroup") 583 584 // Exec a command that should fail 585 res, err = harness.ExecTask(task.ID, []string{"/usr/bin/stat", "lkjhdsaflkjshowaisxmcvnlia"}, time.Second) 586 require.NoError(err) 587 require.False(res.ExitResult.Successful()) 588 if expected := "No such file or directory"; !bytes.Contains(res.Stdout, []byte(expected)) { 589 t.Fatalf("expected output to contain %q but found: %q", expected, res.Stdout) 590 } 591 592 require.NoError(harness.DestroyTask(task.ID, true)) 593 } 594 595 func TestExecDriver_DevicesAndMounts(t *testing.T) { 596 t.Parallel() 597 require := require.New(t) 598 ctestutils.ExecCompatible(t) 599 600 tmpDir, err := ioutil.TempDir("", "exec_binds_mounts") 601 require.NoError(err) 602 defer os.RemoveAll(tmpDir) 603 604 err = ioutil.WriteFile(filepath.Join(tmpDir, "testfile"), []byte("from-host"), 600) 605 require.NoError(err) 606 607 ctx, cancel := context.WithCancel(context.Background()) 608 defer cancel() 609 610 d := NewExecDriver(ctx, testlog.HCLogger(t)) 611 harness := dtestutil.NewDriverHarness(t, d) 612 task := &drivers.TaskConfig{ 613 ID: uuid.Generate(), 614 Name: "test", 615 User: "root", // need permission to read mounts paths 616 Resources: testResources, 617 StdoutPath: filepath.Join(tmpDir, "task-stdout"), 618 StderrPath: filepath.Join(tmpDir, "task-stderr"), 619 Devices: []*drivers.DeviceConfig{ 620 { 621 TaskPath: "/dev/inserted-random", 622 HostPath: "/dev/random", 623 Permissions: "rw", 624 }, 625 }, 626 Mounts: []*drivers.MountConfig{ 627 { 628 TaskPath: "/tmp/task-path-rw", 629 HostPath: tmpDir, 630 Readonly: false, 631 }, 632 { 
633 TaskPath: "/tmp/task-path-ro", 634 HostPath: tmpDir, 635 Readonly: true, 636 }, 637 }, 638 } 639 640 require.NoError(ioutil.WriteFile(task.StdoutPath, []byte{}, 660)) 641 require.NoError(ioutil.WriteFile(task.StderrPath, []byte{}, 660)) 642 643 tc := &TaskConfig{ 644 Command: "/bin/bash", 645 Args: []string{"-c", ` 646 export LANG=en.UTF-8 647 echo "mounted device /inserted-random: $(stat -c '%t:%T' /dev/inserted-random)" 648 echo "reading from ro path: $(cat /tmp/task-path-ro/testfile)" 649 echo "reading from rw path: $(cat /tmp/task-path-rw/testfile)" 650 touch /tmp/task-path-rw/testfile && echo 'overwriting file in rw succeeded' 651 touch /tmp/task-path-rw/testfile-from-rw && echo from-exec > /tmp/task-path-rw/testfile-from-rw && echo 'writing new file in rw succeeded' 652 touch /tmp/task-path-ro/testfile && echo 'overwriting file in ro succeeded' 653 touch /tmp/task-path-ro/testfile-from-ro && echo from-exec > /tmp/task-path-ro/testfile-from-ro && echo 'writing new file in ro succeeded' 654 exit 0 655 `}, 656 } 657 require.NoError(task.EncodeConcreteDriverConfig(&tc)) 658 659 cleanup := harness.MkAllocDir(task, false) 660 defer cleanup() 661 662 handle, _, err := harness.StartTask(task) 663 require.NoError(err) 664 665 ch, err := harness.WaitTask(context.Background(), handle.Config.ID) 666 require.NoError(err) 667 result := <-ch 668 require.NoError(harness.DestroyTask(task.ID, true)) 669 670 stdout, err := ioutil.ReadFile(task.StdoutPath) 671 require.NoError(err) 672 require.Equal(`mounted device /inserted-random: 1:8 673 reading from ro path: from-host 674 reading from rw path: from-host 675 overwriting file in rw succeeded 676 writing new file in rw succeeded`, strings.TrimSpace(string(stdout))) 677 678 stderr, err := ioutil.ReadFile(task.StderrPath) 679 require.NoError(err) 680 require.Equal(`touch: cannot touch '/tmp/task-path-ro/testfile': Read-only file system 681 touch: cannot touch '/tmp/task-path-ro/testfile-from-ro': Read-only file system`, 
strings.TrimSpace(string(stderr))) 682 683 // testing exit code last so we can inspect output first 684 require.Zero(result.ExitCode) 685 686 fromRWContent, err := ioutil.ReadFile(filepath.Join(tmpDir, "testfile-from-rw")) 687 require.NoError(err) 688 require.Equal("from-exec", strings.TrimSpace(string(fromRWContent))) 689 } 690 691 func TestConfig_ParseAllHCL(t *testing.T) { 692 cfgStr := ` 693 config { 694 command = "/bin/bash" 695 args = ["-c", "echo hello"] 696 }` 697 698 expected := &TaskConfig{ 699 Command: "/bin/bash", 700 Args: []string{"-c", "echo hello"}, 701 } 702 703 var tc *TaskConfig 704 hclutils.NewConfigParser(taskConfigSpec).ParseHCL(t, cfgStr, &tc) 705 706 require.EqualValues(t, expected, tc) 707 } 708 709 func TestExecDriver_NoPivotRoot(t *testing.T) { 710 t.Parallel() 711 require := require.New(t) 712 ctestutils.ExecCompatible(t) 713 714 ctx, cancel := context.WithCancel(context.Background()) 715 defer cancel() 716 717 d := NewExecDriver(ctx, testlog.HCLogger(t)) 718 harness := dtestutil.NewDriverHarness(t, d) 719 720 config := &Config{NoPivotRoot: true} 721 var data []byte 722 require.NoError(basePlug.MsgPackEncode(&data, config)) 723 bconfig := &basePlug.Config{PluginConfig: data} 724 require.NoError(harness.SetConfig(bconfig)) 725 726 task := &drivers.TaskConfig{ 727 ID: uuid.Generate(), 728 Name: "sleep", 729 Resources: testResources, 730 } 731 cleanup := harness.MkAllocDir(task, false) 732 defer cleanup() 733 734 tc := &TaskConfig{ 735 Command: "/bin/sleep", 736 Args: []string{"100"}, 737 } 738 require.NoError(task.EncodeConcreteDriverConfig(&tc)) 739 740 handle, _, err := harness.StartTask(task) 741 require.NoError(err) 742 require.NotNil(handle) 743 }