github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/drivers/exec/driver.go

package exec

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"sync"
	"time"

	"github.com/hashicorp/consul-template/signals"
	hclog "github.com/hashicorp/go-hclog"
	"github.com/hashicorp/nomad/client/lib/cgutil"
	"github.com/hashicorp/nomad/drivers/shared/capabilities"
	"github.com/hashicorp/nomad/drivers/shared/eventer"
	"github.com/hashicorp/nomad/drivers/shared/executor"
	"github.com/hashicorp/nomad/drivers/shared/resolvconf"
	"github.com/hashicorp/nomad/helper/pluginutils/loader"
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/hashicorp/nomad/plugins/base"
	"github.com/hashicorp/nomad/plugins/drivers"
	"github.com/hashicorp/nomad/plugins/drivers/utils"
	"github.com/hashicorp/nomad/plugins/shared/hclspec"
	pstructs "github.com/hashicorp/nomad/plugins/shared/structs"
)

const (
	// pluginName is the name of the plugin
	pluginName = "exec"

	// fingerprintPeriod is the interval at which the driver will send fingerprint responses
	fingerprintPeriod = 30 * time.Second

	// taskHandleVersion is the version of task handle which this driver sets
	// and understands how to decode driver state
	taskHandleVersion = 1
)

var (
	// PluginID is the exec plugin metadata registered in the plugin
	// catalog.
	PluginID = loader.PluginID{
		Name:       pluginName,
		PluginType: base.PluginTypeDriver,
	}

	// PluginConfig is the exec driver factory function registered in the
	// plugin catalog.
	PluginConfig = &loader.InternalPluginConfig{
		Config:  map[string]interface{}{},
		Factory: func(ctx context.Context, l hclog.Logger) interface{} { return NewExecDriver(ctx, l) },
	}

	// pluginInfo is the response returned for the PluginInfo RPC
	pluginInfo = &base.PluginInfoResponse{
		Type:              base.PluginTypeDriver,
		PluginApiVersions: []string{drivers.ApiVersion010},
		PluginVersion:     "0.1.0",
		Name:              pluginName,
	}

	// configSpec is the hcl specification returned by the ConfigSchema RPC
	configSpec = hclspec.NewObject(map[string]*hclspec.Spec{
		"no_pivot_root": hclspec.NewDefault(
			hclspec.NewAttr("no_pivot_root", "bool", false),
			hclspec.NewLiteral("false"),
		),
		"default_pid_mode": hclspec.NewDefault(
			hclspec.NewAttr("default_pid_mode", "string", false),
			hclspec.NewLiteral(`"private"`),
		),
		"default_ipc_mode": hclspec.NewDefault(
			hclspec.NewAttr("default_ipc_mode", "string", false),
			hclspec.NewLiteral(`"private"`),
		),
		"allow_caps": hclspec.NewDefault(
			hclspec.NewAttr("allow_caps", "list(string)", false),
			hclspec.NewLiteral(capabilities.HCLSpecLiteral),
		),
	})

	// taskConfigSpec is the hcl specification for the driver config section of
	// a task within a job. It is returned in the TaskConfigSchema RPC
	taskConfigSpec = hclspec.NewObject(map[string]*hclspec.Spec{
		"command":  hclspec.NewAttr("command", "string", true),
		"args":     hclspec.NewAttr("args", "list(string)", false),
		"pid_mode": hclspec.NewAttr("pid_mode", "string", false),
		"ipc_mode": hclspec.NewAttr("ipc_mode", "string", false),
		"cap_add":  hclspec.NewAttr("cap_add", "list(string)", false),
		"cap_drop": hclspec.NewAttr("cap_drop", "list(string)", false),
	})

	// driverCapabilities represents the RPC response for what features are
	// implemented by the exec task driver
	driverCapabilities = &drivers.Capabilities{
		SendSignals: true,
		Exec:        true,
		FSIsolation: drivers.FSIsolationChroot,
		NetIsolationModes: []drivers.NetIsolationMode{
			drivers.NetIsolationModeHost,
			drivers.NetIsolationModeGroup,
		},
		MountConfigs: drivers.MountConfigSupportAll,
	}
)

// Driver fork/execs tasks using many of the underlying OS's isolation
// features where configured.
type Driver struct {
	// eventer is used to handle multiplexing of TaskEvents calls such that an
	// event can be broadcast to all callers
	eventer *eventer.Eventer

	// config is the driver configuration set by the SetConfig RPC
	config Config

	// nomadConfig is the client config from nomad
	nomadConfig *base.ClientDriverConfig

	// tasks is the in memory datastore mapping taskIDs to driverHandles
	tasks *taskStore

	// ctx is the context for the driver. It is passed to other subsystems to
	// coordinate shutdown
	ctx context.Context

	// logger will log to the Nomad agent
	logger hclog.Logger

	// A tri-state boolean to know if the fingerprinting has happened and
	// whether it has been successful
	fingerprintSuccess *bool
	fingerprintLock    sync.Mutex
}

// Config is the driver configuration set by the SetConfig RPC call
type Config struct {
	// NoPivotRoot disables the use of pivot_root, useful when the root partition
	// is on ramdisk
	NoPivotRoot bool `codec:"no_pivot_root"`

	// DefaultModePID is the default PID isolation set for all tasks using
	// exec-based task drivers.
	DefaultModePID string `codec:"default_pid_mode"`

	// DefaultModeIPC is the default IPC isolation set for all tasks using
	// exec-based task drivers.
	DefaultModeIPC string `codec:"default_ipc_mode"`

	// AllowCaps configures which Linux Capabilities are enabled for tasks
	// running on this node.
	AllowCaps []string `codec:"allow_caps"`
}

func (c *Config) validate() error {
	switch c.DefaultModePID {
	case executor.IsolationModePrivate, executor.IsolationModeHost:
	default:
		return fmt.Errorf("default_pid_mode must be %q or %q, got %q", executor.IsolationModePrivate, executor.IsolationModeHost, c.DefaultModePID)
	}

	switch c.DefaultModeIPC {
	case executor.IsolationModePrivate, executor.IsolationModeHost:
	default:
		return fmt.Errorf("default_ipc_mode must be %q or %q, got %q", executor.IsolationModePrivate, executor.IsolationModeHost, c.DefaultModeIPC)
	}

	badCaps := capabilities.Supported().Difference(capabilities.New(c.AllowCaps))
	if !badCaps.Empty() {
		return fmt.Errorf("allow_caps configured with capabilities not supported by system: %s", badCaps)
	}

	return nil
}

// TaskConfig is the driver configuration of a task within a job
type TaskConfig struct {
	// Command is the thing to exec.
	Command string `codec:"command"`

	// Args are passed along to Command.
	Args []string `codec:"args"`

	// ModePID indicates whether PID namespace isolation is enabled for the task.
	// Must be "private" or "host" if set.
	ModePID string `codec:"pid_mode"`

	// ModeIPC indicates whether IPC namespace isolation is enabled for the task.
	// Must be "private" or "host" if set.
	ModeIPC string `codec:"ipc_mode"`

	// CapAdd is a set of linux capabilities to enable.
	CapAdd []string `codec:"cap_add"`

	// CapDrop is a set of linux capabilities to disable.
	CapDrop []string `codec:"cap_drop"`
}

func (tc *TaskConfig) validate() error {
	switch tc.ModePID {
	case "", executor.IsolationModePrivate, executor.IsolationModeHost:
	default:
		return fmt.Errorf("pid_mode must be %q or %q, got %q", executor.IsolationModePrivate, executor.IsolationModeHost, tc.ModePID)
	}

	switch tc.ModeIPC {
	case "", executor.IsolationModePrivate, executor.IsolationModeHost:
	default:
		return fmt.Errorf("ipc_mode must be %q or %q, got %q", executor.IsolationModePrivate, executor.IsolationModeHost, tc.ModeIPC)
	}

	supported := capabilities.Supported()
	badAdds := supported.Difference(capabilities.New(tc.CapAdd))
	if !badAdds.Empty() {
		return fmt.Errorf("cap_add configured with capabilities not supported by system: %s", badAdds)
	}
	badDrops := supported.Difference(capabilities.New(tc.CapDrop))
	if !badDrops.Empty() {
		return fmt.Errorf("cap_drop configured with capabilities not supported by system: %s", badDrops)
	}

	return nil
}

// TaskState is the state which is encoded in the handle returned in
// StartTask. This information is needed to rebuild the task state and handler
// during recovery.
type TaskState struct {
	ReattachConfig *pstructs.ReattachConfig
	TaskConfig     *drivers.TaskConfig
	Pid            int
	StartedAt      time.Time
}

// NewExecDriver returns a new DriverPlugin implementation
func NewExecDriver(ctx context.Context, logger hclog.Logger) drivers.DriverPlugin {
	logger = logger.Named(pluginName)
	return &Driver{
		eventer: eventer.NewEventer(ctx, logger),
		tasks:   newTaskStore(),
		ctx:     ctx,
		logger:  logger,
	}
}

// setFingerprintSuccess marks the driver as having fingerprinted successfully
func (d *Driver) setFingerprintSuccess() {
	d.fingerprintLock.Lock()
	d.fingerprintSuccess = pointer.Of(true)
	d.fingerprintLock.Unlock()
}

// setFingerprintFailure marks the driver as having failed fingerprinting
func (d *Driver) setFingerprintFailure() {
	d.fingerprintLock.Lock()
	d.fingerprintSuccess = pointer.Of(false)
	d.fingerprintLock.Unlock()
}

// fingerprintSuccessful returns true if the driver has
// never fingerprinted or has successfully fingerprinted
func (d *Driver) fingerprintSuccessful() bool {
	d.fingerprintLock.Lock()
	defer d.fingerprintLock.Unlock()
	return d.fingerprintSuccess == nil || *d.fingerprintSuccess
}

func (d *Driver) PluginInfo() (*base.PluginInfoResponse, error) {
	return pluginInfo, nil
}

func (d *Driver) ConfigSchema() (*hclspec.Spec, error) {
	return configSpec, nil
}

func (d *Driver) SetConfig(cfg *base.Config) error {
	// unpack, validate, and set agent plugin config
	var config Config
	if len(cfg.PluginConfig) != 0 {
		if err := base.MsgPackDecode(cfg.PluginConfig, &config); err != nil {
			return err
		}
	}
	if err := config.validate(); err != nil {
		return err
	}
	d.config = config

	if cfg != nil && cfg.AgentConfig != nil {
		d.nomadConfig = cfg.AgentConfig.Driver
	}
	return nil
}

func (d *Driver) TaskConfigSchema() (*hclspec.Spec, error) {
	return taskConfigSpec, nil
}

// Capabilities is returned by the Capabilities RPC and indicates what
// optional features this driver supports
func (d *Driver) Capabilities() (*drivers.Capabilities, error) {
	return driverCapabilities, nil
}

func (d *Driver) Fingerprint(ctx context.Context) (<-chan *drivers.Fingerprint, error) {
	ch := make(chan *drivers.Fingerprint)
	go d.handleFingerprint(ctx, ch)
	return ch, nil
}

func (d *Driver) handleFingerprint(ctx context.Context, ch chan<- *drivers.Fingerprint) {
	defer close(ch)
	ticker := time.NewTimer(0)
	for {
		select {
		case <-ctx.Done():
			return
		case <-d.ctx.Done():
			return
		case <-ticker.C:
			ticker.Reset(fingerprintPeriod)
			ch <- d.buildFingerprint()
		}
	}
}

func (d *Driver) buildFingerprint() *drivers.Fingerprint {
	if runtime.GOOS != "linux" {
		d.setFingerprintFailure()
		return &drivers.Fingerprint{
			Health:            drivers.HealthStateUndetected,
			HealthDescription: "exec driver unsupported on client OS",
		}
	}

	fp := &drivers.Fingerprint{
		Attributes:        map[string]*pstructs.Attribute{},
		Health:            drivers.HealthStateHealthy,
		HealthDescription: drivers.DriverHealthy,
	}

	if !utils.IsUnixRoot() {
		fp.Health = drivers.HealthStateUndetected
		fp.HealthDescription = drivers.DriverRequiresRootMessage
		d.setFingerprintFailure()
		return fp
	}

	mount, err := cgutil.FindCgroupMountpointDir()
	if err != nil {
		fp.Health = drivers.HealthStateUnhealthy
		fp.HealthDescription = drivers.NoCgroupMountMessage
		if d.fingerprintSuccessful() {
			d.logger.Warn(fp.HealthDescription, "error", err)
		}
		d.setFingerprintFailure()
		return fp
	}

	if mount == "" {
		fp.Health = drivers.HealthStateUnhealthy
		fp.HealthDescription = drivers.CgroupMountEmpty
		d.setFingerprintFailure()
		return fp
	}

	fp.Attributes["driver.exec"] = pstructs.NewBoolAttribute(true)
	d.setFingerprintSuccess()
	return fp
}

func (d *Driver) RecoverTask(handle *drivers.TaskHandle) error {
	if handle == nil {
		return fmt.Errorf("handle cannot be nil")
	}

	// If already attached to handle there's nothing to recover.
	if _, ok := d.tasks.Get(handle.Config.ID); ok {
		d.logger.Trace("nothing to recover; task already exists",
			"task_id", handle.Config.ID,
			"task_name", handle.Config.Name,
		)
		return nil
	}

	// Handle doesn't already exist, try to reattach
	var taskState TaskState
	if err := handle.GetDriverState(&taskState); err != nil {
		d.logger.Error("failed to decode task state from handle", "error", err, "task_id", handle.Config.ID)
		return fmt.Errorf("failed to decode task state from handle: %v", err)
	}

	// Create client for reattached executor
	plugRC, err := pstructs.ReattachConfigToGoPlugin(taskState.ReattachConfig)
	if err != nil {
		d.logger.Error("failed to build ReattachConfig from task state", "error", err, "task_id", handle.Config.ID)
		return fmt.Errorf("failed to build ReattachConfig from task state: %v", err)
	}

	exec, pluginClient, err := executor.ReattachToExecutor(plugRC,
		d.logger.With("task_name", handle.Config.Name, "alloc_id", handle.Config.AllocID))
	if err != nil {
		d.logger.Error("failed to reattach to executor", "error", err, "task_id", handle.Config.ID)
		return fmt.Errorf("failed to reattach to executor: %v", err)
	}

	h := &taskHandle{
		exec:         exec,
		pid:          taskState.Pid,
		pluginClient: pluginClient,
		taskConfig:   taskState.TaskConfig,
		procState:    drivers.TaskStateRunning,
		startedAt:    taskState.StartedAt,
		exitResult:   &drivers.ExitResult{},
		logger:       d.logger,
	}

	d.tasks.Set(taskState.TaskConfig.ID, h)

	go h.run()
	return nil
}

func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drivers.DriverNetwork, error) {
	if _, ok := d.tasks.Get(cfg.ID); ok {
		return nil, nil, fmt.Errorf("task with ID %q already started", cfg.ID)
	}

	var driverConfig TaskConfig
	if err := cfg.DecodeDriverConfig(&driverConfig); err != nil {
		return nil, nil, fmt.Errorf("failed to decode driver config: %v", err)
	}

	if err := driverConfig.validate(); err != nil {
		return nil, nil, fmt.Errorf("failed driver config validation: %v", err)
	}

	d.logger.Info("starting task", "driver_cfg", hclog.Fmt("%+v", driverConfig))
	handle := drivers.NewTaskHandle(taskHandleVersion)
	handle.Config = cfg

	pluginLogFile := filepath.Join(cfg.TaskDir().Dir, "executor.out")
	executorConfig := &executor.ExecutorConfig{
		LogFile:     pluginLogFile,
		LogLevel:    "debug",
		FSIsolation: true,
	}

	exec, pluginClient, err := executor.CreateExecutor(
		d.logger.With("task_name", handle.Config.Name, "alloc_id", handle.Config.AllocID),
		d.nomadConfig, executorConfig)
	if err != nil {
		return nil, nil, fmt.Errorf("failed to create executor: %v", err)
	}

	user := cfg.User
	if user == "" {
		user = "nobody"
	}

	if cfg.DNS != nil {
		dnsMount, err := resolvconf.GenerateDNSMount(cfg.TaskDir().Dir, cfg.DNS)
		if err != nil {
			return nil, nil, fmt.Errorf("failed to build mount for resolv.conf: %v", err)
		}
		cfg.Mounts = append(cfg.Mounts, dnsMount)
	}

	caps, err := capabilities.Calculate(
		capabilities.NomadDefaults(), d.config.AllowCaps, driverConfig.CapAdd, driverConfig.CapDrop,
	)
	if err != nil {
		return nil, nil, err
	}
	d.logger.Debug("task capabilities", "capabilities", caps)

	execCmd := &executor.ExecCommand{
		Cmd:              driverConfig.Command,
		Args:             driverConfig.Args,
		Env:              cfg.EnvList(),
		User:             user,
		ResourceLimits:   true,
		NoPivotRoot:      d.config.NoPivotRoot,
		Resources:        cfg.Resources,
		TaskDir:          cfg.TaskDir().Dir,
		StdoutPath:       cfg.StdoutPath,
		StderrPath:       cfg.StderrPath,
		Mounts:           cfg.Mounts,
		Devices:          cfg.Devices,
		NetworkIsolation: cfg.NetworkIsolation,
		ModePID:          executor.IsolationMode(d.config.DefaultModePID, driverConfig.ModePID),
		ModeIPC:          executor.IsolationMode(d.config.DefaultModeIPC, driverConfig.ModeIPC),
		Capabilities:     caps,
	}

	ps, err := exec.Launch(execCmd)
	if err != nil {
		pluginClient.Kill()
		return nil, nil, fmt.Errorf("failed to launch command with executor: %v", err)
	}

	h := &taskHandle{
		exec:         exec,
		pid:          ps.Pid,
		pluginClient: pluginClient,
		taskConfig:   cfg,
		procState:    drivers.TaskStateRunning,
		startedAt:    time.Now().Round(time.Millisecond),
		logger:       d.logger,
	}

	driverState := TaskState{
		ReattachConfig: pstructs.ReattachConfigFromGoPlugin(pluginClient.ReattachConfig()),
		Pid:            ps.Pid,
		TaskConfig:     cfg,
		StartedAt:      h.startedAt,
	}

	if err := handle.SetDriverState(&driverState); err != nil {
		d.logger.Error("failed to start task, error setting driver state", "error", err)
		_ = exec.Shutdown("", 0)
		pluginClient.Kill()
		return nil, nil, fmt.Errorf("failed to set driver state: %v", err)
	}

	d.tasks.Set(cfg.ID, h)
	go h.run()
	return handle, nil, nil
}

func (d *Driver) WaitTask(ctx context.Context, taskID string) (<-chan *drivers.ExitResult, error) {
	handle, ok := d.tasks.Get(taskID)
	if !ok {
		return nil, drivers.ErrTaskNotFound
	}

	ch := make(chan *drivers.ExitResult)
	go d.handleWait(ctx, handle, ch)

	return ch, nil
}

func (d *Driver) handleWait(ctx context.Context, handle *taskHandle, ch chan *drivers.ExitResult) {
	defer close(ch)
	var result *drivers.ExitResult
	ps, err := handle.exec.Wait(ctx)
	if err != nil {
		result = &drivers.ExitResult{
			Err: fmt.Errorf("executor: error waiting on process: %v", err),
		}
	} else {
		result = &drivers.ExitResult{
			ExitCode: ps.ExitCode,
			Signal:   ps.Signal,
		}
	}

	select {
	case <-ctx.Done():
		return
	case <-d.ctx.Done():
		return
	case ch <- result:
	}
}

func (d *Driver) StopTask(taskID string, timeout time.Duration, signal string) error {
	handle, ok := d.tasks.Get(taskID)
	if !ok {
		return drivers.ErrTaskNotFound
	}

	if err := handle.exec.Shutdown(signal, timeout); err != nil {
		if handle.pluginClient.Exited() {
			return nil
		}
		return fmt.Errorf("executor Shutdown failed: %v", err)
	}

	return nil
}

// resetCgroup will re-create the v2 cgroup for the task after the task has been
// destroyed by libcontainer. In the case of a task restart we call DestroyTask
// which removes the cgroup - but we still need it!
//
// Ideally the cgroup management would be more unified - and we could do the creation
// on a task runner pre-start hook, eliminating the need for this hack.
func (d *Driver) resetCgroup(handle *taskHandle) {
	if cgutil.UseV2 {
		if handle.taskConfig.Resources != nil &&
			handle.taskConfig.Resources.LinuxResources != nil &&
			handle.taskConfig.Resources.LinuxResources.CpusetCgroupPath != "" {
			err := os.Mkdir(handle.taskConfig.Resources.LinuxResources.CpusetCgroupPath, 0755)
			if err != nil {
				d.logger.Trace("failed to reset cgroup", "path", handle.taskConfig.Resources.LinuxResources.CpusetCgroupPath)
			}
		}
	}
}

func (d *Driver) DestroyTask(taskID string, force bool) error {
	handle, ok := d.tasks.Get(taskID)
	if !ok {
		return drivers.ErrTaskNotFound
	}

	if handle.IsRunning() && !force {
		return fmt.Errorf("cannot destroy running task")
	}

	if !handle.pluginClient.Exited() {
		if err := handle.exec.Shutdown("", 0); err != nil {
			handle.logger.Error("destroying executor failed", "error", err)
		}

		handle.pluginClient.Kill()
	}

	// workaround for the case where DestroyTask was issued on task restart
	d.resetCgroup(handle)

	d.tasks.Delete(taskID)
	return nil
}

func (d *Driver) InspectTask(taskID string) (*drivers.TaskStatus, error) {
	handle, ok := d.tasks.Get(taskID)
	if !ok {
		return nil, drivers.ErrTaskNotFound
	}

	return handle.TaskStatus(), nil
}

func (d *Driver) TaskStats(ctx context.Context, taskID string, interval time.Duration) (<-chan *drivers.TaskResourceUsage, error) {
	handle, ok := d.tasks.Get(taskID)
	if !ok {
		return nil, drivers.ErrTaskNotFound
	}

	return handle.exec.Stats(ctx, interval)
}

func (d *Driver) TaskEvents(ctx context.Context) (<-chan *drivers.TaskEvent, error) {
	return d.eventer.TaskEvents(ctx)
}

func (d *Driver) SignalTask(taskID string, signal string) error {
	handle, ok := d.tasks.Get(taskID)
	if !ok {
		return drivers.ErrTaskNotFound
	}

	sig := os.Interrupt
	if s, ok := signals.SignalLookup[signal]; ok {
		sig = s
	} else {
		d.logger.Warn("unknown signal to send to task, using SIGINT instead", "signal", signal, "task_id", handle.taskConfig.ID)
	}
	return handle.exec.Signal(sig)
}

func (d *Driver) ExecTask(taskID string, cmd []string, timeout time.Duration) (*drivers.ExecTaskResult, error) {
	if len(cmd) == 0 {
		return nil, fmt.Errorf("error cmd must have at least one value")
	}
	handle, ok := d.tasks.Get(taskID)
	if !ok {
		return nil, drivers.ErrTaskNotFound
	}

	args := []string{}
	if len(cmd) > 1 {
		args = cmd[1:]
	}

	out, exitCode, err := handle.exec.Exec(time.Now().Add(timeout), cmd[0], args)
	if err != nil {
		return nil, err
	}

	return &drivers.ExecTaskResult{
		Stdout: out,
		ExitResult: &drivers.ExitResult{
			ExitCode: exitCode,
		},
	}, nil
}

var _ drivers.ExecTaskStreamingRawDriver = (*Driver)(nil)

func (d *Driver) ExecTaskStreamingRaw(ctx context.Context,
	taskID string,
	command []string,
	tty bool,
	stream drivers.ExecTaskStream) error {

	if len(command) == 0 {
		return fmt.Errorf("error cmd must have at least one value")
fmt.Errorf("error cmd must have at least one value") 703 } 704 handle, ok := d.tasks.Get(taskID) 705 if !ok { 706 return drivers.ErrTaskNotFound 707 } 708 709 return handle.exec.ExecStreaming(ctx, command, tty, stream) 710 }