// github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/client/taskenv/env.go

package taskenv

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"

	"github.com/hashicorp/nomad/helper"
	hargs "github.com/hashicorp/nomad/helper/args"
	"github.com/hashicorp/nomad/helper/escapingfs"
	"github.com/hashicorp/nomad/lib/cpuset"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
	"github.com/zclconf/go-cty/cty"
)

// A set of environment variables that are exported by each driver.
const (
	// AllocDir is the environment variable with the path to the alloc directory
	// that is shared across tasks within a task group.
	AllocDir = "NOMAD_ALLOC_DIR"

	// TaskLocalDir is the environment variable with the path to the task's local
	// directory where it can store data that is persisted until the alloc is
	// removed.
	TaskLocalDir = "NOMAD_TASK_DIR"

	// SecretsDir is the environment variable with the path to the task's secrets
	// directory where it can store sensitive data.
	SecretsDir = "NOMAD_SECRETS_DIR"

	// MemLimit is the environment variable with the task's memory limit in MBs.
	MemLimit = "NOMAD_MEMORY_LIMIT"

	// MemMaxLimit is the environment variable with the task's maximum memory limit in MBs.
	MemMaxLimit = "NOMAD_MEMORY_MAX_LIMIT"

	// CpuLimit is the environment variable with the task's CPU limit in MHz.
	CpuLimit = "NOMAD_CPU_LIMIT"

	// CpuCores is the environment variable for passing the task's reserved cpu cores.
	CpuCores = "NOMAD_CPU_CORES"

	// AllocID is the environment variable for passing the allocation ID.
	AllocID = "NOMAD_ALLOC_ID"

	// ShortAllocID is the environment variable for passing the short version
	// of the allocation ID.
	ShortAllocID = "NOMAD_SHORT_ALLOC_ID"

	// AllocName is the environment variable for passing the allocation name.
	AllocName = "NOMAD_ALLOC_NAME"

	// TaskName is the environment variable for passing the task name.
	TaskName = "NOMAD_TASK_NAME"

	// GroupName is the environment variable for passing the task group name.
	GroupName = "NOMAD_GROUP_NAME"

	// JobID is the environment variable for passing the job ID.
	JobID = "NOMAD_JOB_ID"

	// JobName is the environment variable for passing the job name.
	JobName = "NOMAD_JOB_NAME"

	// JobParentID is the environment variable for passing the ID of the parent of the job.
	JobParentID = "NOMAD_JOB_PARENT_ID"

	// AllocIndex is the environment variable for passing the allocation index.
	AllocIndex = "NOMAD_ALLOC_INDEX"

	// Datacenter is the environment variable for passing the datacenter in which the alloc is running.
	Datacenter = "NOMAD_DC"

	// CgroupParent is the environment variable for passing the cgroup parent in which cgroups are made.
	CgroupParent = "NOMAD_PARENT_CGROUP"

	// Namespace is the environment variable for passing the namespace in which the alloc is running.
	Namespace = "NOMAD_NAMESPACE"

	// Region is the environment variable for passing the region in which the alloc is running.
	Region = "NOMAD_REGION"

	// AddrPrefix is the prefix for passing both dynamic and static port
	// allocations to tasks.
	// E.g. $NOMAD_ADDR_http=127.0.0.1:80
	//
	// The ip:port are always the host's.
	AddrPrefix = "NOMAD_ADDR_"

	HostAddrPrefix = "NOMAD_HOST_ADDR_"

	// IpPrefix is the prefix for passing the host IP of a port allocation
	// to a task.
	IpPrefix = "NOMAD_IP_"

	HostIpPrefix = "NOMAD_HOST_IP_"

	// PortPrefix is the prefix for passing the port allocation to a task.
	// It will be the task's port if a port map is specified. Tasks should
	// bind to this port.
	PortPrefix = "NOMAD_PORT_"

	AllocPortPrefix = "NOMAD_ALLOC_PORT_"

	// HostPortPrefix is the prefix for passing the host port when a port
	// map is specified.
	HostPortPrefix = "NOMAD_HOST_PORT_"

	// MetaPrefix is the prefix for passing task meta data.
	MetaPrefix = "NOMAD_META_"

	// UpstreamPrefix is the prefix for passing upstream IP and ports to the alloc.
	UpstreamPrefix = "NOMAD_UPSTREAM_"

	// VaultToken is the environment variable for passing the Vault token.
	VaultToken = "VAULT_TOKEN"

	// VaultNamespace is the environment variable for passing the Vault namespace, if applicable.
	VaultNamespace = "VAULT_NAMESPACE"
)
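
// As an illustrative sketch of how the prefixed variables above combine with
// a port label (all values here are assumed, not taken from a real alloc):
// a port labeled "http" with host address 10.0.0.5:8080, mapped to task port
// 80 by a driver port map, would yield roughly:
//
//	NOMAD_IP_http=10.0.0.5        // host IP
//	NOMAD_HOST_PORT_http=8080     // host port
//	NOMAD_PORT_http=80            // task-side port from the port map
//	NOMAD_ADDR_http=10.0.0.5:8080 // host ip:port
//
// See buildPortEnv below for the exact rules.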

// The node values that can be interpolated.
const (
	nodeIdKey     = "node.unique.id"
	nodeDcKey     = "node.datacenter"
	nodeRegionKey = "node.region"
	nodeNameKey   = "node.unique.name"
	nodeClassKey  = "node.class"

	// Prefixes used for lookups.
	nodeAttributePrefix = "attr."
	nodeMetaPrefix      = "meta."
)

// TaskEnv is a task's environment as well as the node attributes for
// interpolation.
type TaskEnv struct {
	// NodeAttrs is the map of node attributes for interpolation.
	NodeAttrs map[string]string

	// EnvMap is the map of environment variables.
	EnvMap map[string]string

	// deviceEnv is the environment variables populated from the device hooks.
	deviceEnv map[string]string

	// envList is a memoized list created by List().
	envList []string

	// EnvMapClient is the map of environment variables with client-specific
	// task directories.
	// See https://github.com/hashicorp/nomad/pull/9671
	EnvMapClient map[string]string

	// clientTaskDir is the absolute path to the task root directory on the host
	// <alloc_dir>/<task>
	clientTaskDir string

	// clientSharedAllocDir is the path to the shared alloc directory on the host
	// <alloc_dir>/alloc/
	clientSharedAllocDir string
}

// NewTaskEnv creates a new task environment with the given environment, device
// environment and node attribute maps.
func NewTaskEnv(env, envClient, deviceEnv, node map[string]string, clientTaskDir, clientAllocDir string) *TaskEnv {
	return &TaskEnv{
		NodeAttrs:            node,
		deviceEnv:            deviceEnv,
		EnvMap:               env,
		EnvMapClient:         envClient,
		clientTaskDir:        clientTaskDir,
		clientSharedAllocDir: clientAllocDir,
	}
}

// NewEmptyTaskEnv creates a new empty task environment.
func NewEmptyTaskEnv() *TaskEnv {
	return &TaskEnv{
		NodeAttrs: make(map[string]string),
		deviceEnv: make(map[string]string),
		EnvMap:    make(map[string]string),
	}
}

// List returns the task's environment as a slice of NAME=value pair strings.
func (t *TaskEnv) List() []string {
	if t.envList != nil {
		return t.envList
	}

	env := []string{}
	for k, v := range t.EnvMap {
		env = append(env, fmt.Sprintf("%s=%s", k, v))
	}

	return env
}
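
// exampleListUsage is an illustrative sketch (a hypothetical helper, not part
// of this package's API) of draining a TaskEnv into NAME=value pairs, e.g.
// for handing to a driver as a process environment. The map literal is an
// assumed placeholder.
func exampleListUsage() []string {
	env := NewTaskEnv(
		map[string]string{TaskName: "web", AllocIndex: "0"},
		nil, nil, nil, "", "",
	)
	// Returns e.g. ["NOMAD_TASK_NAME=web", "NOMAD_ALLOC_INDEX=0"], in
	// unspecified map-iteration order.
	return env.List()
}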

// DeviceEnv returns the task's environment variables set by device hooks.
func (t *TaskEnv) DeviceEnv() map[string]string {
	m := make(map[string]string, len(t.deviceEnv))
	for k, v := range t.deviceEnv {
		m[k] = v
	}

	return m
}

// Map of the task's environment variables.
func (t *TaskEnv) Map() map[string]string {
	m := make(map[string]string, len(t.EnvMap))
	for k, v := range t.EnvMap {
		m[k] = v
	}

	return m
}

// All of the task's environment variables and the node's attributes in a
// single map.
func (t *TaskEnv) All() map[string]string {
	m := make(map[string]string, len(t.EnvMap)+len(t.NodeAttrs))
	for k, v := range t.EnvMap {
		m[k] = v
	}
	for k, v := range t.NodeAttrs {
		m[k] = v
	}

	return m
}

// AllValues is a map of the task's environment variables and the node's
// attributes with cty.Value (String) values. Errors for individual keys are
// returned in a map keyed by variable name.
//
// In the rare case of a fatal error, only an error value is returned. This is
// likely a programming error as user input should not be able to cause a fatal
// error.
func (t *TaskEnv) AllValues() (map[string]cty.Value, map[string]error, error) {
	errs := make(map[string]error)

	// Intermediate map for building up nested go types
	allMap := make(map[string]interface{}, len(t.EnvMap)+len(t.NodeAttrs))

	// Intermediate map for all env vars including those whose keys cannot
	// be nested (eg foo...bar)
	envMap := make(map[string]cty.Value, len(t.EnvMap))

	// Prepare job-based variables (eg job.meta, job.group.task.env, etc)
	for k, v := range t.EnvMap {
		if err := addNestedKey(allMap, k, v); err != nil {
			errs[k] = err
		}
		envMap[k] = cty.StringVal(v)
	}

	// Prepare node-based variables (eg node.*, attr.*, meta.*)
	for k, v := range t.NodeAttrs {
		if err := addNestedKey(allMap, k, v); err != nil {
			errs[k] = err
		}
	}

	// Add flat envMap as a Map to allMap so users can access any key via
	// HCL2's indexing syntax: ${env["foo...bar"]}
	allMap["env"] = cty.MapVal(envMap)

	// Add meta and attr to node if they exist to properly namespace things
	// a bit.
	nodeMapI, ok := allMap["node"]
	if !ok {
		return nil, nil, fmt.Errorf("missing node variable")
	}
	nodeMap, ok := nodeMapI.(map[string]interface{})
	if !ok {
		return nil, nil, fmt.Errorf("invalid type for node variable: %T", nodeMapI)
	}
	if attrMap, ok := allMap["attr"]; ok {
		nodeMap["attr"] = attrMap
	}
	if metaMap, ok := allMap["meta"]; ok {
		nodeMap["meta"] = metaMap
	}

	// ctyify the entire tree of strings and maps
	tree, err := ctyify(allMap)
	if err != nil {
		// This should not be possible and is likely a programming
		// error. Invalid user input should be cleaned earlier.
		return nil, nil, err
	}

	return tree, errs, nil
}

// ParseAndReplace takes the user-supplied args and replaces any instance of
// an environment variable or Nomad variable in the args with the actual
// value.
func (t *TaskEnv) ParseAndReplace(args []string) []string {
	if args == nil {
		return nil
	}

	replaced := make([]string, len(args))
	for i, arg := range args {
		replaced[i] = hargs.ReplaceEnv(arg, t.EnvMap, t.NodeAttrs)
	}

	return replaced
}
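
// exampleInterpolation is an illustrative sketch (a hypothetical helper, not
// part of this package's API): ${...} references in the args are resolved
// against the task env and the node attributes. All values are assumed
// placeholders.
func exampleInterpolation() []string {
	env := NewTaskEnv(
		map[string]string{TaskLocalDir: "/local"}, nil, nil,
		map[string]string{nodeDcKey: "dc1"}, "", "",
	)
	// Yields ["/local/app.conf", "dc1"].
	return env.ParseAndReplace([]string{"${NOMAD_TASK_DIR}/app.conf", "${node.datacenter}"})
}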

// ReplaceEnv takes an arg and replaces all occurrences of environment variables
// and Nomad variables. If the variable is found in the passed map it is
// replaced, otherwise the original string is returned.
func (t *TaskEnv) ReplaceEnv(arg string) string {
	return hargs.ReplaceEnv(arg, t.EnvMap, t.NodeAttrs)
}

// replaceEnvClient takes an arg and replaces all occurrences of client-specific
// environment variables and Nomad variables. If the variable is found in the
// passed map it is replaced, otherwise the original string is returned.
// The difference from ReplaceEnv is potentially different values for
// the following variables:
// * NOMAD_ALLOC_DIR
// * NOMAD_TASK_DIR
// * NOMAD_SECRETS_DIR
// and anything that was interpolated using them.
//
// See https://github.com/hashicorp/nomad/pull/9671
func (t *TaskEnv) replaceEnvClient(arg string) string {
	return hargs.ReplaceEnv(arg, t.EnvMapClient, t.NodeAttrs)
}

// checkEscape returns true if the absolute path testPath escapes both the
// task directory and the shared allocation directory specified in the
// directory path fields of this TaskEnv.
func (t *TaskEnv) checkEscape(testPath string) bool {
	for _, p := range []string{t.clientTaskDir, t.clientSharedAllocDir} {
		if p != "" && !escapingfs.PathEscapesSandbox(p, testPath) {
			return false
		}
	}
	return true
}

// ClientPath interpolates the argument as a path, using the
// environment variables with client-relative directories. The
// result is an absolute path on the client filesystem.
//
// If the interpolated result is a relative path, it is made absolute.
// If joinEscape is set, an interpolated path that escapes will be joined with
// the task dir.
// The result is checked to see whether it (still) escapes both the task
// working directory and the shared allocation directory.
func (t *TaskEnv) ClientPath(rawPath string, joinEscape bool) (string, bool) {
	path := t.replaceEnvClient(rawPath)
	if !filepath.IsAbs(path) || (t.checkEscape(path) && joinEscape) {
		path = filepath.Join(t.clientTaskDir, path)
	}
	path = filepath.Clean(path)
	escapes := t.checkEscape(path)
	return path, escapes
}
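
// exampleClientPath is an illustrative sketch (a hypothetical helper, not
// part of this package's API) of resolving a task-relative path onto the
// client filesystem; the directory values are assumed placeholders.
func exampleClientPath() (string, bool) {
	env := NewTaskEnv(nil,
		map[string]string{TaskLocalDir: "/var/nomad/alloc/a1/web/local"}, // client-side env
		nil, nil,
		"/var/nomad/alloc/a1/web",   // clientTaskDir
		"/var/nomad/alloc/a1/alloc", // clientSharedAllocDir
	)
	// Interpolates to /var/nomad/alloc/a1/web/local/app.db; the returned
	// escapes flag is false because the result stays inside the task dir.
	return env.ClientPath("${NOMAD_TASK_DIR}/app.db", false)
}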

// Builder is used to build task environments and is safe for concurrent use.
type Builder struct {
	// envvars are custom-set environment variables
	envvars map[string]string

	// templateEnv are env vars set from templates
	templateEnv map[string]string

	// hostEnv are environment variables filtered from the host
	hostEnv map[string]string

	// nodeAttrs are Node attributes and metadata
	nodeAttrs map[string]string

	// taskMeta are the meta attributes on the task
	taskMeta map[string]string

	// allocDir from task's perspective; eg /alloc
	allocDir string

	// localDir from task's perspective; eg /local
	localDir string

	// secretsDir from task's perspective; eg /secrets
	secretsDir string

	// clientSharedAllocDir is the shared alloc dir from the client's perspective; eg <alloc_dir>/<alloc_id>/alloc
	clientSharedAllocDir string

	// clientTaskRoot is the task working directory from the client's perspective; eg <alloc_dir>/<alloc_id>/<task>
	clientTaskRoot string

	// clientTaskLocalDir is the local dir from the client's perspective; eg <client_task_root>/local
	clientTaskLocalDir string

	// clientTaskSecretsDir is the secrets dir from the client's perspective; eg <client_task_root>/secrets
	clientTaskSecretsDir string

	cpuCores         string
	cpuLimit         int64
	memLimit         int64
	memMaxLimit      int64
	taskName         string
	allocIndex       int
	datacenter       string
	cgroupParent     string
	namespace        string
	region           string
	allocId          string
	allocName        string
	groupName        string
	vaultToken       string
	vaultNamespace   string
	injectVaultToken bool
	jobID            string
	jobName          string
	jobParentID      string

	// otherPorts for tasks in the same alloc
	otherPorts map[string]string

	// driverNetwork is the network defined by the driver (or nil if none
	// was defined).
	driverNetwork *drivers.DriverNetwork

	// network resources from the task; must be lazily turned into env vars
	// because portMaps and advertiseIP can change after builder creation
	// and affect network env vars.
	networks []*structs.NetworkResource

	// hookEnvs are env vars set by hooks and stored by hook name to
	// support adding/removing vars from multiple hooks (eg HookA adds A:1,
	// HookB adds A:2, HookA removes A, A should equal 2)
	hookEnvs map[string]map[string]string

	// hookNames is a slice of hooks in hookEnvs to apply hookEnvs in the
	// order the hooks are run.
	hookNames []string

	// deviceHookName is the device hook name. It is set only if device hooks
	// are set. While a bit roundabout, this enables us to return device hook
	// environment variables without having to hardcode the name of the hook.
	deviceHookName string

	// upstreams from the group connect enabled services
	upstreams []structs.ConsulUpstream

	mu *sync.RWMutex
}

// NewBuilder creates a new task environment builder.
func NewBuilder(node *structs.Node, alloc *structs.Allocation, task *structs.Task, region string) *Builder {
	b := NewEmptyBuilder()
	b.region = region
	return b.setTask(task).setAlloc(alloc).setNode(node)
}
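
// exampleBuild is an illustrative sketch (a hypothetical helper, not part of
// this package's API) of the usual flow: seed the builder from node, alloc,
// and task, set the in-task directory paths, then Build an immutable TaskEnv.
// The region and directory values are assumed placeholders.
func exampleBuild(node *structs.Node, alloc *structs.Allocation, task *structs.Task) *TaskEnv {
	return NewBuilder(node, alloc, task, "global").
		SetAllocDir("/alloc").
		SetTaskLocalDir("/local").
		SetSecretsDir("/secrets").
		Build()
}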

// NewEmptyBuilder creates a new environment builder.
func NewEmptyBuilder() *Builder {
	return &Builder{
		mu:       &sync.RWMutex{},
		hookEnvs: map[string]map[string]string{},
		envvars:  make(map[string]string),
	}
}

// buildEnv returns the environment variables and device environment
// variables with respect to the task directories passed in the arguments.
func (b *Builder) buildEnv(allocDir, localDir, secretsDir string,
	nodeAttrs map[string]string) (map[string]string, map[string]string) {

	envMap := make(map[string]string)
	var deviceEnvs map[string]string

	// Add the directories
	if allocDir != "" {
		envMap[AllocDir] = allocDir
	}
	if localDir != "" {
		envMap[TaskLocalDir] = localDir
	}
	if secretsDir != "" {
		envMap[SecretsDir] = secretsDir
	}

	// Add the resource limits
	if b.memLimit != 0 {
		envMap[MemLimit] = strconv.FormatInt(b.memLimit, 10)
	}
	if b.memMaxLimit != 0 {
		envMap[MemMaxLimit] = strconv.FormatInt(b.memMaxLimit, 10)
	}
	if b.cpuLimit != 0 {
		envMap[CpuLimit] = strconv.FormatInt(b.cpuLimit, 10)
	}
	if b.cpuCores != "" {
		envMap[CpuCores] = b.cpuCores
	}

	// Add the task metadata
	if b.allocId != "" {
		envMap[AllocID] = b.allocId
		envMap[ShortAllocID] = b.allocId[:8]
	}
	if b.allocName != "" {
		envMap[AllocName] = b.allocName
	}
	if b.groupName != "" {
		envMap[GroupName] = b.groupName
	}
	if b.allocIndex != -1 {
		envMap[AllocIndex] = strconv.Itoa(b.allocIndex)
	}
	if b.taskName != "" {
		envMap[TaskName] = b.taskName
	}
	if b.jobID != "" {
		envMap[JobID] = b.jobID
	}
	if b.jobName != "" {
		envMap[JobName] = b.jobName
	}
	if b.jobParentID != "" {
		envMap[JobParentID] = b.jobParentID
	}
	if b.datacenter != "" {
		envMap[Datacenter] = b.datacenter
	}
	if b.cgroupParent != "" {
		envMap[CgroupParent] = b.cgroupParent
	}
	if b.namespace != "" {
		envMap[Namespace] = b.namespace
	}
	if b.region != "" {
		envMap[Region] = b.region
	}

	// Build the network related env vars
	buildNetworkEnv(envMap, b.networks, b.driverNetwork)

	// Build the addr of the other tasks
	for k, v := range b.otherPorts {
		envMap[k] = v
	}

	// Build the Consul Connect upstream env vars
	buildUpstreamsEnv(envMap, b.upstreams)

	// Build the Vault Token
	if b.injectVaultToken && b.vaultToken != "" {
		envMap[VaultToken] = b.vaultToken
	}

	// Build the Vault Namespace
	if b.injectVaultToken && b.vaultNamespace != "" {
		envMap[VaultNamespace] = b.vaultNamespace
	}

	// Copy and interpolate task meta
	for k, v := range b.taskMeta {
		envMap[hargs.ReplaceEnv(k, nodeAttrs, envMap)] = hargs.ReplaceEnv(v, nodeAttrs, envMap)
	}

	// Interpolate and add environment variables from the host. Only do this if
	// the variable is not present in the map; we do not want to override task
	// variables in favour of the same variable found within the host OS env
	// vars.
	for k, v := range b.hostEnv {
		if _, ok := envMap[k]; !ok {
			envMap[k] = hargs.ReplaceEnv(v, nodeAttrs, envMap)
		}
	}

	// Copy interpolated task env vars second as they override host env vars
	for k, v := range b.envvars {
		envMap[k] = hargs.ReplaceEnv(v, nodeAttrs, envMap)
	}

	// Copy hook env vars in the order the hooks were run
	for _, h := range b.hookNames {
		for k, v := range b.hookEnvs[h] {
			e := hargs.ReplaceEnv(v, nodeAttrs, envMap)
			envMap[k] = e

			if h == b.deviceHookName {
				if deviceEnvs == nil {
					deviceEnvs = make(map[string]string, len(b.hookEnvs[h]))
				}

				deviceEnvs[k] = e
			}
		}
	}

	// Copy template env vars as they override task env vars
	for k, v := range b.templateEnv {
		envMap[k] = v
	}

	// Clean keys (see #2405)
	prefixesToClean := [...]string{AddrPrefix, IpPrefix, PortPrefix, HostPortPrefix, MetaPrefix}
	cleanedEnv := make(map[string]string, len(envMap))
	for k, v := range envMap {
		cleanedK := k
		for i := range prefixesToClean {
			if strings.HasPrefix(k, prefixesToClean[i]) {
				cleanedK = helper.CleanEnvVar(k, '_')
			}
		}
		cleanedEnv[cleanedK] = v
	}

	return cleanedEnv, deviceEnvs
}
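
// examplePrecedence is an illustrative sketch (a hypothetical helper, not
// part of this package's API) of the layering buildEnv applies, lowest to
// highest: host env < task env < hook env (in hook run order) < template env.
// The names and values are assumed placeholders; envvars is written directly
// only because this sketch lives inside the package.
func examplePrecedence() string {
	b := NewEmptyBuilder()
	b.envvars["TOKEN"] = "from-task"
	b.SetHookEnv("vault", map[string]string{"TOKEN": "from-hook"})
	b.SetTemplateEnv(map[string]string{"TOKEN": "from-template"})
	return b.Build().EnvMap["TOKEN"] // "from-template"
}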

// Build must be called after all the task's environment values have been set.
func (b *Builder) Build() *TaskEnv {
	nodeAttrs := make(map[string]string)

	b.mu.RLock()
	defer b.mu.RUnlock()

	if b.region != "" {
		// Copy region over to node attrs
		nodeAttrs[nodeRegionKey] = b.region
	}
	// Copy node attributes
	for k, v := range b.nodeAttrs {
		nodeAttrs[k] = v
	}

	envMap, deviceEnvs := b.buildEnv(b.allocDir, b.localDir, b.secretsDir, nodeAttrs)
	envMapClient, _ := b.buildEnv(b.clientSharedAllocDir, b.clientTaskLocalDir, b.clientTaskSecretsDir, nodeAttrs)

	return NewTaskEnv(envMap, envMapClient, deviceEnvs, nodeAttrs, b.clientTaskRoot, b.clientSharedAllocDir)
}

// UpdateTask updates the environment based on a new alloc and task.
func (b *Builder) UpdateTask(alloc *structs.Allocation, task *structs.Task) *Builder {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.setTask(task).setAlloc(alloc)
}

// SetHookEnv sets environment variables from a hook. Variables are
// Last-Write-Wins, so if a hook writes a variable that's also written by a
// later hook, the later hook's value always gets used.
func (b *Builder) SetHookEnv(hook string, envs map[string]string) *Builder {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.setHookEnvLocked(hook, envs)
}

// setHookEnvLocked is the implementation of setting hook environment variables
// and should be called with the lock held.
func (b *Builder) setHookEnvLocked(hook string, envs map[string]string) *Builder {
	if _, exists := b.hookEnvs[hook]; !exists {
		b.hookNames = append(b.hookNames, hook)
	}
	b.hookEnvs[hook] = envs

	return b
}
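
// exampleHookOrdering is an illustrative sketch (a hypothetical helper, not
// part of this package's API) of the Last-Write-Wins semantics above:
// re-setting a hook's env replaces its map but keeps the hook's original
// position, so a later hook still wins for shared keys. Hook names and
// values are assumed placeholders.
func exampleHookOrdering() string {
	b := NewEmptyBuilder()
	b.SetHookEnv("a", map[string]string{"A": "1"})
	b.SetHookEnv("b", map[string]string{"A": "2"})
	b.SetHookEnv("a", map[string]string{"A": "3"}) // "a" keeps its slot before "b"
	return b.Build().EnvMap["A"]                   // "2": hook "b" is applied last
}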

// SetDeviceHookEnv sets environment variables from a device hook. Variables are
// Last-Write-Wins, so if a hook writes a variable that's also written by a
// later hook, the later hook's value always gets used.
func (b *Builder) SetDeviceHookEnv(hookName string, envs map[string]string) *Builder {
	b.mu.Lock()
	defer b.mu.Unlock()

	// Store the device hook name
	b.deviceHookName = hookName
	return b.setHookEnvLocked(hookName, envs)
}

// setTask is called from NewBuilder to populate task related environment
// variables.
func (b *Builder) setTask(task *structs.Task) *Builder {
	if task == nil {
		return b
	}
	b.taskName = task.Name
	b.envvars = make(map[string]string, len(task.Env))
	for k, v := range task.Env {
		b.envvars[k] = v
	}

	// COMPAT(0.11): Remove in 0.11
	if task.Resources == nil {
		b.memLimit = 0
		b.memMaxLimit = 0
		b.cpuLimit = 0
	} else {
		b.memLimit = int64(task.Resources.MemoryMB)
		b.memMaxLimit = int64(task.Resources.MemoryMaxMB)
		b.cpuLimit = int64(task.Resources.CPU)
	}
	return b
}

// setAlloc is called from NewBuilder to populate alloc related environment
// variables.
func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder {
	b.allocId = alloc.ID
	b.allocName = alloc.Name
	b.groupName = alloc.TaskGroup
	b.allocIndex = int(alloc.Index())
	b.jobID = alloc.Job.ID
	b.jobName = alloc.Job.Name
	b.jobParentID = alloc.Job.ParentID
	b.namespace = alloc.Namespace

	// Set meta
	combined := alloc.Job.CombinedTaskMeta(alloc.TaskGroup, b.taskName)
	// taskMetaSize is double the number of meta keys to account for given and
	// upper-cased values
	taskMetaSize := len(combined) * 2

	// If the job is parameterized, initialize optional meta to empty strings
	if alloc.Job.Dispatched {
		optionalMetaCount := len(alloc.Job.ParameterizedJob.MetaOptional)
		b.taskMeta = make(map[string]string, taskMetaSize+optionalMetaCount*2)

		for _, k := range alloc.Job.ParameterizedJob.MetaOptional {
			b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, strings.ToUpper(k))] = ""
			b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, k)] = ""
		}
	} else {
		b.taskMeta = make(map[string]string, taskMetaSize)
	}

	for k, v := range combined {
		b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, strings.ToUpper(k))] = v
		b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, k)] = v
	}

	tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)

	b.otherPorts = make(map[string]string, len(tg.Tasks)*2)

	// Protect against invalid allocs where AllocatedResources isn't set.
	// TestClient_AddAllocError explicitly tests for this condition.
	if alloc.AllocatedResources != nil {
		// Populate task resources
		if tr, ok := alloc.AllocatedResources.Tasks[b.taskName]; ok {
			b.cpuLimit = tr.Cpu.CpuShares
			b.cpuCores = cpuset.New(tr.Cpu.ReservedCores...).String()
			b.memLimit = tr.Memory.MemoryMB
			b.memMaxLimit = tr.Memory.MemoryMaxMB

			// Copy networks to prevent sharing
			b.networks = make([]*structs.NetworkResource, len(tr.Networks))
			for i, n := range tr.Networks {
				b.networks[i] = n.Copy()
			}
		}

		// COMPAT(1.0): remove in 1.0 when AllocatedPorts can be used exclusively
		// Add ports from other tasks
		for taskName, resources := range alloc.AllocatedResources.Tasks {
			if taskName == b.taskName {
				continue
			}

			for _, nw := range resources.Networks {
				for _, p := range nw.ReservedPorts {
					addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
				}
				for _, p := range nw.DynamicPorts {
					addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
				}
			}
		}

		// COMPAT(1.0): remove in 1.0 when AllocatedPorts can be used exclusively
		// Add ports from group networks
		//TODO Expose IPs but possibly only via variable interpolation
		for _, nw := range alloc.AllocatedResources.Shared.Networks {
			for _, p := range nw.ReservedPorts {
				addGroupPort(b.otherPorts, p)
			}
			for _, p := range nw.DynamicPorts {
				addGroupPort(b.otherPorts, p)
			}
		}

		// Add any allocated host ports
		if alloc.AllocatedResources.Shared.Ports != nil {
			addPorts(b.otherPorts, alloc.AllocatedResources.Shared.Ports)
		}
	}

	var upstreams []structs.ConsulUpstream
	for _, svc := range tg.Services {
		if svc.Connect.HasSidecar() && svc.Connect.SidecarService.HasUpstreams() {
			upstreams = append(upstreams, svc.Connect.SidecarService.Proxy.Upstreams...)
		}
	}
	if len(upstreams) > 0 {
		b.setUpstreamsLocked(upstreams)
	}

	return b
}
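
// As an illustrative sketch of the meta handling in setAlloc above: each
// combined meta key is exported twice, once as given and once upper-cased.
// Assuming a jobspec with meta { owner = "team-web" } (an assumed example),
// a task would see both:
//
//	NOMAD_META_owner=team-web
//	NOMAD_META_OWNER=team-web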

// setNode is called from NewBuilder to populate node attributes.
func (b *Builder) setNode(n *structs.Node) *Builder {
	b.nodeAttrs = make(map[string]string, 4+len(n.Attributes)+len(n.Meta))
	b.nodeAttrs[nodeIdKey] = n.ID
	b.nodeAttrs[nodeNameKey] = n.Name
	b.nodeAttrs[nodeClassKey] = n.NodeClass
	b.nodeAttrs[nodeDcKey] = n.Datacenter
	b.datacenter = n.Datacenter
	b.cgroupParent = n.CgroupParent

	// Set up the attributes.
	for k, v := range n.Attributes {
		b.nodeAttrs[fmt.Sprintf("%s%s", nodeAttributePrefix, k)] = v
	}

	// Set up the meta.
	for k, v := range n.Meta {
		b.nodeAttrs[fmt.Sprintf("%s%s", nodeMetaPrefix, k)] = v
	}
	return b
}
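
// As an illustrative sketch of the keys setNode populates for interpolation
// (these are node attributes, not env vars); the example attribute and meta
// names below are assumptions:
//
//	${node.unique.id}   // node ID
//	${node.datacenter}  // e.g. "dc1"
//	${attr.kernel.name} // from Node.Attributes, e.g. "linux"
//	${meta.rack}        // from Node.Meta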

func (b *Builder) SetAllocDir(dir string) *Builder {
	b.mu.Lock()
	b.allocDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetTaskLocalDir(dir string) *Builder {
	b.mu.Lock()
	b.localDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetClientSharedAllocDir(dir string) *Builder {
	b.mu.Lock()
	b.clientSharedAllocDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetClientTaskRoot(dir string) *Builder {
	b.mu.Lock()
	b.clientTaskRoot = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetClientTaskLocalDir(dir string) *Builder {
	b.mu.Lock()
	b.clientTaskLocalDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetClientTaskSecretsDir(dir string) *Builder {
	b.mu.Lock()
	b.clientTaskSecretsDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetSecretsDir(dir string) *Builder {
	b.mu.Lock()
	b.secretsDir = dir
	b.mu.Unlock()
	return b
}

// SetDriverNetwork sets the network defined by the driver.
func (b *Builder) SetDriverNetwork(n *drivers.DriverNetwork) *Builder {
	ncopy := n.Copy()
	b.mu.Lock()
	b.driverNetwork = ncopy
	b.mu.Unlock()
	return b
}

// buildNetworkEnv builds the network env vars in the given map.
//
//	Auto: NOMAD_PORT_<label>
//	Host: NOMAD_IP_<label>, NOMAD_ADDR_<label>, NOMAD_HOST_PORT_<label>
//
// Handled by setAlloc -> otherPorts:
//
//	Task: NOMAD_TASK_{IP,PORT,ADDR}_<task>_<label> # Always host values
func buildNetworkEnv(envMap map[string]string, nets structs.Networks, driverNet *drivers.DriverNetwork) {
	for _, n := range nets {
		for _, p := range n.ReservedPorts {
			buildPortEnv(envMap, p, n.IP, driverNet)
		}
		for _, p := range n.DynamicPorts {
			buildPortEnv(envMap, p, n.IP, driverNet)
		}
	}
}

func buildPortEnv(envMap map[string]string, p structs.Port, ip string, driverNet *drivers.DriverNetwork) {
	// Host IP, port, and address
	portStr := strconv.Itoa(p.Value)
	envMap[IpPrefix+p.Label] = ip
	envMap[HostPortPrefix+p.Label] = portStr
	envMap[AddrPrefix+p.Label] = net.JoinHostPort(ip, portStr)

	// Set Port to the task's value if there's a port map
	if driverNet != nil && driverNet.PortMap[p.Label] != 0 {
		envMap[PortPrefix+p.Label] = strconv.Itoa(driverNet.PortMap[p.Label])
	} else {
		// Default to host's
		envMap[PortPrefix+p.Label] = portStr
	}
}
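
// examplePortEnv is an illustrative sketch (a hypothetical helper, not part
// of this package's API) of buildPortEnv with a driver port map, where host
// port 8080 forwards to task port 80. All values are assumed placeholders.
func examplePortEnv() map[string]string {
	envMap := map[string]string{}
	buildPortEnv(envMap, structs.Port{Label: "http", Value: 8080}, "10.0.0.5",
		&drivers.DriverNetwork{PortMap: map[string]int{"http": 80}})
	// envMap now holds NOMAD_IP_http=10.0.0.5, NOMAD_HOST_PORT_http=8080,
	// NOMAD_ADDR_http=10.0.0.5:8080, and NOMAD_PORT_http=80.
	return envMap
}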

// SetUpstreams sets the upstreams defined by Connect-enabled group services.
func (b *Builder) SetUpstreams(upstreams []structs.ConsulUpstream) *Builder {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.setUpstreamsLocked(upstreams)
}

func (b *Builder) setUpstreamsLocked(upstreams []structs.ConsulUpstream) *Builder {
	b.upstreams = upstreams
	return b
}

// buildUpstreamsEnv builds NOMAD_UPSTREAM_{IP,PORT,ADDR}_{destination} vars
func buildUpstreamsEnv(envMap map[string]string, upstreams []structs.ConsulUpstream) {
	// Proxy sidecars always bind to localhost
	const ip = "127.0.0.1"
	for _, u := range upstreams {
		port := strconv.Itoa(u.LocalBindPort)
		envMap[UpstreamPrefix+"IP_"+u.DestinationName] = ip
		envMap[UpstreamPrefix+"PORT_"+u.DestinationName] = port
		envMap[UpstreamPrefix+"ADDR_"+u.DestinationName] = net.JoinHostPort(ip, port)

		// Also add a cleaned version
		cleanName := helper.CleanEnvVar(u.DestinationName, '_')
		envMap[UpstreamPrefix+"ADDR_"+cleanName] = net.JoinHostPort(ip, port)
		envMap[UpstreamPrefix+"IP_"+cleanName] = ip
		envMap[UpstreamPrefix+"PORT_"+cleanName] = port
	}
}

// SetPortMapEnvs sets the PortMap related environment variables on the map
func SetPortMapEnvs(envs map[string]string, ports map[string]int) map[string]string {
	if envs == nil {
		envs = map[string]string{}
	}

	for portLabel, port := range ports {
		portEnv := helper.CleanEnvVar(PortPrefix+portLabel, '_')
		envs[portEnv] = strconv.Itoa(port)
	}
	return envs
}

// SetHostEnvvars adds the host environment variables to the tasks. The filter
// parameter can be used to keep host environment variables out of the tasks.
func (b *Builder) SetHostEnvvars(filter []string) *Builder {
	filterMap := make(map[string]struct{}, len(filter))
	for _, f := range filter {
		filterMap[f] = struct{}{}
	}

	fullHostEnv := os.Environ()
	filteredHostEnv := make(map[string]string, len(fullHostEnv))
	for _, e := range fullHostEnv {
		parts := strings.SplitN(e, "=", 2)
		key, value := parts[0], parts[1]

		// Skip filtered environment variables
		if _, filtered := filterMap[key]; filtered {
			continue
		}

		filteredHostEnv[key] = value
	}

	b.mu.Lock()
	b.hostEnv = filteredHostEnv
	b.mu.Unlock()
	return b
}

func (b *Builder) SetTemplateEnv(m map[string]string) *Builder {
	b.mu.Lock()
	b.templateEnv = m
	b.mu.Unlock()
	return b
}

func (b *Builder) SetVaultToken(token, namespace string, inject bool) *Builder {
	b.mu.Lock()
	b.vaultToken = token
	b.vaultNamespace = namespace
	b.injectVaultToken = inject
	b.mu.Unlock()
	return b
}

// addPort adds keys and values for another task's port to an env var map.
func addPort(m map[string]string, taskName, ip, portLabel string, port int) {
	key := fmt.Sprintf("%s%s_%s", AddrPrefix, taskName, portLabel)
	m[key] = fmt.Sprintf("%s:%d", ip, port)
	key = fmt.Sprintf("%s%s_%s", IpPrefix, taskName, portLabel)
	m[key] = ip
	key = fmt.Sprintf("%s%s_%s", PortPrefix, taskName, portLabel)
	m[key] = strconv.Itoa(port)
}

// addGroupPort adds a group network port. The To value is used if one is
// specified.
func addGroupPort(m map[string]string, port structs.Port) {
	if port.To > 0 {
		m[PortPrefix+port.Label] = strconv.Itoa(port.To)
	} else {
		m[PortPrefix+port.Label] = strconv.Itoa(port.Value)
	}

	m[HostPortPrefix+port.Label] = strconv.Itoa(port.Value)
}

func addPorts(m map[string]string, ports structs.AllocatedPorts) {
	for _, p := range ports {
		m[AddrPrefix+p.Label] = fmt.Sprintf("%s:%d", p.HostIP, p.Value)
		m[HostAddrPrefix+p.Label] = fmt.Sprintf("%s:%d", p.HostIP, p.Value)
		m[IpPrefix+p.Label] = p.HostIP
		m[HostIpPrefix+p.Label] = p.HostIP
		if p.To > 0 {
			val := strconv.Itoa(p.To)
			m[PortPrefix+p.Label] = val
			m[AllocPortPrefix+p.Label] = val
		} else {
			val := strconv.Itoa(p.Value)
			m[PortPrefix+p.Label] = val
			m[AllocPortPrefix+p.Label] = val
		}

		m[HostPortPrefix+p.Label] = strconv.Itoa(p.Value)
	}
}
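
// exampleGroupPorts is an illustrative sketch (a hypothetical helper, not
// part of this package's API) of addPorts on group-level allocated ports,
// where To maps the host port onto a port inside the task's network
// namespace. The port values are assumed placeholders.
func exampleGroupPorts() map[string]string {
	m := map[string]string{}
	addPorts(m, structs.AllocatedPorts{
		{Label: "db", HostIP: "10.0.0.5", Value: 25435, To: 5432},
	})
	// m now holds, among others: NOMAD_PORT_db=5432, NOMAD_ALLOC_PORT_db=5432,
	// NOMAD_HOST_PORT_db=25435, and NOMAD_ADDR_db=10.0.0.5:25435.
	return m
}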