github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/client/taskenv/env.go

package taskenv

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"sync"

	"github.com/hashicorp/nomad/helper"
	hargs "github.com/hashicorp/nomad/helper/args"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/drivers"
	"github.com/zclconf/go-cty/cty"
)

// A set of environment variables that are exported by each driver.
const (
	// AllocDir is the environment variable with the path to the alloc directory
	// that is shared across tasks within a task group.
	AllocDir = "NOMAD_ALLOC_DIR"

	// TaskLocalDir is the environment variable with the path to the task's
	// local directory where it can store data that is persisted until the
	// alloc is removed.
	TaskLocalDir = "NOMAD_TASK_DIR"

	// SecretsDir is the environment variable with the path to the task's
	// secrets directory where it can store sensitive data.
	SecretsDir = "NOMAD_SECRETS_DIR"

	// MemLimit is the environment variable with the task's memory limit in MB.
	MemLimit = "NOMAD_MEMORY_LIMIT"

	// CpuLimit is the environment variable with the task's CPU limit in MHz.
	CpuLimit = "NOMAD_CPU_LIMIT"

	// AllocID is the environment variable for passing the allocation ID.
	AllocID = "NOMAD_ALLOC_ID"

	// AllocName is the environment variable for passing the allocation name.
	AllocName = "NOMAD_ALLOC_NAME"

	// TaskName is the environment variable for passing the task name.
	TaskName = "NOMAD_TASK_NAME"

	// GroupName is the environment variable for passing the task group name.
	GroupName = "NOMAD_GROUP_NAME"

	// JobID is the environment variable for passing the job ID.
	JobID = "NOMAD_JOB_ID"

	// JobName is the environment variable for passing the job name.
	JobName = "NOMAD_JOB_NAME"

	// JobParentID is the environment variable for passing the ID of the job's parent.
	JobParentID = "NOMAD_JOB_PARENT_ID"

	// AllocIndex is the environment variable for passing the allocation index.
	AllocIndex = "NOMAD_ALLOC_INDEX"

	// Datacenter is the environment variable for passing the datacenter in which the alloc is running.
	Datacenter = "NOMAD_DC"

	// Namespace is the environment variable for passing the namespace in which the alloc is running.
	Namespace = "NOMAD_NAMESPACE"

	// Region is the environment variable for passing the region in which the alloc is running.
	Region = "NOMAD_REGION"

	// AddrPrefix is the prefix for passing both dynamic and static port
	// allocations to tasks.
	// E.g. $NOMAD_ADDR_http=127.0.0.1:80
	//
	// The ip:port is always the host's.
	AddrPrefix = "NOMAD_ADDR_"

	HostAddrPrefix = "NOMAD_HOST_ADDR_"

	// IpPrefix is the prefix for passing the host IP of a port allocation
	// to a task.
	IpPrefix = "NOMAD_IP_"

	HostIpPrefix = "NOMAD_HOST_IP_"

	// PortPrefix is the prefix for passing the port allocation to a task.
	// It will be the task's port if a port map is specified. Tasks should
	// bind to this port.
	PortPrefix = "NOMAD_PORT_"

	AllocPortPrefix = "NOMAD_ALLOC_PORT_"

	// HostPortPrefix is the prefix for passing the host port when a port
	// map is specified.
	HostPortPrefix = "NOMAD_HOST_PORT_"

	// MetaPrefix is the prefix for passing task metadata.
	MetaPrefix = "NOMAD_META_"

	// UpstreamPrefix is the prefix for passing upstream IP and ports to the alloc.
	UpstreamPrefix = "NOMAD_UPSTREAM_"

	// VaultToken is the environment variable for passing the Vault token.
	VaultToken = "VAULT_TOKEN"

	// VaultNamespace is the environment variable for passing the Vault namespace, if applicable.
	VaultNamespace = "VAULT_NAMESPACE"
)
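
// exampleReadTaskEnv is an illustrative sketch, not part of the original
// file: a hypothetical helper showing how a task process might consume a
// few of the variables above at runtime. The paths in the comments are the
// usual in-task mount points, assumed here for illustration only.
func exampleReadTaskEnv() {
	allocDir := os.Getenv(AllocDir)    // e.g. /alloc, shared across the group
	taskDir := os.Getenv(TaskLocalDir) // e.g. /local, private to the task
	secrets := os.Getenv(SecretsDir)   // e.g. /secrets, for sensitive data
	fmt.Printf("alloc=%s local=%s secrets=%s\n", allocDir, taskDir, secrets)
}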

// The node values that can be interpreted.
const (
	nodeIdKey     = "node.unique.id"
	nodeDcKey     = "node.datacenter"
	nodeRegionKey = "node.region"
	nodeNameKey   = "node.unique.name"
	nodeClassKey  = "node.class"

	// Prefixes used for lookups.
	nodeAttributePrefix = "attr."
	nodeMetaPrefix      = "meta."
)

// TaskEnv is a task's environment as well as the node attributes for
// interpolation.
type TaskEnv struct {
	// NodeAttrs is the map of node attributes for interpolation
	NodeAttrs map[string]string

	// EnvMap is the map of environment variables
	EnvMap map[string]string

	// deviceEnv is the environment variables populated from the device hooks.
	deviceEnv map[string]string

	// envList is an optional precomputed list of NAME=value pairs that, if
	// set, is returned by List() instead of rebuilding it from EnvMap.
	envList []string

	// EnvMapClient is the map of environment variables with client-specific
	// task directories
	// See https://github.com/hashicorp/nomad/pull/9671
	EnvMapClient map[string]string

	// clientTaskDir is the absolute path to the task root directory on the host
	// <alloc_dir>/<task>
	clientTaskDir string

	// clientSharedAllocDir is the path to shared alloc directory on the host
	// <alloc_dir>/alloc/
	clientSharedAllocDir string
}

// NewTaskEnv creates a new task environment with the given environment,
// client environment, device environment, and node attribute maps, plus the
// client-side task and shared alloc directories.
func NewTaskEnv(env, envClient, deviceEnv, node map[string]string, clientTaskDir, clientAllocDir string) *TaskEnv {
	return &TaskEnv{
		NodeAttrs:            node,
		deviceEnv:            deviceEnv,
		EnvMap:               env,
		EnvMapClient:         envClient,
		clientTaskDir:        clientTaskDir,
		clientSharedAllocDir: clientAllocDir,
	}
}

// NewEmptyTaskEnv creates a new empty task environment.
func NewEmptyTaskEnv() *TaskEnv {
	return &TaskEnv{
		NodeAttrs: make(map[string]string),
		deviceEnv: make(map[string]string),
		EnvMap:    make(map[string]string),
	}
}

// List returns the task's environment as a slice of NAME=value pair strings.
func (t *TaskEnv) List() []string {
	if t.envList != nil {
		return t.envList
	}

	env := []string{}
	for k, v := range t.EnvMap {
		env = append(env, fmt.Sprintf("%s=%s", k, v))
	}

	return env
}

// DeviceEnv returns the task's environment variables set by device hooks.
func (t *TaskEnv) DeviceEnv() map[string]string {
	m := make(map[string]string, len(t.deviceEnv))
	for k, v := range t.deviceEnv {
		m[k] = v
	}

	return m
}

// Map returns a copy of the task's environment variables.
func (t *TaskEnv) Map() map[string]string {
	m := make(map[string]string, len(t.EnvMap))
	for k, v := range t.EnvMap {
		m[k] = v
	}

	return m
}

// All returns the task's environment variables and the node's attributes in
// a single map.
func (t *TaskEnv) All() map[string]string {
	m := make(map[string]string, len(t.EnvMap)+len(t.NodeAttrs))
	for k, v := range t.EnvMap {
		m[k] = v
	}
	for k, v := range t.NodeAttrs {
		m[k] = v
	}

	return m
}

// AllValues is a map of the task's environment variables and the node's
// attributes with cty.Value (String) values. Per-key errors, keyed by the
// offending variable name, are returned in a map.
//
// In the rare case of a fatal error, only an error value is returned. This is
// likely a programming error as user input should not be able to cause a fatal
// error.
func (t *TaskEnv) AllValues() (map[string]cty.Value, map[string]error, error) {
	errs := make(map[string]error)

	// Intermediate map for building up nested go types
	allMap := make(map[string]interface{}, len(t.EnvMap)+len(t.NodeAttrs))

	// Intermediate map for all env vars including those whose keys cannot
	// be nested (eg foo...bar)
	envMap := make(map[string]cty.Value, len(t.EnvMap))

	// Prepare job-based variables (eg job.meta, job.group.task.env, etc)
	for k, v := range t.EnvMap {
		if err := addNestedKey(allMap, k, v); err != nil {
			errs[k] = err
		}
		envMap[k] = cty.StringVal(v)
	}

	// Prepare node-based variables (eg node.*, attr.*, meta.*)
	for k, v := range t.NodeAttrs {
		if err := addNestedKey(allMap, k, v); err != nil {
			errs[k] = err
		}
	}

	// Add flat envMap as a Map to allMap so users can access any key via
	// HCL2's indexing syntax: ${env["foo...bar"]}
	allMap["env"] = cty.MapVal(envMap)

	// Add meta and attr to node if they exist to properly namespace things
	// a bit.
	nodeMapI, ok := allMap["node"]
	if !ok {
		return nil, nil, fmt.Errorf("missing node variable")
	}
	nodeMap, ok := nodeMapI.(map[string]interface{})
	if !ok {
		return nil, nil, fmt.Errorf("invalid type for node variable: %T", nodeMapI)
	}
	if attrMap, ok := allMap["attr"]; ok {
		nodeMap["attr"] = attrMap
	}
	if metaMap, ok := allMap["meta"]; ok {
		nodeMap["meta"] = metaMap
	}

	// ctyify the entire tree of strings and maps
	tree, err := ctyify(allMap)
	if err != nil {
		// This should not be possible and is likely a programming
		// error. Invalid user input should be cleaned earlier.
		return nil, nil, err
	}

	return tree, errs, nil
}
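
// exampleAllValues is an illustrative sketch, not part of the original file:
// a hypothetical caller of AllValues. Per-key errors are collected in errs,
// while the flat "env" map supports lookups for keys that cannot be nested.
func exampleAllValues(t *TaskEnv) {
	values, errs, err := t.AllValues()
	if err != nil {
		return // fatal; per the docs above, likely a programming error
	}
	for k, kerr := range errs {
		fmt.Printf("key %q could not be nested: %v\n", k, kerr)
	}
	// Look up a single variable through the flat "env" map.
	envVal := values["env"]
	key := cty.StringVal(TaskName)
	if envVal.HasIndex(key).True() {
		fmt.Println("task name:", envVal.Index(key).AsString())
	}
}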

// ParseAndReplace takes the user-supplied args and replaces any instance of
// an environment variable or Nomad variable in the args with the actual
// value.
func (t *TaskEnv) ParseAndReplace(args []string) []string {
	if args == nil {
		return nil
	}

	replaced := make([]string, len(args))
	for i, arg := range args {
		replaced[i] = hargs.ReplaceEnv(arg, t.EnvMap, t.NodeAttrs)
	}

	return replaced
}

// ReplaceEnv takes an arg and replaces all occurrences of environment variables
// and Nomad variables. If the variable is found in the passed map it is
// replaced, otherwise the original string is returned.
func (t *TaskEnv) ReplaceEnv(arg string) string {
	return hargs.ReplaceEnv(arg, t.EnvMap, t.NodeAttrs)
}
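
// exampleReplaceEnv is an illustrative sketch, not part of the original
// file: ${NOMAD_*} environment variables and node attributes such as
// ${node.datacenter} are both interpolated from the maps held by TaskEnv.
func exampleReplaceEnv(t *TaskEnv) {
	cfg := t.ReplaceEnv("${NOMAD_TASK_DIR}/config.json")
	args := t.ParseAndReplace([]string{"-dc", "${node.datacenter}"})
	fmt.Println(cfg, args)
}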

// replaceEnvClient takes an arg and replaces all occurrences of client-specific
// environment variables and Nomad variables. If the variable is found in the
// passed map it is replaced, otherwise the original string is returned.
// The difference from ReplaceEnv is potentially different values for
// the following variables:
// * NOMAD_ALLOC_DIR
// * NOMAD_TASK_DIR
// * NOMAD_SECRETS_DIR
// and anything that was interpolated using them.
//
// See https://github.com/hashicorp/nomad/pull/9671
func (t *TaskEnv) replaceEnvClient(arg string) string {
	return hargs.ReplaceEnv(arg, t.EnvMapClient, t.NodeAttrs)
}

// checkEscape returns true if the absolute path testPath escapes both the
// task directory and the shared allocation directory specified in the
// directory path fields of this TaskEnv.
func (t *TaskEnv) checkEscape(testPath string) bool {
	for _, p := range []string{t.clientTaskDir, t.clientSharedAllocDir} {
		if p != "" && !helper.PathEscapesSandbox(p, testPath) {
			return false
		}
	}
	return true
}

// ClientPath interpolates the argument as a path, using the
// environment variables with client-relative directories. The
// result is an absolute path on the client filesystem.
//
// If the interpolated result is a relative path, it is made absolute by
// joining it with the client task directory. If joinEscape is set, an
// interpolated path that escapes the sandbox is joined with the task dir as
// well. The result is then checked to see whether it (still) escapes both
// the task working directory and the shared allocation directory.
func (t *TaskEnv) ClientPath(rawPath string, joinEscape bool) (string, bool) {
	path := t.replaceEnvClient(rawPath)
	if !filepath.IsAbs(path) || (t.checkEscape(path) && joinEscape) {
		path = filepath.Join(t.clientTaskDir, path)
	}
	path = filepath.Clean(path)
	escapes := t.checkEscape(path)
	return path, escapes
}
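
// exampleClientPath is an illustrative sketch, not part of the original
// file, of the joinEscape behavior: a relative or escaping path is joined
// onto the client task dir before the final check, and the returned bool
// reports whether the cleaned path still escapes both sandbox directories.
func exampleClientPath(t *TaskEnv) {
	path, escapes := t.ClientPath("../outside", true)
	if escapes {
		fmt.Printf("refusing path outside the sandbox: %s\n", path)
	}
}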

// Builder is used to build task environments and is safe for concurrent use.
type Builder struct {
	// envvars are custom set environment variables
	envvars map[string]string

	// templateEnv are env vars set from templates
	templateEnv map[string]string

	// hostEnv are environment variables filtered from the host
	hostEnv map[string]string

	// nodeAttrs are Node attributes and metadata
	nodeAttrs map[string]string

	// taskMeta are the meta attributes on the task
	taskMeta map[string]string

	// allocDir from task's perspective; eg /alloc
	allocDir string

	// localDir from task's perspective; eg /local
	localDir string

	// secretsDir from task's perspective; eg /secrets
	secretsDir string

	// clientSharedAllocDir is the shared alloc dir from the client's perspective; eg, <alloc_dir>/<alloc_id>/alloc
	clientSharedAllocDir string

	// clientTaskRoot is the task working directory from the client's perspective; eg <alloc_dir>/<alloc_id>/<task>
	clientTaskRoot string

	// clientTaskLocalDir is the local dir from the client's perspective; eg <client_task_root>/local
	clientTaskLocalDir string

	// clientTaskSecretsDir is the secrets dir from the client's perspective; eg <client_task_root>/secrets
	clientTaskSecretsDir string

	cpuLimit         int64
	memLimit         int64
	taskName         string
	allocIndex       int
	datacenter       string
	namespace        string
	region           string
	allocId          string
	allocName        string
	groupName        string
	vaultToken       string
	vaultNamespace   string
	injectVaultToken bool
	jobID            string
	jobName          string
	jobParentID      string

	// otherPorts for tasks in the same alloc
	otherPorts map[string]string

	// driverNetwork is the network defined by the driver (or nil if none
	// was defined).
	driverNetwork *drivers.DriverNetwork

	// network resources from the task; must be lazily turned into env vars
	// because portMaps and advertiseIP can change after builder creation
	// and affect network env vars.
	networks []*structs.NetworkResource

	// hookEnvs are env vars set by hooks and stored by hook name to
	// support adding/removing vars from multiple hooks (eg HookA adds A:1,
	// HookB adds A:2, HookA removes A, A should equal 2)
	hookEnvs map[string]map[string]string

	// hookNames is a slice of hooks in hookEnvs to apply hookEnvs in the
	// order the hooks are run.
	hookNames []string

	// deviceHookName is the device hook name. It is set only if device hooks
	// are set. While a bit roundabout, this enables us to return device hook
	// environment variables without having to hardcode the name of the hook.
	deviceHookName string

	// upstreams from the group connect enabled services
	upstreams []structs.ConsulUpstream

	mu *sync.RWMutex
}

// NewBuilder creates a new task environment builder.
func NewBuilder(node *structs.Node, alloc *structs.Allocation, task *structs.Task, region string) *Builder {
	b := NewEmptyBuilder()
	b.region = region
	return b.setTask(task).setAlloc(alloc).setNode(node)
}

// NewEmptyBuilder creates a new environment builder.
func NewEmptyBuilder() *Builder {
	return &Builder{
		mu:       &sync.RWMutex{},
		hookEnvs: map[string]map[string]string{},
		envvars:  make(map[string]string),
	}
}

// buildEnv returns the environment variables and device environment
// variables with respect to the task directories passed in the arguments.
func (b *Builder) buildEnv(allocDir, localDir, secretsDir string,
	nodeAttrs map[string]string) (map[string]string, map[string]string) {

	envMap := make(map[string]string)
	var deviceEnvs map[string]string

	// Add the directories
	if allocDir != "" {
		envMap[AllocDir] = allocDir
	}
	if localDir != "" {
		envMap[TaskLocalDir] = localDir
	}
	if secretsDir != "" {
		envMap[SecretsDir] = secretsDir
	}

	// Add the resource limits
	if b.memLimit != 0 {
		envMap[MemLimit] = strconv.FormatInt(b.memLimit, 10)
	}
	if b.cpuLimit != 0 {
		envMap[CpuLimit] = strconv.FormatInt(b.cpuLimit, 10)
	}

	// Add the task metadata
	if b.allocId != "" {
		envMap[AllocID] = b.allocId
	}
	if b.allocName != "" {
		envMap[AllocName] = b.allocName
	}
	if b.groupName != "" {
		envMap[GroupName] = b.groupName
	}
	if b.allocIndex != -1 {
		envMap[AllocIndex] = strconv.Itoa(b.allocIndex)
	}
	if b.taskName != "" {
		envMap[TaskName] = b.taskName
	}
	if b.jobID != "" {
		envMap[JobID] = b.jobID
	}
	if b.jobName != "" {
		envMap[JobName] = b.jobName
	}
	if b.jobParentID != "" {
		envMap[JobParentID] = b.jobParentID
	}
	if b.datacenter != "" {
		envMap[Datacenter] = b.datacenter
	}
	if b.namespace != "" {
		envMap[Namespace] = b.namespace
	}
	if b.region != "" {
		envMap[Region] = b.region
	}

	// Build the network related env vars
	buildNetworkEnv(envMap, b.networks, b.driverNetwork)

	// Build the addr of the other tasks
	for k, v := range b.otherPorts {
		envMap[k] = v
	}

	// Build the Consul Connect upstream env vars
	buildUpstreamsEnv(envMap, b.upstreams)

	// Build the Vault Token
	if b.injectVaultToken && b.vaultToken != "" {
		envMap[VaultToken] = b.vaultToken
	}

	// Build the Vault Namespace
	if b.injectVaultToken && b.vaultNamespace != "" {
		envMap[VaultNamespace] = b.vaultNamespace
	}

	// Copy task meta
	for k, v := range b.taskMeta {
		envMap[k] = v
	}

	// Interpolate and add environment variables
	for k, v := range b.hostEnv {
		envMap[k] = hargs.ReplaceEnv(v, nodeAttrs, envMap)
	}

	// Copy interpolated task env vars second as they override host env vars
	for k, v := range b.envvars {
		envMap[k] = hargs.ReplaceEnv(v, nodeAttrs, envMap)
	}

	// Copy hook env vars in the order the hooks were run
	for _, h := range b.hookNames {
		for k, v := range b.hookEnvs[h] {
			e := hargs.ReplaceEnv(v, nodeAttrs, envMap)
			envMap[k] = e

			if h == b.deviceHookName {
				if deviceEnvs == nil {
					deviceEnvs = make(map[string]string, len(b.hookEnvs[h]))
				}

				deviceEnvs[k] = e
			}
		}
	}

	// Copy template env vars as they override task env vars
	for k, v := range b.templateEnv {
		envMap[k] = v
	}

	// Clean keys (see #2405)
	prefixesToClean := [...]string{AddrPrefix, IpPrefix, PortPrefix, HostPortPrefix, MetaPrefix}
	cleanedEnv := make(map[string]string, len(envMap))
	for k, v := range envMap {
		cleanedK := k
		for i := range prefixesToClean {
			if strings.HasPrefix(k, prefixesToClean[i]) {
				cleanedK = helper.CleanEnvVar(k, '_')
			}
		}
		cleanedEnv[cleanedK] = v
	}

	return cleanedEnv, deviceEnvs
}

// Build must be called after all the task's environment values have been set.
func (b *Builder) Build() *TaskEnv {
	nodeAttrs := make(map[string]string)

	b.mu.RLock()
	defer b.mu.RUnlock()

	if b.region != "" {
		// Copy region over to node attrs
		nodeAttrs[nodeRegionKey] = b.region
	}
	// Copy node attributes
	for k, v := range b.nodeAttrs {
		nodeAttrs[k] = v
	}

	envMap, deviceEnvs := b.buildEnv(b.allocDir, b.localDir, b.secretsDir, nodeAttrs)
	envMapClient, _ := b.buildEnv(b.clientSharedAllocDir, b.clientTaskLocalDir, b.clientTaskSecretsDir, nodeAttrs)

	return NewTaskEnv(envMap, envMapClient, deviceEnvs, nodeAttrs, b.clientTaskRoot, b.clientSharedAllocDir)
}
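
// exampleBuild is an illustrative sketch, not part of the original file:
// a Builder is normally created via NewBuilder from a node, alloc, and
// task, but the fluent setters can also populate an empty builder.
func exampleBuild() {
	env := NewEmptyBuilder().
		SetAllocDir("/alloc").
		SetTaskLocalDir("/local").
		SetSecretsDir("/secrets").
		Build()
	fmt.Println(env.ReplaceEnv("${NOMAD_TASK_DIR}")) // prints /local
}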

// UpdateTask updates the environment based on a new alloc and task.
func (b *Builder) UpdateTask(alloc *structs.Allocation, task *structs.Task) *Builder {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.setTask(task).setAlloc(alloc)
}

// SetHookEnv sets environment variables from a hook. Variables are
// Last-Write-Wins, so if a hook writes a variable that's also written by a
// later hook, the later hook's value always gets used.
func (b *Builder) SetHookEnv(hook string, envs map[string]string) *Builder {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.setHookEnvLocked(hook, envs)
}

// setHookEnvLocked is the implementation of setting hook environment variables
// and should be called with the lock held.
func (b *Builder) setHookEnvLocked(hook string, envs map[string]string) *Builder {
	if _, exists := b.hookEnvs[hook]; !exists {
		b.hookNames = append(b.hookNames, hook)
	}
	b.hookEnvs[hook] = envs

	return b
}
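
// exampleHookEnvs is an illustrative sketch, not part of the original file,
// of the last-write-wins ordering described above: hooks are applied in the
// order they first set variables, so the later hook wins for a shared key.
func exampleHookEnvs() {
	b := NewEmptyBuilder()
	b.SetHookEnv("hookA", map[string]string{"SHARED": "1"})
	b.SetHookEnv("hookB", map[string]string{"SHARED": "2"})
	env := b.Build()
	fmt.Println(env.Map()["SHARED"]) // prints 2: hookB ran after hookA
}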

// SetDeviceHookEnv sets environment variables from a device hook. Variables are
// Last-Write-Wins, so if a hook writes a variable that's also written by a
// later hook, the later hook's value always gets used.
func (b *Builder) SetDeviceHookEnv(hookName string, envs map[string]string) *Builder {
	b.mu.Lock()
	defer b.mu.Unlock()

	// Store the device hook name
	b.deviceHookName = hookName
	return b.setHookEnvLocked(hookName, envs)
}

// setTask is called from NewBuilder to populate task related environment
// variables.
func (b *Builder) setTask(task *structs.Task) *Builder {
	if task == nil {
		return b
	}
	b.taskName = task.Name
	b.envvars = make(map[string]string, len(task.Env))
	for k, v := range task.Env {
		b.envvars[k] = v
	}

	// COMPAT(0.11): Remove in 0.11
	if task.Resources == nil {
		b.memLimit = 0
		b.cpuLimit = 0
	} else {
		b.memLimit = int64(task.Resources.MemoryMB)
		b.cpuLimit = int64(task.Resources.CPU)
	}
	return b
}

// setAlloc is called from NewBuilder to populate alloc related environment
// variables.
func (b *Builder) setAlloc(alloc *structs.Allocation) *Builder {
	b.allocId = alloc.ID
	b.allocName = alloc.Name
	b.groupName = alloc.TaskGroup
	b.allocIndex = int(alloc.Index())
	b.jobID = alloc.Job.ID
	b.jobName = alloc.Job.Name
	b.jobParentID = alloc.Job.ParentID
	b.namespace = alloc.Namespace

	// Set meta
	combined := alloc.Job.CombinedTaskMeta(alloc.TaskGroup, b.taskName)
	// taskMetaSize is double the number of meta keys to account for both the
	// given and upper-cased variants
	taskMetaSize := len(combined) * 2

	// if job is parameterized initialize optional meta to empty strings
	if alloc.Job.Dispatched {
		optionalMetaCount := len(alloc.Job.ParameterizedJob.MetaOptional)
		b.taskMeta = make(map[string]string, taskMetaSize+optionalMetaCount*2)

		for _, k := range alloc.Job.ParameterizedJob.MetaOptional {
			b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, strings.ToUpper(k))] = ""
			b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, k)] = ""
		}
	} else {
		b.taskMeta = make(map[string]string, taskMetaSize)
	}

	for k, v := range combined {
		b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, strings.ToUpper(k))] = v
		b.taskMeta[fmt.Sprintf("%s%s", MetaPrefix, k)] = v
	}

	tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup)

	b.otherPorts = make(map[string]string, len(tg.Tasks)*2)

	// Protect against invalid allocs where AllocatedResources isn't set.
	// TestClient_AddAllocError explicitly tests for this condition
	if alloc.AllocatedResources != nil {
		// Populate task resources
		if tr, ok := alloc.AllocatedResources.Tasks[b.taskName]; ok {
			b.cpuLimit = tr.Cpu.CpuShares
			b.memLimit = tr.Memory.MemoryMB

			// Copy networks to prevent sharing
			b.networks = make([]*structs.NetworkResource, len(tr.Networks))
			for i, n := range tr.Networks {
				b.networks[i] = n.Copy()
			}
		}

		// COMPAT(1.0): remove in 1.0 when AllocatedPorts can be used exclusively
		// Add ports from other tasks
		for taskName, resources := range alloc.AllocatedResources.Tasks {
			if taskName == b.taskName {
				continue
			}

			for _, nw := range resources.Networks {
				for _, p := range nw.ReservedPorts {
					addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
				}
				for _, p := range nw.DynamicPorts {
					addPort(b.otherPorts, taskName, nw.IP, p.Label, p.Value)
				}
			}
		}

		// COMPAT(1.0): remove in 1.0 when AllocatedPorts can be used exclusively
		// Add ports from group networks
		//TODO Expose IPs but possibly only via variable interpolation
		for _, nw := range alloc.AllocatedResources.Shared.Networks {
			for _, p := range nw.ReservedPorts {
				addGroupPort(b.otherPorts, p)
			}
			for _, p := range nw.DynamicPorts {
				addGroupPort(b.otherPorts, p)
			}
		}

		// Add any allocated host ports
		if alloc.AllocatedResources.Shared.Ports != nil {
			addPorts(b.otherPorts, alloc.AllocatedResources.Shared.Ports)
		}
	}

	upstreams := []structs.ConsulUpstream{}
	for _, svc := range tg.Services {
		if svc.Connect.HasSidecar() && svc.Connect.SidecarService.HasUpstreams() {
			upstreams = append(upstreams, svc.Connect.SidecarService.Proxy.Upstreams...)
		}
	}
	if len(upstreams) > 0 {
		b.setUpstreamsLocked(upstreams)
	}

	return b
}

// setNode is called from NewBuilder to populate node attributes.
func (b *Builder) setNode(n *structs.Node) *Builder {
	b.nodeAttrs = make(map[string]string, 4+len(n.Attributes)+len(n.Meta))
	b.nodeAttrs[nodeIdKey] = n.ID
	b.nodeAttrs[nodeNameKey] = n.Name
	b.nodeAttrs[nodeClassKey] = n.NodeClass
	b.nodeAttrs[nodeDcKey] = n.Datacenter
	b.datacenter = n.Datacenter

	// Set up the attributes.
	for k, v := range n.Attributes {
		b.nodeAttrs[fmt.Sprintf("%s%s", nodeAttributePrefix, k)] = v
	}

	// Set up the meta.
	for k, v := range n.Meta {
		b.nodeAttrs[fmt.Sprintf("%s%s", nodeMetaPrefix, k)] = v
	}
	return b
}

func (b *Builder) SetAllocDir(dir string) *Builder {
	b.mu.Lock()
	b.allocDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetTaskLocalDir(dir string) *Builder {
	b.mu.Lock()
	b.localDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetClientSharedAllocDir(dir string) *Builder {
	b.mu.Lock()
	b.clientSharedAllocDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetClientTaskRoot(dir string) *Builder {
	b.mu.Lock()
	b.clientTaskRoot = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetClientTaskLocalDir(dir string) *Builder {
	b.mu.Lock()
	b.clientTaskLocalDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetClientTaskSecretsDir(dir string) *Builder {
	b.mu.Lock()
	b.clientTaskSecretsDir = dir
	b.mu.Unlock()
	return b
}

func (b *Builder) SetSecretsDir(dir string) *Builder {
	b.mu.Lock()
	b.secretsDir = dir
	b.mu.Unlock()
	return b
}

// SetDriverNetwork sets the network defined by the driver.
func (b *Builder) SetDriverNetwork(n *drivers.DriverNetwork) *Builder {
	ncopy := n.Copy()
	b.mu.Lock()
	b.driverNetwork = ncopy
	b.mu.Unlock()
	return b
}

// buildNetworkEnv builds network-related env vars in the given map.
//
//	Auto:   NOMAD_PORT_<label>
//	Host:   NOMAD_IP_<label>, NOMAD_ADDR_<label>, NOMAD_HOST_PORT_<label>
//
// Handled by setAlloc -> otherPorts:
//
//	Task:   NOMAD_TASK_{IP,PORT,ADDR}_<task>_<label> # Always host values
//
func buildNetworkEnv(envMap map[string]string, nets structs.Networks, driverNet *drivers.DriverNetwork) {
	for _, n := range nets {
		for _, p := range n.ReservedPorts {
			buildPortEnv(envMap, p, n.IP, driverNet)
		}
		for _, p := range n.DynamicPorts {
			buildPortEnv(envMap, p, n.IP, driverNet)
		}
	}
}

func buildPortEnv(envMap map[string]string, p structs.Port, ip string, driverNet *drivers.DriverNetwork) {
	// Host IP, port, and address
	portStr := strconv.Itoa(p.Value)
	envMap[IpPrefix+p.Label] = ip
	envMap[HostPortPrefix+p.Label] = portStr
	envMap[AddrPrefix+p.Label] = net.JoinHostPort(ip, portStr)

	// Set Port to task's value if there's a port map
	if driverNet != nil && driverNet.PortMap[p.Label] != 0 {
		envMap[PortPrefix+p.Label] = strconv.Itoa(driverNet.PortMap[p.Label])
	} else {
		// Default to host's
		envMap[PortPrefix+p.Label] = portStr
	}
}
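
// examplePortEnv is an illustrative sketch, not part of the original file,
// of the variables produced for a single port with a driver port map: the
// task sees its mapped port in NOMAD_PORT_<label> while the host port stays
// available via NOMAD_HOST_PORT_<label>. The values are made up.
func examplePortEnv() {
	envMap := map[string]string{}
	p := structs.Port{Label: "http", Value: 23456}
	dn := &drivers.DriverNetwork{PortMap: map[string]int{"http": 8080}}
	buildPortEnv(envMap, p, "10.0.0.1", dn)
	fmt.Println(envMap[PortPrefix+"http"])     // 8080 (task's mapped port)
	fmt.Println(envMap[HostPortPrefix+"http"]) // 23456 (host port)
	fmt.Println(envMap[AddrPrefix+"http"])     // 10.0.0.1:23456
}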

// SetUpstreams sets the upstreams defined by Connect-enabled group services.
func (b *Builder) SetUpstreams(upstreams []structs.ConsulUpstream) *Builder {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.setUpstreamsLocked(upstreams)
}

func (b *Builder) setUpstreamsLocked(upstreams []structs.ConsulUpstream) *Builder {
	b.upstreams = upstreams
	return b
}

// buildUpstreamsEnv builds NOMAD_UPSTREAM_{IP,PORT,ADDR}_{destination} vars
func buildUpstreamsEnv(envMap map[string]string, upstreams []structs.ConsulUpstream) {
	// Proxy sidecars always bind to localhost
	const ip = "127.0.0.1"
	for _, u := range upstreams {
		port := strconv.Itoa(u.LocalBindPort)
		envMap[UpstreamPrefix+"IP_"+u.DestinationName] = ip
		envMap[UpstreamPrefix+"PORT_"+u.DestinationName] = port
		envMap[UpstreamPrefix+"ADDR_"+u.DestinationName] = net.JoinHostPort(ip, port)

		// Also add cleaned version
		cleanName := helper.CleanEnvVar(u.DestinationName, '_')
		envMap[UpstreamPrefix+"ADDR_"+cleanName] = net.JoinHostPort(ip, port)
		envMap[UpstreamPrefix+"IP_"+cleanName] = ip
		envMap[UpstreamPrefix+"PORT_"+cleanName] = port
	}
}
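
// exampleUpstreamEnv is an illustrative sketch, not part of the original
// file: each upstream yields NOMAD_UPSTREAM_{IP,PORT,ADDR}_<destination>
// pointing at the local proxy listener on 127.0.0.1. The destination name
// and port are made up.
func exampleUpstreamEnv() {
	envMap := map[string]string{}
	ups := []structs.ConsulUpstream{{DestinationName: "db", LocalBindPort: 5432}}
	buildUpstreamsEnv(envMap, ups)
	fmt.Println(envMap[UpstreamPrefix+"ADDR_db"]) // 127.0.0.1:5432
}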

// SetPortMapEnvs sets the PortMap related environment variables on the map
func SetPortMapEnvs(envs map[string]string, ports map[string]int) map[string]string {
	if envs == nil {
		envs = map[string]string{}
	}

	for portLabel, port := range ports {
		portEnv := helper.CleanEnvVar(PortPrefix+portLabel, '_')
		envs[portEnv] = strconv.Itoa(port)
	}
	return envs
}
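
// exampleSetPortMapEnvs is an illustrative sketch, not part of the original
// file, assuming CleanEnvVar replaces characters invalid in env var names
// with '_': a label such as "admin-ui" would become NOMAD_PORT_admin_ui.
func exampleSetPortMapEnvs() {
	envs := SetPortMapEnvs(nil, map[string]int{"admin-ui": 9090})
	fmt.Println(envs["NOMAD_PORT_admin_ui"]) // 9090, under the assumption above
}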

// SetHostEnvvars adds the host environment variables to the tasks. The filter
// parameter can be used to keep host environment variables out of the tasks.
func (b *Builder) SetHostEnvvars(filter []string) *Builder {
	filterMap := make(map[string]struct{}, len(filter))
	for _, f := range filter {
		filterMap[f] = struct{}{}
	}

	fullHostEnv := os.Environ()
	filteredHostEnv := make(map[string]string, len(fullHostEnv))
	for _, e := range fullHostEnv {
		parts := strings.SplitN(e, "=", 2)
		key, value := parts[0], parts[1]

		// Skip filtered environment variables
		if _, filtered := filterMap[key]; filtered {
			continue
		}

		filteredHostEnv[key] = value
	}

	b.mu.Lock()
	b.hostEnv = filteredHostEnv
	b.mu.Unlock()
	return b
}

func (b *Builder) SetTemplateEnv(m map[string]string) *Builder {
	b.mu.Lock()
	b.templateEnv = m
	b.mu.Unlock()
	return b
}

func (b *Builder) SetVaultToken(token, namespace string, inject bool) *Builder {
	b.mu.Lock()
	b.vaultToken = token
	b.vaultNamespace = namespace
	b.injectVaultToken = inject
	b.mu.Unlock()
	return b
}

// addPort adds the addr, IP, and port entries for another task's port to an
// env var map.
func addPort(m map[string]string, taskName, ip, portLabel string, port int) {
	key := fmt.Sprintf("%s%s_%s", AddrPrefix, taskName, portLabel)
	m[key] = fmt.Sprintf("%s:%d", ip, port)
	key = fmt.Sprintf("%s%s_%s", IpPrefix, taskName, portLabel)
	m[key] = ip
	key = fmt.Sprintf("%s%s_%s", PortPrefix, taskName, portLabel)
	m[key] = strconv.Itoa(port)
}

// addGroupPort adds a group network port. The To value is used if one is
// specified.
func addGroupPort(m map[string]string, port structs.Port) {
	if port.To > 0 {
		m[PortPrefix+port.Label] = strconv.Itoa(port.To)
	} else {
		m[PortPrefix+port.Label] = strconv.Itoa(port.Value)
	}

	m[HostPortPrefix+port.Label] = strconv.Itoa(port.Value)
}

func addPorts(m map[string]string, ports structs.AllocatedPorts) {
	for _, p := range ports {
		m[AddrPrefix+p.Label] = fmt.Sprintf("%s:%d", p.HostIP, p.Value)
		m[HostAddrPrefix+p.Label] = fmt.Sprintf("%s:%d", p.HostIP, p.Value)
		m[IpPrefix+p.Label] = p.HostIP
		m[HostIpPrefix+p.Label] = p.HostIP
		if p.To > 0 {
			val := strconv.Itoa(p.To)
			m[PortPrefix+p.Label] = val
			m[AllocPortPrefix+p.Label] = val
		} else {
			val := strconv.Itoa(p.Value)
			m[PortPrefix+p.Label] = val
			m[AllocPortPrefix+p.Label] = val
		}

		m[HostPortPrefix+p.Label] = strconv.Itoa(p.Value)
	}
}
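
// exampleGroupPort is an illustrative sketch, not part of the original
// file, of the To-vs-Value split above: when a group port maps to a
// container port, NOMAD_PORT_<label> carries the mapped port while
// NOMAD_HOST_PORT_<label> keeps the host port. The values are made up.
func exampleGroupPort() {
	m := map[string]string{}
	addGroupPort(m, structs.Port{Label: "http", Value: 20000, To: 8080})
	fmt.Println(m[PortPrefix+"http"], m[HostPortPrefix+"http"]) // 8080 20000
}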