github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/nomad/structs/funcs.go

package structs

import (
	"crypto/subtle"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"math"
	"sort"
	"strconv"
	"strings"

	multierror "github.com/hashicorp/go-multierror"
	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/acl"
	"golang.org/x/crypto/blake2b"
)

// MergeMultierrorWarnings takes job warnings and canonicalization warnings
// and merges them into a returnable string. Any of the errors may be nil.
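// For example, two warnings would render as:
//
//	2 warning(s):
//
//	* first warning
//	* second warning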
func MergeMultierrorWarnings(errs ...error) string {
	if len(errs) == 0 {
		return ""
	}

	var mErr multierror.Error
	_ = multierror.Append(&mErr, errs...)
	mErr.ErrorFormat = warningsFormatter

	return mErr.Error()
}

// warningsFormatter is used to format job warnings
func warningsFormatter(es []error) string {
	sb := strings.Builder{}
	sb.WriteString(fmt.Sprintf("%d warning(s):\n", len(es)))

	for i := range es {
		sb.WriteString(fmt.Sprintf("\n* %s", es[i]))
	}

	return sb.String()
}

// RemoveAllocs is used to remove any allocs with the given IDs
// from the list of allocations
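// The input slice is modified in place and the shortened slice is returned.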
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
	// Convert remove into a set
	removeSet := make(map[string]struct{})
	for _, remove := range remove {
		removeSet[remove.ID] = struct{}{}
	}

	n := len(alloc)
	for i := 0; i < n; i++ {
		if _, ok := removeSet[alloc[i].ID]; ok {
			alloc[i], alloc[n-1] = alloc[n-1], nil
			i--
			n--
		}
	}

	alloc = alloc[:n]
	return alloc
}

// FilterTerminalAllocs filters out all allocations in a terminal state and
// returns the latest terminal allocations
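// The input slice is modified in place. The returned map is keyed by
// allocation name and holds, for each name, the terminal allocation with the
// highest create index.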
func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
	terminalAllocsByName := make(map[string]*Allocation)
	n := len(allocs)
	for i := 0; i < n; i++ {
		if allocs[i].TerminalStatus() {

			// Add the allocation to the terminal allocs map if it's not already
			// added or has a higher create index than the one which is
			// currently present.
			alloc, ok := terminalAllocsByName[allocs[i].Name]
			if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
				terminalAllocsByName[allocs[i].Name] = allocs[i]
			}

			// Remove the allocation
			allocs[i], allocs[n-1] = allocs[n-1], nil
			i--
			n--
		}
	}
	return allocs[:n], terminalAllocsByName
}

// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it has already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions. If checkDevices is set to true, we check if
// there is a device oversubscription.
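// A minimal call sketch (variable names are illustrative):
//
//	fit, dimension, used, err := AllocsFit(node, proposedAllocs, nil, true)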
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex, checkDevices bool) (bool, string, *ComparableResources, error) {
	// Compute the allocs' utilization from zero
	used := new(ComparableResources)

	// For each alloc, add the resources
	for _, alloc := range allocs {
		// Do not consider the resource impact of terminal allocations
		if alloc.TerminalStatus() {
			continue
		}

		used.Add(alloc.ComparableResources())
	}

	// Check that the node resources (after subtracting reserved) are a
	// super set of those that are being allocated
	available := node.ComparableResources()
	available.Subtract(node.ComparableReservedResources())
	if superset, dimension := available.Superset(used); !superset {
		return false, dimension, used, nil
	}

	// Create the network index if missing
	if netIdx == nil {
		netIdx = NewNetworkIndex()
		defer netIdx.Release()
		if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
			return false, "reserved port collision", used, nil
		}
	}

	// Check if the network is overcommitted
	if netIdx.Overcommitted() {
		return false, "bandwidth exceeded", used, nil
	}

	// Check devices
	if checkDevices {
		accounter := NewDeviceAccounter(node)
		if accounter.AddAllocs(allocs) {
			return false, "device oversubscribed", used, nil
		}
	}

	// Allocations fit!
	return true, "", used, nil
}

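// computeFreePercentage returns the fraction of the node's CPU and memory
// that would remain free given the utilization in util, after subtracting the
// node's reserved resources from its capacity.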
func computeFreePercentage(node *Node, util *ComparableResources) (freePctCpu, freePctRam float64) {
	// COMPAT(0.11): Remove in 0.11
	reserved := node.ComparableReservedResources()
	res := node.ComparableResources()

	// Determine the node availability
	nodeCpu := float64(res.Flattened.Cpu.CpuShares)
	nodeMem := float64(res.Flattened.Memory.MemoryMB)
	if reserved != nil {
		nodeCpu -= float64(reserved.Flattened.Cpu.CpuShares)
		nodeMem -= float64(reserved.Flattened.Memory.MemoryMB)
	}

	// Compute the free percentage
	freePctCpu = 1 - (float64(util.Flattened.Cpu.CpuShares) / nodeCpu)
	freePctRam = 1 - (float64(util.Flattened.Memory.MemoryMB) / nodeMem)
	return freePctCpu, freePctRam
}

// ScoreFitBinPack computes a fit score to achieve bin packing behavior.
// Score is in [0, 18]
//
// It is based on the BestFit v3 algorithm from the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
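// For example, an idle node (freePctCpu = freePctRam = 1.0) gives
// total = 10^1 + 10^1 = 20 and a score of 0, while a perfectly packed node
// (freePctCpu = freePctRam = 0) gives total = 2 and the maximum score of 18.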
func ScoreFitBinPack(node *Node, util *ComparableResources) float64 {
	freePctCpu, freePctRam := computeFreePercentage(node, util)

	// Total will be "maximized" the smaller the value is.
	// At 100% utilization, the total is 2, while at 0% util it is 20.
	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)

	// Invert so that the "maximized" total represents a high-value
	// score. Because the floor is 20, we simply use that as an anchor.
	// This means at a perfect fit, we return 18 as the score.
	score := 20.0 - total

	// Bound the score, just in case
	// If the score is over 18, that means we've overfit the node.
	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}

// ScoreFitSpread computes a fit score to achieve spread behavior.
// Score is in [0, 18]
//
// This is equivalent to the Worst Fit heuristic from
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
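// The relationship is inverted relative to ScoreFitBinPack: an idle node
// scores 18, while a fully packed node scores 0.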
func ScoreFitSpread(node *Node, util *ComparableResources) float64 {
	freePctCpu, freePctRam := computeFreePercentage(node, util)
	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)
	score := total - 2

	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}

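// CopySliceConstraints returns a new slice containing a copy of each
// constraint, or nil if the input is empty.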
func CopySliceConstraints(s []*Constraint) []*Constraint {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Constraint, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

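// CopySliceAffinities returns a new slice containing a copy of each affinity,
// or nil if the input is empty.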
func CopySliceAffinities(s []*Affinity) []*Affinity {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Affinity, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

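// CopySliceSpreads returns a new slice containing a copy of each spread, or
// nil if the input is empty.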
func CopySliceSpreads(s []*Spread) []*Spread {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Spread, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

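// CopySliceSpreadTarget returns a new slice containing a copy of each spread
// target, or nil if the input is empty.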
func CopySliceSpreadTarget(s []*SpreadTarget) []*SpreadTarget {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*SpreadTarget, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

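// CopySliceNodeScoreMeta returns a new slice containing a copy of each node
// score metadata entry, or nil if the input is empty.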
func CopySliceNodeScoreMeta(s []*NodeScoreMeta) []*NodeScoreMeta {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*NodeScoreMeta, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
// the set of required policies
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
	set := make(map[string]struct{})

	for _, tgp := range policies {
		for _, tp := range tgp {
			for _, p := range tp.Policies {
				set[p] = struct{}{}
			}
		}
	}

	flattened := make([]string, 0, len(set))
	for p := range set {
		flattened = append(flattened, p)
	}
	return flattened
}

// VaultNamespaceSet takes the structure returned by VaultPolicies and
// returns a set of required namespaces
func VaultNamespaceSet(policies map[string]map[string]*Vault) []string {
	set := make(map[string]struct{})

	for _, tgp := range policies {
		for _, tp := range tgp {
			if tp.Namespace != "" {
				set[tp.Namespace] = struct{}{}
			}
		}
	}

	flattened := make([]string, 0, len(set))
	for p := range set {
		flattened = append(flattened, p)
	}
	return flattened
}

// DenormalizeAllocationJobs is used to attach a job to all allocations that are
// non-terminal and do not have a job already. This is useful in cases where the
// job is normalized.
func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
	if job != nil {
		for _, alloc := range allocs {
			if alloc.Job == nil && !alloc.TerminalStatus() {
				alloc.Job = job
			}
		}
	}
}

// AllocName returns the name of the allocation given the input.
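// For example, AllocName("example", "cache", 1) returns "example.cache[1]".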
func AllocName(job, group string, idx uint) string {
	return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}

// ACLPolicyListHash returns a consistent hash for a set of policies.
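// The hash covers each policy's name and modify index, so it changes whenever
// a policy in the set is added, removed, or updated.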
func ACLPolicyListHash(policies []*ACLPolicy) string {
	cacheKeyHash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}
	for _, policy := range policies {
		_, _ = cacheKeyHash.Write([]byte(policy.Name))
		_ = binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
	}
	cacheKey := string(cacheKeyHash.Sum(nil))
	return cacheKey
}

// CompileACLObject compiles a set of ACL policies into an ACL object with a cache
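// Note that the input policies slice is sorted in place (by name) so that a
// consistent cache key can be derived.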
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
	// Sort the policies to ensure consistent ordering
	sort.Slice(policies, func(i, j int) bool {
		return policies[i].Name < policies[j].Name
	})

	// Determine the cache key
	cacheKey := ACLPolicyListHash(policies)
	aclRaw, ok := cache.Get(cacheKey)
	if ok {
		return aclRaw.(*acl.ACL), nil
	}

	// Parse the policies
	parsed := make([]*acl.Policy, 0, len(policies))
	for _, policy := range policies {
		p, err := acl.Parse(policy.Rules)
		if err != nil {
			return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
		}
		parsed = append(parsed, p)
	}

	// Create the ACL object
	aclObj, err := acl.NewACL(false, parsed)
	if err != nil {
		return nil, fmt.Errorf("failed to construct ACL: %v", err)
	}

	// Update the cache
	cache.Add(cacheKey, aclObj)
	return aclObj, nil
}

// GenerateMigrateToken will create a token for a client to access an
// authenticated volume of another client to migrate data for sticky volumes.
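// The token is the BLAKE2b-512 hash of the allocation ID keyed with the node's
// secret ID, encoded with URL-safe base64.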
func GenerateMigrateToken(allocID, nodeSecretID string) (string, error) {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return "", err
	}

	_, _ = h.Write([]byte(allocID))

	return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}

// CompareMigrateToken returns true if two migration tokens can be computed and
// are equal.
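// The comparison is performed in constant time so that token contents are not
// leaked through timing differences.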
func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return false
	}

	_, _ = h.Write([]byte(allocID))

	otherBytes, err := base64.URLEncoding.DecodeString(otherMigrateToken)
	if err != nil {
		return false
	}
	return subtle.ConstantTimeCompare(h.Sum(nil), otherBytes) == 1
}

// ParsePortRanges parses the passed port range string and returns a list of
// the ports. The specification is a comma-separated list of either port
// numbers or port ranges. A port number is a single integer and a port range
// is two integers separated by a hyphen. As an example, the following spec
// would convert to: ParsePortRanges("10,12-14,16") -> []uint64{10, 12, 13, 14, 16}
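// Duplicate ports are collapsed, whitespace around each part is trimmed, and
// the result is returned in ascending order.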
func ParsePortRanges(spec string) ([]uint64, error) {
	parts := strings.Split(spec, ",")

	// Hot path the empty case
	if len(parts) == 1 && parts[0] == "" {
		return nil, nil
	}

	ports := make(map[uint64]struct{})
	for _, part := range parts {
		part = strings.TrimSpace(part)
		rangeParts := strings.Split(part, "-")
		l := len(rangeParts)
		switch l {
		case 1:
			if val := rangeParts[0]; val == "" {
				return nil, fmt.Errorf("can't specify empty port")
			} else {
				port, err := strconv.ParseUint(val, 10, 0)
				if err != nil {
					return nil, err
				}
				ports[port] = struct{}{}
			}
		case 2:
			// We are parsing a range
			start, err := strconv.ParseUint(rangeParts[0], 10, 0)
			if err != nil {
				return nil, err
			}

			end, err := strconv.ParseUint(rangeParts[1], 10, 0)
			if err != nil {
				return nil, err
			}

			if end < start {
				return nil, fmt.Errorf("invalid range: ending value (%v) is less than starting value (%v)", end, start)
			}

			for i := start; i <= end; i++ {
				ports[i] = struct{}{}
			}
		default:
			return nil, fmt.Errorf("can only parse single port numbers or port ranges (ex. 80,100-120,150)")
		}
	}

	var results []uint64
	for port := range ports {
		results = append(results, port)
	}

	sort.Slice(results, func(i, j int) bool {
		return results[i] < results[j]
	})
	return results, nil
}