github.com/ferranbt/nomad@v0.9.3-0.20190607002617-85c449b7667c/nomad/structs/funcs.go

package structs

import (
	"crypto/subtle"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"math"
	"sort"
	"strconv"
	"strings"

	multierror "github.com/hashicorp/go-multierror"
	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/acl"
	"golang.org/x/crypto/blake2b"
)

// MergeMultierrorWarnings takes job warnings and canonicalization warnings and
// merges them into a single, returnable string. Any of the errors may be nil.
func MergeMultierrorWarnings(warnings ...error) string {
	var warningMsg multierror.Error
	for _, warn := range warnings {
		if warn != nil {
			multierror.Append(&warningMsg, warn)
		}
	}

	if len(warningMsg.Errors) == 0 {
		return ""
	}

	// Set the formatter
	warningMsg.ErrorFormat = warningsFormatter
	return warningMsg.Error()
}

// warningsFormatter is used to format job warnings
func warningsFormatter(es []error) string {
	points := make([]string, len(es))
	for i, err := range es {
		points[i] = fmt.Sprintf("* %s", err)
	}

	return fmt.Sprintf(
		"%d warning(s):\n\n%s",
		len(es), strings.Join(points, "\n"))
}
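
// Usage sketch for MergeMultierrorWarnings (illustrative only; jobWarning and
// canonicalizeWarning stand in for hypothetical non-nil errors, not values
// defined in this file):
//
//	msg := MergeMultierrorWarnings(jobWarning, canonicalizeWarning, nil)
//	if msg != "" {
//		fmt.Println(msg) // e.g. "2 warning(s):\n\n* ...\n* ..."
//	}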

// RemoveAllocs removes the allocations in remove (matched by allocation ID)
// from the alloc list.
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
	// Convert remove into a set
	removeSet := make(map[string]struct{})
	for _, remove := range remove {
		removeSet[remove.ID] = struct{}{}
	}

	n := len(alloc)
	for i := 0; i < n; i++ {
		if _, ok := removeSet[alloc[i].ID]; ok {
			alloc[i], alloc[n-1] = alloc[n-1], nil
			i--
			n--
		}
	}

	alloc = alloc[:n]
	return alloc
}

// FilterTerminalAllocs filters out all allocations in a terminal state and
// returns the remaining allocations along with a map of the latest terminal
// allocation for each allocation name.
func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
	terminalAllocsByName := make(map[string]*Allocation)
	n := len(allocs)
	for i := 0; i < n; i++ {
		if allocs[i].TerminalStatus() {

			// Add the allocation to the terminal allocs map if it's not
			// already present, or if it has a higher create index than the
			// one currently stored.
			alloc, ok := terminalAllocsByName[allocs[i].Name]
			if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
				terminalAllocsByName[allocs[i].Name] = allocs[i]
			}

			// Remove the allocation
			allocs[i], allocs[n-1] = allocs[n-1], nil
			i--
			n--
		}
	}
	return allocs[:n], terminalAllocsByName
}

// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it has already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions. If checkDevices is set to true, we check if
// there is a device oversubscription.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex, checkDevices bool) (bool, string, *ComparableResources, error) {
	// Compute the utilization from zero
	used := new(ComparableResources)

	// Add the reserved resources of the node
	used.Add(node.ComparableReservedResources())

	// For each alloc, add the resources
	for _, alloc := range allocs {
		// Do not consider the resource impact of terminal allocations
		if alloc.TerminalStatus() {
			continue
		}

		used.Add(alloc.ComparableResources())
	}

	// Check that the node resources are a superset of those
	// that are being allocated
	if superset, dimension := node.ComparableResources().Superset(used); !superset {
		return false, dimension, used, nil
	}

	// Create the network index if missing
	if netIdx == nil {
		netIdx = NewNetworkIndex()
		defer netIdx.Release()
		if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
			return false, "reserved port collision", used, nil
		}
	}

	// Check if the network is overcommitted
	if netIdx.Overcommitted() {
		return false, "bandwidth exceeded", used, nil
	}

	// Check devices
	if checkDevices {
		accounter := NewDeviceAccounter(node)
		if accounter.AddAllocs(allocs) {
			return false, "device oversubscribed", used, nil
		}
	}

	// Allocations fit!
	return true, "", used, nil
}
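
// Usage sketch for AllocsFit (illustrative only; node and allocs are assumed
// to be a populated *Node and []*Allocation):
//
//	fit, dimension, used, err := AllocsFit(node, allocs, nil, true)
//	if err != nil {
//		// handle the error
//	} else if !fit {
//		// dimension names the failing check, e.g. an exhausted resource or
//		// "reserved port collision"; used holds the summed utilization.
//	}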

// ScoreFit is used to score the fit based on the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
// This is equivalent to their BestFit v3
func ScoreFit(node *Node, util *ComparableResources) float64 {
	// COMPAT(0.11): Remove in 0.11
	reserved := node.ComparableReservedResources()
	res := node.ComparableResources()

	// Determine the node availability
	nodeCpu := float64(res.Flattened.Cpu.CpuShares)
	nodeMem := float64(res.Flattened.Memory.MemoryMB)
	if reserved != nil {
		nodeCpu -= float64(reserved.Flattened.Cpu.CpuShares)
		nodeMem -= float64(reserved.Flattened.Memory.MemoryMB)
	}

	// Compute the free percentage
	freePctCpu := 1 - (float64(util.Flattened.Cpu.CpuShares) / nodeCpu)
	freePctRam := 1 - (float64(util.Flattened.Memory.MemoryMB) / nodeMem)

	// A smaller total indicates a tighter (better) fit.
	// At 100% utilization the total is 2, while at 0% utilization it is 20.
	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)

	// Invert so that a tighter fit yields a higher score. The total tops out
	// at 20 (an idle node), so we use 20 as the anchor; a perfect fit (total
	// of 2) therefore scores 18.
	score := 20.0 - total

	// Bound the score, just in case.
	// A score above 18 means we've overfit the node (utilization above 100%).
	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}
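
// Worked example for ScoreFit (illustrative arithmetic with hypothetical
// numbers): on a node offering 4000 MHz of CPU and 8192 MB of memory, a
// utilization of 2000 MHz and 4096 MB leaves 50% of each dimension free, so
// total = 10^0.5 + 10^0.5 ≈ 6.32 and the score is 20 - 6.32 ≈ 13.68. Full
// utilization gives total = 2 and a score of 18; an idle node gives
// total = 20 and a score of 0.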

func CopySliceConstraints(s []*Constraint) []*Constraint {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Constraint, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceAffinities(s []*Affinity) []*Affinity {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Affinity, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceSpreads(s []*Spread) []*Spread {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Spread, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceSpreadTarget(s []*SpreadTarget) []*SpreadTarget {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*SpreadTarget, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceNodeScoreMeta(s []*NodeScoreMeta) []*NodeScoreMeta {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*NodeScoreMeta, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
// the set of required policies
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
	set := make(map[string]struct{})

	for _, tgp := range policies {
		for _, tp := range tgp {
			for _, p := range tp.Policies {
				set[p] = struct{}{}
			}
		}
	}

	flattened := make([]string, 0, len(set))
	for p := range set {
		flattened = append(flattened, p)
	}
	return flattened
}

// DenormalizeAllocationJobs is used to attach a job to all allocations that are
// non-terminal and do not have a job already. This is useful in cases where the
// job is normalized.
func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
	if job != nil {
		for _, alloc := range allocs {
			if alloc.Job == nil && !alloc.TerminalStatus() {
				alloc.Job = job
			}
		}
	}
}

// AllocName returns the name of an allocation given its job name, task group
// name, and index.
func AllocName(job, group string, idx uint) string {
	return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}

// ACLPolicyListHash returns a consistent hash for a set of policies.
func ACLPolicyListHash(policies []*ACLPolicy) string {
	cacheKeyHash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}
	for _, policy := range policies {
		cacheKeyHash.Write([]byte(policy.Name))
		binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
	}
	cacheKey := string(cacheKeyHash.Sum(nil))
	return cacheKey
}

// CompileACLObject compiles a set of ACL policies into an ACL object with a cache
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
	// Sort the policies to ensure consistent ordering
	sort.Slice(policies, func(i, j int) bool {
		return policies[i].Name < policies[j].Name
	})

	// Determine the cache key
	cacheKey := ACLPolicyListHash(policies)
	aclRaw, ok := cache.Get(cacheKey)
	if ok {
		return aclRaw.(*acl.ACL), nil
	}

	// Parse the policies
	parsed := make([]*acl.Policy, 0, len(policies))
	for _, policy := range policies {
		p, err := acl.Parse(policy.Rules)
		if err != nil {
			return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
		}
		parsed = append(parsed, p)
	}

	// Create the ACL object
	aclObj, err := acl.NewACL(false, parsed)
	if err != nil {
		return nil, fmt.Errorf("failed to construct ACL: %v", err)
	}

	// Update the cache
	cache.Add(cacheKey, aclObj)
	return aclObj, nil
}
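
// Usage sketch for CompileACLObject (illustrative only; the cache size and the
// policies slice are assumptions):
//
//	cache, _ := lru.New2Q(256)
//	aclObj, err := CompileACLObject(cache, policies)
//	if err != nil {
//		// a policy failed to parse or the ACL could not be constructed
//	}
//	// Repeated calls with the same policy set are served from the cache.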

// GenerateMigrateToken will create a token for a client to access an
// authenticated volume of another client to migrate data for sticky volumes.
func GenerateMigrateToken(allocID, nodeSecretID string) (string, error) {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return "", err
	}
	h.Write([]byte(allocID))
	return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}

// CompareMigrateToken returns true if the migrate token derived from allocID
// and nodeSecretID can be computed and matches otherMigrateToken. The
// comparison is constant-time.
func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return false
	}
	h.Write([]byte(allocID))

	otherBytes, err := base64.URLEncoding.DecodeString(otherMigrateToken)
	if err != nil {
		return false
	}
	return subtle.ConstantTimeCompare(h.Sum(nil), otherBytes) == 1
}
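
// Pairing sketch for the migrate token helpers (illustrative only; allocID and
// nodeSecretID are hypothetical values):
//
//	token, err := GenerateMigrateToken(allocID, nodeSecretID)
//	if err != nil {
//		// handle the error
//	}
//	// Later, the receiving side verifies the presented token:
//	ok := CompareMigrateToken(allocID, nodeSecretID, token) // true for a matching pair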

// ParsePortRanges parses the passed port range string and returns a list of the
// ports. The specification is a comma-separated list of either port numbers or
// port ranges. A port number is a single integer and a port range is two
// integers separated by a hyphen. For example,
// ParsePortRanges("10,12-14,16") returns []uint64{10, 12, 13, 14, 16}.
func ParsePortRanges(spec string) ([]uint64, error) {
	parts := strings.Split(spec, ",")

	// Hot path the empty case
	if len(parts) == 1 && parts[0] == "" {
		return nil, nil
	}

	ports := make(map[uint64]struct{})
	for _, part := range parts {
		part = strings.TrimSpace(part)
		rangeParts := strings.Split(part, "-")
		l := len(rangeParts)
		switch l {
		case 1:
			if val := rangeParts[0]; val == "" {
				return nil, fmt.Errorf("can't specify empty port")
			} else {
				port, err := strconv.ParseUint(val, 10, 0)
				if err != nil {
					return nil, err
				}
				ports[port] = struct{}{}
			}
		case 2:
			// We are parsing a range
			start, err := strconv.ParseUint(rangeParts[0], 10, 0)
			if err != nil {
				return nil, err
			}

			end, err := strconv.ParseUint(rangeParts[1], 10, 0)
			if err != nil {
				return nil, err
			}

			if end < start {
				return nil, fmt.Errorf("invalid range: starting value (%v) greater than ending value (%v)", start, end)
			}

			for i := start; i <= end; i++ {
				ports[i] = struct{}{}
			}
		default:
			return nil, fmt.Errorf("can only parse single port numbers or port ranges (ex. 80,100-120,150)")
		}
	}

	var results []uint64
	for port := range ports {
		results = append(results, port)
	}

	sort.Slice(results, func(i, j int) bool {
		return results[i] < results[j]
	})
	return results, nil
}
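
// Usage sketch for ParsePortRanges (illustrative only; the spec string is an
// arbitrary example):
//
//	ports, err := ParsePortRanges("22,8000-8002")
//	if err != nil {
//		// malformed spec, e.g. "80-" or "90-80"
//	}
//	// ports == []uint64{22, 8000, 8001, 8002}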