github.com/zoomfoo/nomad@v0.8.5-0.20180907175415-f28fd3a1a056/nomad/structs/funcs.go

package structs

import (
	"crypto/subtle"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"math"
	"sort"
	"strings"

	"golang.org/x/crypto/blake2b"

	multierror "github.com/hashicorp/go-multierror"
	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/acl"
)

// MergeMultierrorWarnings takes job warnings and canonicalization warnings
// and merges them into a single printable string. Any of the errors may be
// nil.
func MergeMultierrorWarnings(warnings ...error) string {
	var warningMsg multierror.Error
	for _, warn := range warnings {
		if warn != nil {
			multierror.Append(&warningMsg, warn)
		}
	}

	if len(warningMsg.Errors) == 0 {
		return ""
	}

	// Set the formatter
	warningMsg.ErrorFormat = warningsFormatter
	return warningMsg.Error()
}
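
// A minimal usage sketch (the warning text is assumed for illustration; this
// example is not part of the original file):
//
//	w := MergeMultierrorWarnings(
//		errors.New("driver config is deprecated"),
//		nil,
//	)
//	// w == "1 warning(s):\n\n* driver config is deprecated"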

// warningsFormatter is used to format job warnings
func warningsFormatter(es []error) string {
	points := make([]string, len(es))
	for i, err := range es {
		points[i] = fmt.Sprintf("* %s", err)
	}

	return fmt.Sprintf(
		"%d warning(s):\n\n%s",
		len(es), strings.Join(points, "\n"))
}

// RemoveAllocs is used to remove any allocs with the given IDs
// from the list of allocations
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
	// Convert remove into a set
	removeSet := make(map[string]struct{})
	for _, rem := range remove {
		removeSet[rem.ID] = struct{}{}
	}

	n := len(alloc)
	for i := 0; i < n; i++ {
		if _, ok := removeSet[alloc[i].ID]; ok {
			// Swap the last element into this slot and shrink the window;
			// the nil write lets the removed alloc be garbage collected.
			alloc[i], alloc[n-1] = alloc[n-1], nil
			i--
			n--
		}
	}

	alloc = alloc[:n]
	return alloc
}
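
// Illustrative sketch (alloc IDs assumed): removal swaps the final element
// into the removed slot, so the surviving order is not preserved.
//
//	a := &Allocation{ID: "a"}
//	b := &Allocation{ID: "b"}
//	c := &Allocation{ID: "c"}
//	out := RemoveAllocs([]*Allocation{a, b, c}, []*Allocation{a})
//	// out == [c, b]: c was swapped into index 0 and the slice shrank to 2.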

// FilterTerminalAllocs filters out all allocations in a terminal state and
// returns the latest terminal allocation for each allocation name
func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
	terminalAllocsByName := make(map[string]*Allocation)
	n := len(allocs)
	for i := 0; i < n; i++ {
		if allocs[i].TerminalStatus() {

			// Add the allocation to the terminal allocs map if it's not
			// already present, or if it has a higher CreateIndex than the
			// one currently recorded.
			alloc, ok := terminalAllocsByName[allocs[i].Name]
			if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
				terminalAllocsByName[allocs[i].Name] = allocs[i]
			}

			// Remove the allocation
			allocs[i], allocs[n-1] = allocs[n-1], nil
			i--
			n--
		}
	}
	return allocs[:n], terminalAllocsByName
}
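
// Hedged usage sketch (field uses assumed for illustration): callers get the
// still-live allocs plus, per alloc name, the most recently created terminal
// alloc.
//
//	live, terminal := FilterTerminalAllocs(allocs)
//	for name, alloc := range terminal {
//		fmt.Printf("latest terminal alloc for %s: %s\n", name, alloc.ID)
//	}
//	_ = live // only non-terminal allocs remain here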

// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it has already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) {
	// Compute the utilization from zero
	used := new(Resources)

	// Add the reserved resources of the node
	if node.Reserved != nil {
		if err := used.Add(node.Reserved); err != nil {
			return false, "", nil, err
		}
	}

	// For each alloc, add the resources
	for _, alloc := range allocs {
		if alloc.Resources != nil {
			if err := used.Add(alloc.Resources); err != nil {
				return false, "", nil, err
			}
		} else if alloc.TaskResources != nil {

			// Add the shared resources asked for by the allocation to the
			// used resources
			if err := used.Add(alloc.SharedResources); err != nil {
				return false, "", nil, err
			}
			// Allocations within the plan have the combined resources stripped
			// to save space, so sum up the individual task resources.
			for _, taskResource := range alloc.TaskResources {
				if err := used.Add(taskResource); err != nil {
					return false, "", nil, err
				}
			}
		} else {
			return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID)
		}
	}

	// Check that the node resources are a superset of those
	// that are being allocated
	if superset, dimension := node.Resources.Superset(used); !superset {
		return false, dimension, used, nil
	}

	// Create the network index if missing
	if netIdx == nil {
		netIdx = NewNetworkIndex()
		defer netIdx.Release()
		if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
			return false, "reserved port collision", used, nil
		}
	}

	// Check if the network is overcommitted
	if netIdx.Overcommitted() {
		return false, "bandwidth exceeded", used, nil
	}

	// Allocations fit!
	return true, "", used, nil
}
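
// Usage sketch (hypothetical call site; passing nil for netIdx lets AllocsFit
// build, check, and release its own network index):
//
//	fit, dimension, used, err := AllocsFit(node, proposed, nil)
//	if err != nil {
//		return err
//	}
//	if !fit {
//		return fmt.Errorf("allocs exceed node %q on dimension %q (used: %#v)",
//			node.ID, dimension, used)
//	}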

// ScoreFit is used to score the fit based on the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
// This is equivalent to their BestFit v3
func ScoreFit(node *Node, util *Resources) float64 {
	// Determine the node availability
	nodeCpu := float64(node.Resources.CPU)
	if node.Reserved != nil {
		nodeCpu -= float64(node.Reserved.CPU)
	}
	nodeMem := float64(node.Resources.MemoryMB)
	if node.Reserved != nil {
		nodeMem -= float64(node.Reserved.MemoryMB)
	}

	// Compute the free percentage
	freePctCpu := 1 - (float64(util.CPU) / nodeCpu)
	freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)

	// Total will be "maximized" the smaller the value is.
	// At 100% utilization, the total is 2, while at 0% util it is 20.
	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)

	// Invert so that the "maximized" total represents a high-value
	// score. Because the floor is 20, we simply use that as an anchor.
	// This means at a perfect fit, we return 18 as the score.
	score := 20.0 - total

	// Bound the score, just in case
	// If the score is over 18, that means we've overfit the node.
	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}
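
// Worked example of the scoring curve (numbers follow directly from the
// formula above): on an idle node freePctCpu = freePctRam = 1, so
// total = 10^1 + 10^1 = 20 and score = 20 - 20 = 0; on a fully packed node
// freePctCpu = freePctRam = 0, so total = 10^0 + 10^0 = 2 and
// score = 20 - 2 = 18. Higher scores therefore favor bin packing: nodes the
// workload fills more completely score closer to 18.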

// CopySliceConstraints returns a deep copy of the given constraint slice, or
// nil if the slice is empty.
func CopySliceConstraints(s []*Constraint) []*Constraint {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Constraint, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

// CopySliceAffinities returns a deep copy of the given affinity slice, or nil
// if the slice is empty.
func CopySliceAffinities(s []*Affinity) []*Affinity {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Affinity, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

// CopySliceSpreads returns a deep copy of the given spread slice, or nil if
// the slice is empty.
func CopySliceSpreads(s []*Spread) []*Spread {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Spread, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

// CopySliceSpreadTarget returns a deep copy of the given spread target slice,
// or nil if the slice is empty.
func CopySliceSpreadTarget(s []*SpreadTarget) []*SpreadTarget {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*SpreadTarget, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

// CopySliceNodeScoreMeta returns a deep copy of the given node score metadata
// slice, or nil if the slice is empty.
func CopySliceNodeScoreMeta(s []*NodeScoreMeta) []*NodeScoreMeta {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*NodeScoreMeta, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}
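
// The five CopySlice* helpers above share one shape. A hypothetical
// consolidation with Go 1.18+ generics, which postdate this file and are
// shown purely as a sketch:
//
//	func copySlice[T interface{ Copy() T }](s []T) []T {
//		if len(s) == 0 {
//			return nil
//		}
//		c := make([]T, len(s))
//		for i, v := range s {
//			c[i] = v.Copy()
//		}
//		return c
//	}
//
//	// e.g. constraintsCopy := copySlice(constraints)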

// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
// the set of required policies
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
	set := make(map[string]struct{})

	for _, tgp := range policies {
		for _, tp := range tgp {
			for _, p := range tp.Policies {
				set[p] = struct{}{}
			}
		}
	}

	flattened := make([]string, 0, len(set))
	for p := range set {
		flattened = append(flattened, p)
	}
	return flattened
}
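
// Illustrative input/output (policy, group, and task names are assumed):
// duplicates across task groups and tasks collapse into a single entry.
//
//	// policies: {"web": {"frontend": &Vault{Policies: []string{"kv-read"}},
//	//                    "backend":  &Vault{Policies: []string{"kv-read", "db-creds"}}}}
//	// VaultPoliciesSet(policies) => ["kv-read", "db-creds"] (order not guaranteed)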

// DenormalizeAllocationJobs is used to attach a job to all allocations that are
// non-terminal and do not have a job already. This is useful in cases where the
// job is normalized.
func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
	if job != nil {
		for _, alloc := range allocs {
			if alloc.Job == nil && !alloc.TerminalStatus() {
				alloc.Job = job
			}
		}
	}
}

// AllocName returns the name of the allocation given the input.
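// For example, AllocName("example", "cache", 0) returns "example.cache[0]".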
func AllocName(job, group string, idx uint) string {
	return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}

// ACLPolicyListHash returns a consistent hash for a set of policies.
func ACLPolicyListHash(policies []*ACLPolicy) string {
	cacheKeyHash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}
	for _, policy := range policies {
		cacheKeyHash.Write([]byte(policy.Name))
		binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
	}
	return string(cacheKeyHash.Sum(nil))
}
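
// Note that the hash is sensitive to policy order; CompileACLObject below
// sorts policies by name before hashing so equivalent sets share a cache key.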

// CompileACLObject compiles a set of ACL policies into an ACL object with a cache
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
	// Sort the policies to ensure consistent ordering
	sort.Slice(policies, func(i, j int) bool {
		return policies[i].Name < policies[j].Name
	})

	// Determine the cache key
	cacheKey := ACLPolicyListHash(policies)
	aclRaw, ok := cache.Get(cacheKey)
	if ok {
		return aclRaw.(*acl.ACL), nil
	}

	// Parse the policies
	parsed := make([]*acl.Policy, 0, len(policies))
	for _, policy := range policies {
		p, err := acl.Parse(policy.Rules)
		if err != nil {
			return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
		}
		parsed = append(parsed, p)
	}

	// Create the ACL object
	aclObj, err := acl.NewACL(false, parsed)
	if err != nil {
		return nil, fmt.Errorf("failed to construct ACL: %v", err)
	}

	// Update the cache
	cache.Add(cacheKey, aclObj)
	return aclObj, nil
}
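
// Usage sketch (the cache size is an assumed value; lru.New2Q is the
// constructor from github.com/hashicorp/golang-lru imported above):
//
//	cache, err := lru.New2Q(512)
//	if err != nil {
//		return err
//	}
//	aclObj, err := CompileACLObject(cache, policies)
//	if err != nil {
//		return err
//	}
//	// Subsequent calls with an equivalent policy set hit the cache.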

// GenerateMigrateToken will create a token for a client to access an
// authenticated volume of another client to migrate data for sticky volumes.
func GenerateMigrateToken(allocID, nodeSecretID string) (string, error) {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return "", err
	}
	h.Write([]byte(allocID))
	return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}

// CompareMigrateToken returns true if the migrate token that would be
// generated from allocID and nodeSecretID matches otherMigrateToken. The
// comparison is constant-time to avoid leaking token contents.
func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return false
	}
	h.Write([]byte(allocID))

	otherBytes, err := base64.URLEncoding.DecodeString(otherMigrateToken)
	if err != nil {
		return false
	}
	return subtle.ConstantTimeCompare(h.Sum(nil), otherBytes) == 1
}
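
// Round-trip sketch (IDs are assumed placeholders): a token minted with
// GenerateMigrateToken verifies anywhere the same alloc ID and node secret
// are known.
//
//	token, err := GenerateMigrateToken("alloc-id", "node-secret")
//	if err != nil {
//		return err
//	}
//	ok := CompareMigrateToken("alloc-id", "node-secret", token) // ok == true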