github.com/blixtra/nomad@v0.7.2-0.20171221000451-da9a1d7bb050/nomad/structs/funcs.go

package structs

import (
	"encoding/binary"
	"fmt"
	"math"
	"sort"
	"strings"

	"golang.org/x/crypto/blake2b"

	multierror "github.com/hashicorp/go-multierror"
	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/acl"
)

// MergeMultierrorWarnings merges job validation and canonicalization warnings
// into a single returnable string. Any of the given errors may be nil.
func MergeMultierrorWarnings(warnings ...error) string {
	var warningMsg multierror.Error
	for _, warn := range warnings {
		if warn != nil {
			multierror.Append(&warningMsg, warn)
		}
	}

	if len(warningMsg.Errors) == 0 {
		return ""
	}

	// Set the formatter
	warningMsg.ErrorFormat = warningsFormatter
	return warningMsg.Error()
}
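
// Illustrative sketch (not part of the original file): merging a hypothetical
// validation warning with a nil canonicalization warning into one user-facing
// string. The warning text is made up for the example.
func exampleMergeWarnings() string {
	validateWarn := fmt.Errorf("group %q should have an update stanza", "cache")
	var canonicalizeWarn error // nil warnings are skipped

	// Returns:
	// 1 warning(s):
	//
	// * group "cache" should have an update stanza
	return MergeMultierrorWarnings(validateWarn, canonicalizeWarn)
}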

// warningsFormatter is used to format job warnings
func warningsFormatter(es []error) string {
	points := make([]string, len(es))
	for i, err := range es {
		points[i] = fmt.Sprintf("* %s", err)
	}

	return fmt.Sprintf(
		"%d warning(s):\n\n%s",
		len(es), strings.Join(points, "\n"))
}

// RemoveAllocs is used to remove any allocs with the given IDs
// from the list of allocations. The input slice is modified in place and the
// truncated slice is returned.
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
	// Convert remove into a set
	removeSet := make(map[string]struct{})
	for _, remove := range remove {
		removeSet[remove.ID] = struct{}{}
	}

	// Swap-remove matching allocations and truncate the slice at the end.
	n := len(alloc)
	for i := 0; i < n; i++ {
		if _, ok := removeSet[alloc[i].ID]; ok {
			alloc[i], alloc[n-1] = alloc[n-1], nil
			i--
			n--
		}
	}

	alloc = alloc[:n]
	return alloc
}

// FilterTerminalAllocs filters out all allocations in a terminal state and
// returns the latest terminal allocation for each allocation name.
func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
	terminalAllocsByName := make(map[string]*Allocation)
	n := len(allocs)
	for i := 0; i < n; i++ {
		if allocs[i].TerminalStatus() {

			// Track the terminal allocation if none is recorded for this name
			// yet, or if this one has a higher create index than the one
			// currently recorded.
			alloc, ok := terminalAllocsByName[allocs[i].Name]
			if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
				terminalAllocsByName[allocs[i].Name] = allocs[i]
			}

			// Remove the allocation
			allocs[i], allocs[n-1] = allocs[n-1], nil
			i--
			n--
		}
	}
	return allocs[:n], terminalAllocsByName
}
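
// Illustrative sketch (not part of the original file): splitting a set of
// allocations into those that are still non-terminal and the most recent
// terminal allocation per name, e.g. before computing placement updates.
func exampleFilterTerminal(allocs []*Allocation) {
	running, terminal := FilterTerminalAllocs(allocs)
	fmt.Printf("%d allocations are non-terminal\n", len(running))
	for name, alloc := range terminal {
		fmt.Printf("latest terminal alloc for %s: %s\n", name, alloc.ID)
	}
}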

// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it's already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) {
	// Compute the utilization from zero
	used := new(Resources)

	// Add the reserved resources of the node
	if node.Reserved != nil {
		if err := used.Add(node.Reserved); err != nil {
			return false, "", nil, err
		}
	}

	// For each alloc, add the resources
	for _, alloc := range allocs {
		if alloc.Resources != nil {
			if err := used.Add(alloc.Resources); err != nil {
				return false, "", nil, err
			}
		} else if alloc.TaskResources != nil {

			// Add the allocation's shared resource asks to the used resources
			if err := used.Add(alloc.SharedResources); err != nil {
				return false, "", nil, err
			}
			// Allocations within the plan have the combined resources stripped
			// to save space, so sum up the individual task resources.
			for _, taskResource := range alloc.TaskResources {
				if err := used.Add(taskResource); err != nil {
					return false, "", nil, err
				}
			}
		} else {
			return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID)
		}
	}

	// Check that the node resources are a superset of those
	// that are being allocated
	if superset, dimension := node.Resources.Superset(used); !superset {
		return false, dimension, used, nil
	}

	// Create the network index if missing
	if netIdx == nil {
		netIdx = NewNetworkIndex()
		defer netIdx.Release()
		if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
			return false, "reserved port collision", used, nil
		}
	}

	// Check if the network is overcommitted
	if netIdx.Overcommitted() {
		return false, "bandwidth exceeded", used, nil
	}

	// Allocations fit!
	return true, "", used, nil
}
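
// Illustrative sketch (not part of the original file): the shape of a
// scheduler-style feasibility check. It asks whether a proposed set of
// allocations fits on a node and, if not, reports the exhausted dimension.
func exampleAllocsFit(node *Node, proposed []*Allocation) error {
	fit, dimension, used, err := AllocsFit(node, proposed, nil)
	if err != nil {
		return err
	}
	if !fit {
		return fmt.Errorf("node %s exhausted: %s (cpu used: %d MHz, memory used: %d MB)",
			node.ID, dimension, used.CPU, used.MemoryMB)
	}
	return nil
}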

// ScoreFit is used to score the fit based on the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
// This is equivalent to their BestFit v3
func ScoreFit(node *Node, util *Resources) float64 {
	// Determine the node availability
	nodeCpu := float64(node.Resources.CPU)
	if node.Reserved != nil {
		nodeCpu -= float64(node.Reserved.CPU)
	}
	nodeMem := float64(node.Resources.MemoryMB)
	if node.Reserved != nil {
		nodeMem -= float64(node.Reserved.MemoryMB)
	}

	// Compute the free percentage
	freePctCpu := 1 - (float64(util.CPU) / nodeCpu)
	freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)

	// The fit is better the smaller the total is.
	// At 100% utilization, the total is 2, while at 0% utilization it is 20.
	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)

	// Invert so that a tighter fit yields a higher score. Because the
	// worst-case (idle node) total is 20, we use that as the anchor.
	// This means at a perfect fit, we return 18 as the score.
	score := 20.0 - total

	// Bound the score, just in case.
	// If the score is over 18, that means we've overfit the node.
	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}
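
// Illustrative sketch (not part of the original file): a worked example of the
// BestFit scoring. A node with 4000 MHz of CPU and 8192 MB of memory that would
// be half utilized scores 20 - (10^0.5 + 10^0.5) ~= 13.68, while a perfectly
// packed node scores 20 - (10^0 + 10^0) = 18.
func exampleScoreFit() float64 {
	node := &Node{
		Resources: &Resources{CPU: 4000, MemoryMB: 8192},
	}
	util := &Resources{CPU: 2000, MemoryMB: 4096} // 50% of each dimension
	return ScoreFit(node, util)                   // ~= 13.675
}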

// CopySliceConstraints returns a deep copy of the given constraints, or nil if
// the slice is empty.
func CopySliceConstraints(s []*Constraint) []*Constraint {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Constraint, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
// the deduplicated set of required policies
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
	set := make(map[string]struct{})

	for _, tgp := range policies {
		for _, tp := range tgp {
			for _, p := range tp.Policies {
				set[p] = struct{}{}
			}
		}
	}

	flattened := make([]string, 0, len(set))
	for p := range set {
		flattened = append(flattened, p)
	}
	return flattened
}
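
// Illustrative sketch (not part of the original file): flattening a job's
// per-task-group, per-task Vault blocks into the unique set of policies that
// need to be fetched. It assumes the job's VaultPolicies method, which the
// doc comment above refers to, returns map[task group name]map[task name]*Vault.
func exampleVaultPolicies(job *Job) []string {
	return VaultPoliciesSet(job.VaultPolicies())
}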

// DenormalizeAllocationJobs is used to attach a job to all allocations that are
// non-terminal and do not have a job already. This is useful in cases where the
// job is normalized.
func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
	if job != nil {
		for _, alloc := range allocs {
			if alloc.Job == nil && !alloc.TerminalStatus() {
				alloc.Job = job
			}
		}
	}
}

// AllocName returns the name of an allocation in the form
// "<job>.<group>[<index>]", for example "example.cache[0]".
func AllocName(job, group string, idx uint) string {
	return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}

// ACLPolicyListHash returns a consistent hash for a set of policies.
func ACLPolicyListHash(policies []*ACLPolicy) string {
	cacheKeyHash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}
	for _, policy := range policies {
		// Hash the policy name and modify index so that any change to a
		// policy produces a different key.
		cacheKeyHash.Write([]byte(policy.Name))
		binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
	}
	cacheKey := string(cacheKeyHash.Sum(nil))
	return cacheKey
}

// CompileACLObject compiles a set of ACL policies into an ACL object with a cache
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
	// Sort the policies to ensure consistent ordering
	sort.Slice(policies, func(i, j int) bool {
		return policies[i].Name < policies[j].Name
	})

	// Determine the cache key
	cacheKey := ACLPolicyListHash(policies)
	aclRaw, ok := cache.Get(cacheKey)
	if ok {
		return aclRaw.(*acl.ACL), nil
	}

	// Parse the policies
	parsed := make([]*acl.Policy, 0, len(policies))
	for _, policy := range policies {
		p, err := acl.Parse(policy.Rules)
		if err != nil {
			return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
		}
		parsed = append(parsed, p)
	}

	// Create the ACL object
	aclObj, err := acl.NewACL(false, parsed)
	if err != nil {
		return nil, fmt.Errorf("failed to construct ACL: %v", err)
	}

	// Update the cache
	cache.Add(cacheKey, aclObj)
	return aclObj, nil
}
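
// Illustrative sketch (not part of the original file): resolving a token's
// policies into a compiled ACL, caching the result across requests. The cache
// size of 128 entries is an arbitrary example value.
func exampleResolveACL(policies []*ACLPolicy) (*acl.ACL, error) {
	cache, err := lru.New2Q(128)
	if err != nil {
		return nil, err
	}
	// Repeated calls with the same set of policies hit the cache because the
	// cache key is the ACLPolicyListHash of the sorted policy list.
	return CompileACLObject(cache, policies)
}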