github.com/hhrutter/nomad@v0.6.0-rc2.0.20170723054333-80c4b03f0705/nomad/structs/funcs.go (about)

     1  package structs
     2  
     3  import (
     4  	crand "crypto/rand"
     5  	"fmt"
     6  	"math"
     7  	"strings"
     8  
     9  	multierror "github.com/hashicorp/go-multierror"
    10  )
    11  
    12  // MergeMultierrorWarnings takes job warnings and canonicalize warnings and
    13  // merges them into a returnable string. Both the errors may be nil.
    14  func MergeMultierrorWarnings(warnings, canonicalizeWarnings error) string {
    15  	if warnings == nil && canonicalizeWarnings == nil {
    16  		return ""
    17  	}
    18  
    19  	var warningMsg multierror.Error
    20  	if canonicalizeWarnings != nil {
    21  		multierror.Append(&warningMsg, canonicalizeWarnings)
    22  	}
    23  
    24  	if warnings != nil {
    25  		multierror.Append(&warningMsg, warnings)
    26  	}
    27  
    28  	// Set the formatter
    29  	warningMsg.ErrorFormat = warningsFormatter
    30  	return warningMsg.Error()
    31  }
    32  
    33  // warningsFormatter is used to format job warnings
    34  func warningsFormatter(es []error) string {
    35  	points := make([]string, len(es))
    36  	for i, err := range es {
    37  		points[i] = fmt.Sprintf("* %s", err)
    38  	}
    39  
    40  	return fmt.Sprintf(
    41  		"%d warning(s):\n\n%s",
    42  		len(es), strings.Join(points, "\n"))
    43  }
    44  
    45  // RemoveAllocs is used to remove any allocs with the given IDs
    46  // from the list of allocations
    47  func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
    48  	// Convert remove into a set
    49  	removeSet := make(map[string]struct{})
    50  	for _, remove := range remove {
    51  		removeSet[remove.ID] = struct{}{}
    52  	}
    53  
    54  	n := len(alloc)
    55  	for i := 0; i < n; i++ {
    56  		if _, ok := removeSet[alloc[i].ID]; ok {
    57  			alloc[i], alloc[n-1] = alloc[n-1], nil
    58  			i--
    59  			n--
    60  		}
    61  	}
    62  
    63  	alloc = alloc[:n]
    64  	return alloc
    65  }
    66  
    67  // FilterTerminalAllocs filters out all allocations in a terminal state and
    68  // returns the latest terminal allocations
    69  func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
    70  	terminalAllocsByName := make(map[string]*Allocation)
    71  	n := len(allocs)
    72  	for i := 0; i < n; i++ {
    73  		if allocs[i].TerminalStatus() {
    74  
    75  			// Add the allocation to the terminal allocs map if it's not already
    76  			// added or has a higher create index than the one which is
    77  			// currently present.
    78  			alloc, ok := terminalAllocsByName[allocs[i].Name]
    79  			if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
    80  				terminalAllocsByName[allocs[i].Name] = allocs[i]
    81  			}
    82  
    83  			// Remove the allocation
    84  			allocs[i], allocs[n-1] = allocs[n-1], nil
    85  			i--
    86  			n--
    87  		}
    88  	}
    89  	return allocs[:n], terminalAllocsByName
    90  }
    91  
// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if its already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions.
//
// Returns: whether the allocs fit, the exhausted dimension (empty when
// they fit or on error), the total utilization (node reservation plus all
// allocs), and any error encountered while summing resources.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) {
	// Compute the utilization from zero
	used := new(Resources)

	// Add the reserved resources of the node
	if node.Reserved != nil {
		if err := used.Add(node.Reserved); err != nil {
			return false, "", nil, err
		}
	}

	// For each alloc, add the resources
	for _, alloc := range allocs {
		if alloc.Resources != nil {
			// The alloc carries its combined resources directly.
			if err := used.Add(alloc.Resources); err != nil {
				return false, "", nil, err
			}
		} else if alloc.TaskResources != nil {

			// Adding the shared resource asks for the allocation to the used
			// resources
			if err := used.Add(alloc.SharedResources); err != nil {
				return false, "", nil, err
			}
			// Allocations within the plan have the combined resources stripped
			// to save space, so sum up the individual task resources.
			for _, taskResource := range alloc.TaskResources {
				if err := used.Add(taskResource); err != nil {
					return false, "", nil, err
				}
			}
		} else {
			// Neither combined nor per-task resources: malformed alloc.
			return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID)
		}
	}

	// Check that the node resources are a super set of those
	// that are being allocated
	if superset, dimension := node.Resources.Superset(used); !superset {
		return false, dimension, used, nil
	}

	// Create the network index if missing. The deferred Release only
	// applies to an index created here; a caller-supplied index is left
	// untouched, and collisions are only checked for a locally built one
	// (a provided index is assumed collision-free, per the doc comment).
	if netIdx == nil {
		netIdx = NewNetworkIndex()
		defer netIdx.Release()
		if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
			return false, "reserved port collision", used, nil
		}
	}

	// Check if the network is overcommitted
	if netIdx.Overcommitted() {
		return false, "bandwidth exceeded", used, nil
	}

	// Allocations fit!
	return true, "", used, nil
}
   155  
   156  // ScoreFit is used to score the fit based on the Google work published here:
   157  // http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
   158  // This is equivalent to their BestFit v3
   159  func ScoreFit(node *Node, util *Resources) float64 {
   160  	// Determine the node availability
   161  	nodeCpu := float64(node.Resources.CPU)
   162  	if node.Reserved != nil {
   163  		nodeCpu -= float64(node.Reserved.CPU)
   164  	}
   165  	nodeMem := float64(node.Resources.MemoryMB)
   166  	if node.Reserved != nil {
   167  		nodeMem -= float64(node.Reserved.MemoryMB)
   168  	}
   169  
   170  	// Compute the free percentage
   171  	freePctCpu := 1 - (float64(util.CPU) / nodeCpu)
   172  	freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)
   173  
   174  	// Total will be "maximized" the smaller the value is.
   175  	// At 100% utilization, the total is 2, while at 0% util it is 20.
   176  	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)
   177  
   178  	// Invert so that the "maximized" total represents a high-value
   179  	// score. Because the floor is 20, we simply use that as an anchor.
   180  	// This means at a perfect fit, we return 18 as the score.
   181  	score := 20.0 - total
   182  
   183  	// Bound the score, just in case
   184  	// If the score is over 18, that means we've overfit the node.
   185  	if score > 18.0 {
   186  		score = 18.0
   187  	} else if score < 0 {
   188  		score = 0
   189  	}
   190  	return score
   191  }
   192  
   193  // GenerateUUID is used to generate a random UUID
   194  func GenerateUUID() string {
   195  	buf := make([]byte, 16)
   196  	if _, err := crand.Read(buf); err != nil {
   197  		panic(fmt.Errorf("failed to read random bytes: %v", err))
   198  	}
   199  
   200  	return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
   201  		buf[0:4],
   202  		buf[4:6],
   203  		buf[6:8],
   204  		buf[8:10],
   205  		buf[10:16])
   206  }
   207  
   208  func CopySliceConstraints(s []*Constraint) []*Constraint {
   209  	l := len(s)
   210  	if l == 0 {
   211  		return nil
   212  	}
   213  
   214  	c := make([]*Constraint, l)
   215  	for i, v := range s {
   216  		c[i] = v.Copy()
   217  	}
   218  	return c
   219  }
   220  
   221  // VaultPoliciesSet takes the structure returned by VaultPolicies and returns
   222  // the set of required policies
   223  func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
   224  	set := make(map[string]struct{})
   225  
   226  	for _, tgp := range policies {
   227  		for _, tp := range tgp {
   228  			for _, p := range tp.Policies {
   229  				set[p] = struct{}{}
   230  			}
   231  		}
   232  	}
   233  
   234  	flattened := make([]string, 0, len(set))
   235  	for p := range set {
   236  		flattened = append(flattened, p)
   237  	}
   238  	return flattened
   239  }
   240  
   241  // DenormalizeAllocationJobs is used to attach a job to all allocations that are
   242  // non-terminal and do not have a job already. This is useful in cases where the
   243  // job is normalized.
   244  func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
   245  	if job != nil {
   246  		for _, alloc := range allocs {
   247  			if alloc.Job == nil && !alloc.TerminalStatus() {
   248  				alloc.Job = job
   249  			}
   250  		}
   251  	}
   252  }
   253  
// AllocName returns the name of the allocation given the input.
// The name is formatted as "<job>.<group>[<idx>]".
func AllocName(job, group string, idx uint) string {
	return fmt.Sprintf("%s.%s[%d]", job, group, idx)
   257  }