github.com/quite/nomad@v0.8.6/nomad/structs/funcs.go

package structs

import (
	"crypto/subtle"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"math"
	"sort"
	"strings"

	"golang.org/x/crypto/blake2b"

	multierror "github.com/hashicorp/go-multierror"
	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/acl"
)

// MergeMultierrorWarnings takes job warnings and canonicalization warnings
// and merges them into a returnable string. Any of the errors may be nil.
func MergeMultierrorWarnings(warnings ...error) string {
	var warningMsg multierror.Error
	for _, warn := range warnings {
		if warn != nil {
			multierror.Append(&warningMsg, warn)
		}
	}

	if len(warningMsg.Errors) == 0 {
		return ""
	}

	// Set the formatter
	warningMsg.ErrorFormat = warningsFormatter
	return warningMsg.Error()
}

// warningsFormatter is used to format job warnings
func warningsFormatter(es []error) string {
	points := make([]string, len(es))
	for i, err := range es {
		points[i] = fmt.Sprintf("* %s", err)
	}

	return fmt.Sprintf(
		"%d warning(s):\n\n%s",
		len(es), strings.Join(points, "\n"))
}

// RemoveAllocs is used to remove any allocs with the given IDs
// from the list of allocations
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
	// Convert remove into a set
	removeSet := make(map[string]struct{})
	for _, remove := range remove {
		removeSet[remove.ID] = struct{}{}
	}

	n := len(alloc)
	for i := 0; i < n; i++ {
		if _, ok := removeSet[alloc[i].ID]; ok {
			alloc[i], alloc[n-1] = alloc[n-1], nil
			i--
			n--
		}
	}

	alloc = alloc[:n]
	return alloc
}

// FilterTerminalAllocs filters out all allocations in a terminal state and
// returns the latest terminal allocations
func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
	terminalAllocsByName := make(map[string]*Allocation)
	n := len(allocs)
	for i := 0; i < n; i++ {
		if allocs[i].TerminalStatus() {

			// Add the allocation to the terminal allocs map if it's not
			// already present, or if it has a higher create index than
			// the one currently stored.
			alloc, ok := terminalAllocsByName[allocs[i].Name]
			if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
				terminalAllocsByName[allocs[i].Name] = allocs[i]
			}

			// Remove the allocation
			allocs[i], allocs[n-1] = allocs[n-1], nil
			i--
			n--
		}
	}
	return allocs[:n], terminalAllocsByName
}
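// Illustrative sketch (not part of the original file; the variable names are
// hypothetical): a caller pruning a node's allocation list might combine the
// two helpers above, keeping only live allocations while remembering the
// latest terminal allocation per name:
//
//	live, terminalByName := FilterTerminalAllocs(nodeAllocs)
//	live = RemoveAllocs(live, evicted)
//	// terminalByName maps names such as "example.cache[0]" to the most
//	// recently created terminal allocation bearing that name.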
// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it has already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex) (bool, string, *Resources, error) {
	// Compute the utilization from zero
	used := new(Resources)

	// Add the reserved resources of the node
	if node.Reserved != nil {
		if err := used.Add(node.Reserved); err != nil {
			return false, "", nil, err
		}
	}

	// For each alloc, add the resources
	for _, alloc := range allocs {
		// Do not consider the resource impact of terminal allocations
		if alloc.TerminalStatus() {
			continue
		}

		if alloc.Resources != nil {
			if err := used.Add(alloc.Resources); err != nil {
				return false, "", nil, err
			}
		} else if alloc.TaskResources != nil {

			// Add the allocation's shared resource asks to the used
			// resources
			if err := used.Add(alloc.SharedResources); err != nil {
				return false, "", nil, err
			}

			// Allocations within the plan have the combined resources
			// stripped to save space, so sum up the individual task
			// resources.
			for _, taskResource := range alloc.TaskResources {
				if err := used.Add(taskResource); err != nil {
					return false, "", nil, err
				}
			}
		} else {
			return false, "", nil, fmt.Errorf("allocation %q has no resources set", alloc.ID)
		}
	}

	// Check that the node resources are a superset of those
	// that are being allocated
	if superset, dimension := node.Resources.Superset(used); !superset {
		return false, dimension, used, nil
	}

	// Create the network index if missing
	if netIdx == nil {
		netIdx = NewNetworkIndex()
		defer netIdx.Release()
		if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
			return false, "reserved port collision", used, nil
		}
	}

	// Check if the network is overcommitted
	if netIdx.Overcommitted() {
		return false, "bandwidth exceeded", used, nil
	}

	// Allocations fit!
	return true, "", used, nil
}

// ScoreFit is used to score the fit based on the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
// This is equivalent to their BestFit v3
func ScoreFit(node *Node, util *Resources) float64 {
	// Determine the node availability
	nodeCpu := float64(node.Resources.CPU)
	if node.Reserved != nil {
		nodeCpu -= float64(node.Reserved.CPU)
	}
	nodeMem := float64(node.Resources.MemoryMB)
	if node.Reserved != nil {
		nodeMem -= float64(node.Reserved.MemoryMB)
	}

	// Compute the free percentage
	freePctCpu := 1 - (float64(util.CPU) / nodeCpu)
	freePctRam := 1 - (float64(util.MemoryMB) / nodeMem)

	// A smaller total indicates a better (tighter) fit: at 100%
	// utilization the total is 2, while at 0% utilization it is 20.
	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)

	// Invert so that a tighter fit yields a higher score. Because the
	// total is at most 20, we simply use that as an anchor. This means
	// at a perfect fit, we return 18 as the score.
	score := 20.0 - total

	// Bound the score, just in case.
	// If the score is over 18, that means we've overfit the node.
	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}
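// Worked example for ScoreFit (hypothetical numbers, not part of the
// original file): on a node offering 4000 MHz of CPU and 8192 MB of memory
// with nothing reserved, a utilization of 2000 MHz and 4096 MB gives
// freePctCpu = freePctRam = 0.5, so total = 2 * 10^0.5 ≈ 6.32 and the score
// is 20 - 6.32 ≈ 13.68. An idle node scores 0 (total 20) and a fully packed
// node scores 18 (total 2), so a higher score means tighter bin-packing.
// A scheduler-style caller might feed AllocsFit's computed utilization
// straight into ScoreFit:
//
//	_, _, used, err := AllocsFit(node, proposedAllocs, nil)
//	if err == nil {
//	    score := ScoreFit(node, used)
//	    _ = score
//	}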
func CopySliceConstraints(s []*Constraint) []*Constraint {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Constraint, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
// the set of required policies
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
	set := make(map[string]struct{})

	for _, tgp := range policies {
		for _, tp := range tgp {
			for _, p := range tp.Policies {
				set[p] = struct{}{}
			}
		}
	}

	flattened := make([]string, 0, len(set))
	for p := range set {
		flattened = append(flattened, p)
	}
	return flattened
}

// DenormalizeAllocationJobs is used to attach a job to all allocations that are
// non-terminal and do not have a job already. This is useful in cases where the
// job is normalized.
func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
	if job != nil {
		for _, alloc := range allocs {
			if alloc.Job == nil && !alloc.TerminalStatus() {
				alloc.Job = job
			}
		}
	}
}

// AllocName returns the name of the allocation given the input.
func AllocName(job, group string, idx uint) string {
	return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}

// ACLPolicyListHash returns a consistent hash for a set of policies.
func ACLPolicyListHash(policies []*ACLPolicy) string {
	cacheKeyHash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}
	for _, policy := range policies {
		cacheKeyHash.Write([]byte(policy.Name))
		binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
	}
	cacheKey := string(cacheKeyHash.Sum(nil))
	return cacheKey
}

// CompileACLObject compiles a set of ACL policies into an ACL object with a cache
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
	// Sort the policies to ensure consistent ordering
	sort.Slice(policies, func(i, j int) bool {
		return policies[i].Name < policies[j].Name
	})

	// Determine the cache key
	cacheKey := ACLPolicyListHash(policies)
	aclRaw, ok := cache.Get(cacheKey)
	if ok {
		return aclRaw.(*acl.ACL), nil
	}

	// Parse the policies
	parsed := make([]*acl.Policy, 0, len(policies))
	for _, policy := range policies {
		p, err := acl.Parse(policy.Rules)
		if err != nil {
			return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
		}
		parsed = append(parsed, p)
	}

	// Create the ACL object
	aclObj, err := acl.NewACL(false, parsed)
	if err != nil {
		return nil, fmt.Errorf("failed to construct ACL: %v", err)
	}

	// Update the cache
	cache.Add(cacheKey, aclObj)
	return aclObj, nil
}
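// Illustrative sketch (not part of the original file; server.aclCache and
// tokenPolicies are hypothetical names): resolving a token's policies into a
// compiled ACL through the shared cache. Because the cache key is the hash
// of the sorted policy names and modify indexes, identical policy sets skip
// the parse/compile step on subsequent requests:
//
//	aclObj, err := CompileACLObject(server.aclCache, tokenPolicies)
//	if err != nil {
//	    return nil, err
//	}
//	// aclObj can now be queried for the token's capabilities.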
// GenerateMigrateToken will create a token for a client to access an
// authenticated volume of another client to migrate data for sticky volumes.
func GenerateMigrateToken(allocID, nodeSecretID string) (string, error) {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return "", err
	}
	h.Write([]byte(allocID))
	return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}

// CompareMigrateToken returns true if the migrate token computed from allocID
// and nodeSecretID matches otherMigrateToken. The comparison is done in
// constant time.
func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return false
	}
	h.Write([]byte(allocID))

	otherBytes, err := base64.URLEncoding.DecodeString(otherMigrateToken)
	if err != nil {
		return false
	}
	return subtle.ConstantTimeCompare(h.Sum(nil), otherBytes) == 1
}
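// Illustrative round trip (not part of the original file; the values are
// hypothetical): both sides derive the token from the allocation ID keyed by
// the node's secret ID, so the receiving node can verify a request without
// ever storing the token:
//
//	token, err := GenerateMigrateToken(alloc.ID, node.SecretID)
//	if err != nil {
//	    return err
//	}
//	// ...the token accompanies the migration request...
//	if !CompareMigrateToken(alloc.ID, node.SecretID, token) {
//	    // reject the request
//	}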