github.com/uchennaokeke444/nomad@v0.11.8/nomad/structs/funcs.go

package structs

import (
	"crypto/subtle"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"math"
	"sort"
	"strconv"
	"strings"

	multierror "github.com/hashicorp/go-multierror"
	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/acl"
	"github.com/mitchellh/copystructure"
	"golang.org/x/crypto/blake2b"
)

// MergeMultierrorWarnings takes job warnings and canonicalization warnings
// and merges them into a returnable string. Any of the errors may be nil.
func MergeMultierrorWarnings(warnings ...error) string {
	var warningMsg multierror.Error
	for _, warn := range warnings {
		if warn != nil {
			multierror.Append(&warningMsg, warn)
		}
	}

	if len(warningMsg.Errors) == 0 {
		return ""
	}

	// Set the formatter
	warningMsg.ErrorFormat = warningsFormatter
	return warningMsg.Error()
}

// warningsFormatter is used to format job warnings
func warningsFormatter(es []error) string {
	points := make([]string, len(es))
	for i, err := range es {
		points[i] = fmt.Sprintf("* %s", err)
	}

	return fmt.Sprintf(
		"%d warning(s):\n\n%s",
		len(es), strings.Join(points, "\n"))
}

// RemoveAllocs is used to remove any allocs with the given IDs
// from the list of allocations
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
	// Convert remove into a set
	removeSet := make(map[string]struct{})
	for _, remove := range remove {
		removeSet[remove.ID] = struct{}{}
	}

	n := len(alloc)
	for i := 0; i < n; i++ {
		if _, ok := removeSet[alloc[i].ID]; ok {
			alloc[i], alloc[n-1] = alloc[n-1], nil
			i--
			n--
		}
	}

	alloc = alloc[:n]
	return alloc
}

// FilterTerminalAllocs filters out all allocations in a terminal state and
// returns the latest terminal allocations.
func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
	terminalAllocsByName := make(map[string]*Allocation)
	n := len(allocs)
	for i := 0; i < n; i++ {
		if allocs[i].TerminalStatus() {

			// Add the allocation to the terminal allocs map if it's not
			// already present, or if it has a higher create index than the
			// one currently stored.
			alloc, ok := terminalAllocsByName[allocs[i].Name]
			if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
				terminalAllocsByName[allocs[i].Name] = allocs[i]
			}

			// Remove the allocation
			allocs[i], allocs[n-1] = allocs[n-1], nil
			i--
			n--
		}
	}
	return allocs[:n], terminalAllocsByName
}
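// exampleRemoveAllocs is an illustrative sketch (not part of the original
// file) showing the in-place swap-and-truncate filter used by RemoveAllocs
// and FilterTerminalAllocs: matching entries are swapped to the tail and the
// slice is shortened, so the relative order of the kept elements may change.
func exampleRemoveAllocs() {
	allocs := []*Allocation{{ID: "a"}, {ID: "b"}, {ID: "c"}}
	kept := RemoveAllocs(allocs, []*Allocation{{ID: "b"}})
	for _, a := range kept {
		fmt.Println(a.ID) // prints "a" and "c"
	}
}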
// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it's already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions. If checkDevices is set to true, we check
// for device oversubscription.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex, checkDevices bool) (bool, string, *ComparableResources, error) {
	// Compute the allocs' utilization from zero
	used := new(ComparableResources)

	// For each alloc, add the resources
	for _, alloc := range allocs {
		// Do not consider the resource impact of terminal allocations
		if alloc.TerminalStatus() {
			continue
		}

		used.Add(alloc.ComparableResources())
	}

	// Check that the node resources (after subtracting reserved) are a
	// superset of those that are being allocated
	available := node.ComparableResources()
	available.Subtract(node.ComparableReservedResources())
	if superset, dimension := available.Superset(used); !superset {
		return false, dimension, used, nil
	}

	// Create the network index if missing
	if netIdx == nil {
		netIdx = NewNetworkIndex()
		defer netIdx.Release()
		if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
			return false, "reserved port collision", used, nil
		}
	}

	// Check if the network is overcommitted
	if netIdx.Overcommitted() {
		return false, "bandwidth exceeded", used, nil
	}

	// Check devices
	if checkDevices {
		accounter := NewDeviceAccounter(node)
		if accounter.AddAllocs(allocs) {
			return false, "device oversubscribed", used, nil
		}
	}

	// Allocations fit!
	return true, "", used, nil
}

func computeFreePercentage(node *Node, util *ComparableResources) (freePctCpu, freePctRam float64) {
	// COMPAT(0.11): Remove in 0.11
	reserved := node.ComparableReservedResources()
	res := node.ComparableResources()

	// Determine the node availability
	nodeCpu := float64(res.Flattened.Cpu.CpuShares)
	nodeMem := float64(res.Flattened.Memory.MemoryMB)
	if reserved != nil {
		nodeCpu -= float64(reserved.Flattened.Cpu.CpuShares)
		nodeMem -= float64(reserved.Flattened.Memory.MemoryMB)
	}

	// Compute the free percentage
	freePctCpu = 1 - (float64(util.Flattened.Cpu.CpuShares) / nodeCpu)
	freePctRam = 1 - (float64(util.Flattened.Memory.MemoryMB) / nodeMem)
	return freePctCpu, freePctRam
}

// ScoreFitBinPack computes a fit score to achieve binpacking behavior.
// Score is in [0, 18]
//
// It's the BestFit v3 from the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
func ScoreFitBinPack(node *Node, util *ComparableResources) float64 {
	freePctCpu, freePctRam := computeFreePercentage(node, util)

	// Total is "maximized" the smaller the value is.
	// At 100% utilization, the total is 2, while at 0% utilization it is 20.
	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)

	// Invert so that the "maximized" total represents a high-value
	// score. Because the floor is 20, we simply use that as an anchor.
	// This means at a perfect fit, we return 18 as the score.
	score := 20.0 - total

	// Bound the score, just in case.
	// If the score is over 18, that means we've overfit the node.
	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}
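// exampleBinPackCurve is an illustrative sketch (not part of the original
// file) of the bin-packing score curve used above, assuming CPU and memory
// have the same free percentage: score = 20 - (10^freeCpu + 10^freeMem), so
// a fully utilized node scores 18 and an idle node scores 0.
func exampleBinPackCurve() {
	for _, free := range []float64{0.0, 0.25, 0.5, 1.0} {
		total := math.Pow(10, free) + math.Pow(10, free)
		fmt.Printf("free=%.2f score=%.2f\n", free, 20.0-total)
	}
}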
// ScoreFitSpread computes a fit score to achieve spread behavior.
// Score is in [0, 18]
//
// This is equivalent to Worst Fit of
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
func ScoreFitSpread(node *Node, util *ComparableResources) float64 {
	freePctCpu, freePctRam := computeFreePercentage(node, util)
	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)
	score := total - 2

	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}

func CopySliceConstraints(s []*Constraint) []*Constraint {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Constraint, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceAffinities(s []*Affinity) []*Affinity {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Affinity, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceSpreads(s []*Spread) []*Spread {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Spread, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceSpreadTarget(s []*SpreadTarget) []*SpreadTarget {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*SpreadTarget, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceNodeScoreMeta(s []*NodeScoreMeta) []*NodeScoreMeta {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*NodeScoreMeta, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopyScalingPolicy(p *ScalingPolicy) *ScalingPolicy {
	if p == nil {
		return nil
	}

	opaquePolicyConfig, err := copystructure.Copy(p.Policy)
	if err != nil {
		panic(err.Error())
	}

	c := ScalingPolicy{
		ID:          p.ID,
		Policy:      opaquePolicyConfig.(map[string]interface{}),
		Enabled:     p.Enabled,
		Min:         p.Min,
		Max:         p.Max,
		CreateIndex: p.CreateIndex,
		ModifyIndex: p.ModifyIndex,
	}
	c.Target = make(map[string]string, len(p.Target))
	for k, v := range p.Target {
		c.Target[k] = v
	}
	return &c
}

// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
// the set of required policies
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
	set := make(map[string]struct{})

	for _, tgp := range policies {
		for _, tp := range tgp {
			for _, p := range tp.Policies {
				set[p] = struct{}{}
			}
		}
	}

	flattened := make([]string, 0, len(set))
	for p := range set {
		flattened = append(flattened, p)
	}
	return flattened
}

// DenormalizeAllocationJobs is used to attach a job to all allocations that are
// non-terminal and do not have a job already. This is useful in cases where the
// job is normalized.
func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
	if job != nil {
		for _, alloc := range allocs {
			if alloc.Job == nil && !alloc.TerminalStatus() {
				alloc.Job = job
			}
		}
	}
}

// AllocName returns the name of the allocation given the input.
func AllocName(job, group string, idx uint) string {
	return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}
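// exampleAllocName is an illustrative sketch (not part of the original file)
// showing the "<job>.<group>[<index>]" naming convention produced by
// AllocName; the job and group names here are arbitrary placeholders.
func exampleAllocName() {
	fmt.Println(AllocName("example", "cache", 0)) // example.cache[0]
}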
// ACLPolicyListHash returns a consistent hash for a set of policies.
func ACLPolicyListHash(policies []*ACLPolicy) string {
	cacheKeyHash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}
	for _, policy := range policies {
		cacheKeyHash.Write([]byte(policy.Name))
		binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
	}
	cacheKey := string(cacheKeyHash.Sum(nil))
	return cacheKey
}

// CompileACLObject compiles a set of ACL policies into an ACL object with a cache
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
	// Sort the policies to ensure consistent ordering
	sort.Slice(policies, func(i, j int) bool {
		return policies[i].Name < policies[j].Name
	})

	// Determine the cache key
	cacheKey := ACLPolicyListHash(policies)
	aclRaw, ok := cache.Get(cacheKey)
	if ok {
		return aclRaw.(*acl.ACL), nil
	}

	// Parse the policies
	parsed := make([]*acl.Policy, 0, len(policies))
	for _, policy := range policies {
		p, err := acl.Parse(policy.Rules)
		if err != nil {
			return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
		}
		parsed = append(parsed, p)
	}

	// Create the ACL object
	aclObj, err := acl.NewACL(false, parsed)
	if err != nil {
		return nil, fmt.Errorf("failed to construct ACL: %v", err)
	}

	// Update the cache
	cache.Add(cacheKey, aclObj)
	return aclObj, nil
}

// GenerateMigrateToken will create a token for a client to access an
// authenticated volume of another client to migrate data for sticky volumes.
func GenerateMigrateToken(allocID, nodeSecretID string) (string, error) {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return "", err
	}
	h.Write([]byte(allocID))
	return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}

// CompareMigrateToken returns true if two migration tokens can be computed and
// are equal.
func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return false
	}
	h.Write([]byte(allocID))

	otherBytes, err := base64.URLEncoding.DecodeString(otherMigrateToken)
	if err != nil {
		return false
	}
	return subtle.ConstantTimeCompare(h.Sum(nil), otherBytes) == 1
}
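// exampleMigrateTokenRoundTrip is an illustrative sketch (not part of the
// original file): a token derived from a node's secret and an alloc ID only
// verifies against the same pair of inputs. The IDs used here are arbitrary
// placeholder strings.
func exampleMigrateTokenRoundTrip() {
	token, err := GenerateMigrateToken("alloc-id", "node-secret")
	if err != nil {
		panic(err)
	}
	fmt.Println(CompareMigrateToken("alloc-id", "node-secret", token))  // true
	fmt.Println(CompareMigrateToken("alloc-id", "other-secret", token)) // false
}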
// ParsePortRanges parses the passed port range string and returns a list of the
// ports. The specification is a comma separated list of either port numbers or
// port ranges. A port number is a single integer and a port range is two
// integers separated by a hyphen. For example,
// ParsePortRanges("10,12-14,16") returns []uint64{10, 12, 13, 14, 16}.
func ParsePortRanges(spec string) ([]uint64, error) {
	parts := strings.Split(spec, ",")

	// Hot path the empty case
	if len(parts) == 1 && parts[0] == "" {
		return nil, nil
	}

	ports := make(map[uint64]struct{})
	for _, part := range parts {
		part = strings.TrimSpace(part)
		rangeParts := strings.Split(part, "-")
		l := len(rangeParts)
		switch l {
		case 1:
			if val := rangeParts[0]; val == "" {
				return nil, fmt.Errorf("can't specify empty port")
			} else {
				port, err := strconv.ParseUint(val, 10, 0)
				if err != nil {
					return nil, err
				}
				ports[port] = struct{}{}
			}
		case 2:
			// We are parsing a range
			start, err := strconv.ParseUint(rangeParts[0], 10, 0)
			if err != nil {
				return nil, err
			}

			end, err := strconv.ParseUint(rangeParts[1], 10, 0)
			if err != nil {
				return nil, err
			}

			if end < start {
				return nil, fmt.Errorf("invalid range: starting value (%v) greater than ending value (%v)", start, end)
			}

			for i := start; i <= end; i++ {
				ports[i] = struct{}{}
			}
		default:
			return nil, fmt.Errorf("can only parse single port numbers or port ranges (ex. 80,100-120,150)")
		}
	}

	var results []uint64
	for port := range ports {
		results = append(results, port)
	}

	sort.Slice(results, func(i, j int) bool {
		return results[i] < results[j]
	})
	return results, nil
}
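// exampleParsePortRanges is an illustrative sketch (not part of the original
// file) showing how ParsePortRanges expands a mixed spec of single ports and
// ranges into a sorted, de-duplicated slice.
func exampleParsePortRanges() {
	ports, err := ParsePortRanges("10,12-14,16")
	if err != nil {
		panic(err)
	}
	fmt.Println(ports) // [10 12 13 14 16]
}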