github.com/ThomasObenaus/nomad@v0.11.1/nomad/structs/funcs.go

package structs

import (
	"crypto/subtle"
	"encoding/base64"
	"encoding/binary"
	"fmt"
	"math"
	"sort"
	"strconv"
	"strings"

	multierror "github.com/hashicorp/go-multierror"
	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/acl"
	"github.com/mitchellh/copystructure"
	"golang.org/x/crypto/blake2b"
)

// MergeMultierrorWarnings takes job warnings and canonicalization warnings
// and merges them into a returnable string. Any of the errors may be nil.
func MergeMultierrorWarnings(warnings ...error) string {
	var warningMsg multierror.Error
	for _, warn := range warnings {
		if warn != nil {
			multierror.Append(&warningMsg, warn)
		}
	}

	if len(warningMsg.Errors) == 0 {
		return ""
	}

	// Set the formatter
	warningMsg.ErrorFormat = warningsFormatter
	return warningMsg.Error()
}

// warningsFormatter is used to format job warnings
func warningsFormatter(es []error) string {
	points := make([]string, len(es))
	for i, err := range es {
		points[i] = fmt.Sprintf("* %s", err)
	}

	return fmt.Sprintf(
		"%d warning(s):\n\n%s",
		len(es), strings.Join(points, "\n"))
}

// RemoveAllocs is used to remove any allocs with the given IDs
// from the list of allocations
func RemoveAllocs(alloc []*Allocation, remove []*Allocation) []*Allocation {
	// Convert remove into a set
	removeSet := make(map[string]struct{})
	for _, remove := range remove {
		removeSet[remove.ID] = struct{}{}
	}

	n := len(alloc)
	for i := 0; i < n; i++ {
		if _, ok := removeSet[alloc[i].ID]; ok {
			alloc[i], alloc[n-1] = alloc[n-1], nil
			i--
			n--
		}
	}

	alloc = alloc[:n]
	return alloc
}

// FilterTerminalAllocs filters out all allocations in a terminal state and
// returns the latest terminal allocations.
func FilterTerminalAllocs(allocs []*Allocation) ([]*Allocation, map[string]*Allocation) {
	terminalAllocsByName := make(map[string]*Allocation)
	n := len(allocs)
	for i := 0; i < n; i++ {
		if allocs[i].TerminalStatus() {

			// Add the allocation to the terminal allocs map if it's not
			// already present, or if it has a higher create index than the
			// one that is.
			alloc, ok := terminalAllocsByName[allocs[i].Name]
			if !ok || alloc.CreateIndex < allocs[i].CreateIndex {
				terminalAllocsByName[allocs[i].Name] = allocs[i]
			}

			// Remove the allocation
			allocs[i], allocs[n-1] = allocs[n-1], nil
			i--
			n--
		}
	}
	return allocs[:n], terminalAllocsByName
}
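// exampleFilterAllocs is an illustrative sketch, not part of the upstream
// file: it shows how RemoveAllocs and FilterTerminalAllocs might be used
// together. Only Allocation fields already referenced in this file (ID, Name,
// CreateIndex) are set, plus DesiredStatus; the AllocDesiredStatus* constants
// are assumed to be defined elsewhere in this package.
func exampleFilterAllocs() {
	allocs := []*Allocation{
		{ID: "a1", Name: "web.cache[0]", CreateIndex: 10, DesiredStatus: AllocDesiredStatusRun},
		{ID: "a2", Name: "web.cache[1]", CreateIndex: 11, DesiredStatus: AllocDesiredStatusStop},
		{ID: "a3", Name: "web.cache[1]", CreateIndex: 20, DesiredStatus: AllocDesiredStatusStop},
	}

	// Drop a1 by ID; note that RemoveAllocs reorders the input slice in place.
	remaining := RemoveAllocs(allocs, []*Allocation{{ID: "a1"}})

	// Split out terminal allocations, keeping only the latest one per name.
	live, terminal := FilterTerminalAllocs(remaining)
	fmt.Println(len(live))                            // 0 (both remaining allocs are stopped)
	fmt.Println(terminal["web.cache[1]"].CreateIndex) // 20 (the higher create index wins)
}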
// AllocsFit checks if a given set of allocations will fit on a node.
// The netIdx can optionally be provided if it's already been computed.
// If the netIdx is provided, it is assumed that the client has already
// ensured there are no collisions. If checkDevices is set to true, we check
// if there is a device oversubscription.
func AllocsFit(node *Node, allocs []*Allocation, netIdx *NetworkIndex, checkDevices bool) (bool, string, *ComparableResources, error) {
	// Compute the utilization from zero
	used := new(ComparableResources)

	// Add the reserved resources of the node
	used.Add(node.ComparableReservedResources())

	// For each alloc, add the resources
	for _, alloc := range allocs {
		// Do not consider the resource impact of terminal allocations
		if alloc.TerminalStatus() {
			continue
		}

		used.Add(alloc.ComparableResources())
	}

	// Check that the node resources are a superset of those
	// that are being allocated
	if superset, dimension := node.ComparableResources().Superset(used); !superset {
		return false, dimension, used, nil
	}

	// Create the network index if missing
	if netIdx == nil {
		netIdx = NewNetworkIndex()
		defer netIdx.Release()
		if netIdx.SetNode(node) || netIdx.AddAllocs(allocs) {
			return false, "reserved port collision", used, nil
		}
	}

	// Check if the network is overcommitted
	if netIdx.Overcommitted() {
		return false, "bandwidth exceeded", used, nil
	}

	// Check devices
	if checkDevices {
		accounter := NewDeviceAccounter(node)
		if accounter.AddAllocs(allocs) {
			return false, "device oversubscribed", used, nil
		}
	}

	// Allocations fit!
	return true, "", used, nil
}

// ScoreFit is used to score the fit based on the Google work published here:
// http://www.columbia.edu/~cs2035/courses/ieor4405.S13/datacenter_scheduling.ppt
// This is equivalent to their BestFit v3
func ScoreFit(node *Node, util *ComparableResources) float64 {
	// COMPAT(0.11): Remove in 0.11
	reserved := node.ComparableReservedResources()
	res := node.ComparableResources()

	// Determine the node availability
	nodeCpu := float64(res.Flattened.Cpu.CpuShares)
	nodeMem := float64(res.Flattened.Memory.MemoryMB)
	if reserved != nil {
		nodeCpu -= float64(reserved.Flattened.Cpu.CpuShares)
		nodeMem -= float64(reserved.Flattened.Memory.MemoryMB)
	}

	// Compute the free percentage
	freePctCpu := 1 - (float64(util.Flattened.Cpu.CpuShares) / nodeCpu)
	freePctRam := 1 - (float64(util.Flattened.Memory.MemoryMB) / nodeMem)

	// Total will be "maximized" the smaller the value is.
	// At 100% utilization, the total is 2, while at 0% util it is 20.
	total := math.Pow(10, freePctCpu) + math.Pow(10, freePctRam)

	// Invert so that a smaller total yields a higher score. Because total
	// tops out at 20 (a completely idle node), we use 20 as the anchor.
	// This means at a perfect fit, we return 18 as the score.
	score := 20.0 - total

	// Bound the score, just in case.
	// If the score is over 18, that means we've overfit the node.
	if score > 18.0 {
		score = 18.0
	} else if score < 0 {
		score = 0
	}
	return score
}
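// exampleScoreFitMath is an illustrative sketch, not part of the upstream
// file. It mirrors the BestFit v3 formula from ScoreFit above with assumed
// numbers instead of constructing a full Node: a node with 4000 MHz of CPU
// and 8192 MB of memory available, half of which is proposed for use.
func exampleScoreFitMath() {
	nodeCpu, nodeMem := 4000.0, 8192.0
	usedCpu, usedMem := 2000.0, 4096.0

	freePctCpu := 1 - usedCpu/nodeCpu // 0.5
	freePctRam := 1 - usedMem/nodeMem // 0.5

	// total = 10^0.5 + 10^0.5 ≈ 6.32, so score ≈ 13.68. A perfectly packed
	// node (no free CPU or memory) scores 20 - 2 = 18; an idle node scores
	// 20 - 20 = 0, so the scheduler prefers filling nodes that already have
	// some load.
	score := 20.0 - (math.Pow(10, freePctCpu) + math.Pow(10, freePctRam))
	fmt.Printf("%.2f\n", score) // 13.68
}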
func CopySliceConstraints(s []*Constraint) []*Constraint {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Constraint, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceAffinities(s []*Affinity) []*Affinity {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Affinity, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceSpreads(s []*Spread) []*Spread {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*Spread, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceSpreadTarget(s []*SpreadTarget) []*SpreadTarget {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*SpreadTarget, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopySliceNodeScoreMeta(s []*NodeScoreMeta) []*NodeScoreMeta {
	l := len(s)
	if l == 0 {
		return nil
	}

	c := make([]*NodeScoreMeta, l)
	for i, v := range s {
		c[i] = v.Copy()
	}
	return c
}

func CopyScalingPolicy(p *ScalingPolicy) *ScalingPolicy {
	if p == nil {
		return nil
	}

	opaquePolicyConfig, err := copystructure.Copy(p.Policy)
	if err != nil {
		panic(err.Error())
	}

	c := ScalingPolicy{
		ID:          p.ID,
		Policy:      opaquePolicyConfig.(map[string]interface{}),
		Enabled:     p.Enabled,
		Min:         p.Min,
		Max:         p.Max,
		CreateIndex: p.CreateIndex,
		ModifyIndex: p.ModifyIndex,
	}
	c.Target = make(map[string]string, len(p.Target))
	for k, v := range p.Target {
		c.Target[k] = v
	}
	return &c
}

// VaultPoliciesSet takes the structure returned by VaultPolicies and returns
// the set of required policies
func VaultPoliciesSet(policies map[string]map[string]*Vault) []string {
	set := make(map[string]struct{})

	for _, tgp := range policies {
		for _, tp := range tgp {
			for _, p := range tp.Policies {
				set[p] = struct{}{}
			}
		}
	}

	flattened := make([]string, 0, len(set))
	for p := range set {
		flattened = append(flattened, p)
	}
	return flattened
}

// DenormalizeAllocationJobs is used to attach a job to all allocations that are
// non-terminal and do not have a job already. This is useful in cases where the
// job is normalized.
func DenormalizeAllocationJobs(job *Job, allocs []*Allocation) {
	if job != nil {
		for _, alloc := range allocs {
			if alloc.Job == nil && !alloc.TerminalStatus() {
				alloc.Job = job
			}
		}
	}
}

// AllocName returns the name of the allocation given the input.
func AllocName(job, group string, idx uint) string {
	return fmt.Sprintf("%s.%s[%d]", job, group, idx)
}
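// exampleCopyScalingPolicy is an illustrative sketch, not part of the
// upstream file. It shows why CopyScalingPolicy runs the opaque Policy map
// through copystructure: the nested maps in the copy are independent of the
// original. The field values are made up, and only fields whose types are
// evident from this file (ID, Policy, Target) are set.
func exampleCopyScalingPolicy() {
	orig := &ScalingPolicy{
		ID:     "pol-1",
		Policy: map[string]interface{}{"cooldown": "1m"},
		Target: map[string]string{"Job": "web", "Group": "frontend"},
	}

	cp := CopyScalingPolicy(orig)
	cp.Policy["cooldown"] = "5m"
	cp.Target["Group"] = "backend"

	fmt.Println(orig.Policy["cooldown"]) // "1m": the opaque policy map was deep-copied
	fmt.Println(orig.Target["Group"])    // "frontend": the target map was copied as well

	// AllocName renders the canonical "<job>.<group>[<index>]" form.
	fmt.Println(AllocName("web", "frontend", 3)) // "web.frontend[3]"
}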
// ACLPolicyListHash returns a consistent hash for a set of policies.
func ACLPolicyListHash(policies []*ACLPolicy) string {
	cacheKeyHash, err := blake2b.New256(nil)
	if err != nil {
		panic(err)
	}
	for _, policy := range policies {
		cacheKeyHash.Write([]byte(policy.Name))
		binary.Write(cacheKeyHash, binary.BigEndian, policy.ModifyIndex)
	}
	cacheKey := string(cacheKeyHash.Sum(nil))
	return cacheKey
}

// CompileACLObject compiles a set of ACL policies into an ACL object with a cache
func CompileACLObject(cache *lru.TwoQueueCache, policies []*ACLPolicy) (*acl.ACL, error) {
	// Sort the policies to ensure consistent ordering
	sort.Slice(policies, func(i, j int) bool {
		return policies[i].Name < policies[j].Name
	})

	// Determine the cache key
	cacheKey := ACLPolicyListHash(policies)
	aclRaw, ok := cache.Get(cacheKey)
	if ok {
		return aclRaw.(*acl.ACL), nil
	}

	// Parse the policies
	parsed := make([]*acl.Policy, 0, len(policies))
	for _, policy := range policies {
		p, err := acl.Parse(policy.Rules)
		if err != nil {
			return nil, fmt.Errorf("failed to parse %q: %v", policy.Name, err)
		}
		parsed = append(parsed, p)
	}

	// Create the ACL object
	aclObj, err := acl.NewACL(false, parsed)
	if err != nil {
		return nil, fmt.Errorf("failed to construct ACL: %v", err)
	}

	// Update the cache
	cache.Add(cacheKey, aclObj)
	return aclObj, nil
}

// GenerateMigrateToken will create a token for a client to access an
// authenticated volume of another client to migrate data for sticky volumes.
func GenerateMigrateToken(allocID, nodeSecretID string) (string, error) {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return "", err
	}
	h.Write([]byte(allocID))
	return base64.URLEncoding.EncodeToString(h.Sum(nil)), nil
}

// CompareMigrateToken returns true if two migration tokens can be computed and
// are equal.
func CompareMigrateToken(allocID, nodeSecretID, otherMigrateToken string) bool {
	h, err := blake2b.New512([]byte(nodeSecretID))
	if err != nil {
		return false
	}
	h.Write([]byte(allocID))

	otherBytes, err := base64.URLEncoding.DecodeString(otherMigrateToken)
	if err != nil {
		return false
	}
	return subtle.ConstantTimeCompare(h.Sum(nil), otherBytes) == 1
}
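// exampleMigrateTokenRoundTrip is an illustrative sketch, not part of the
// upstream file: the node that owns the data derives a token from its secret
// ID and the alloc ID, and CompareMigrateToken recomputes the keyed hash and
// compares it in constant time. The IDs below are made-up values.
func exampleMigrateTokenRoundTrip() {
	token, err := GenerateMigrateToken("alloc-1234", "node-secret-abcd")
	if err != nil {
		panic(err)
	}

	fmt.Println(CompareMigrateToken("alloc-1234", "node-secret-abcd", token)) // true
	fmt.Println(CompareMigrateToken("alloc-1234", "wrong-secret", token))     // false
}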
// ParsePortRanges parses the passed port range string and returns a list of the
// ports. The specification is a comma separated list of either port numbers or
// port ranges. A port number is a single integer and a port range is two
// integers separated by a hyphen. As an example, the following spec would
// convert to: ParsePortRanges("10,12-14,16") -> []uint64{10, 12, 13, 14, 16}
func ParsePortRanges(spec string) ([]uint64, error) {
	parts := strings.Split(spec, ",")

	// Hot path the empty case
	if len(parts) == 1 && parts[0] == "" {
		return nil, nil
	}

	ports := make(map[uint64]struct{})
	for _, part := range parts {
		part = strings.TrimSpace(part)
		rangeParts := strings.Split(part, "-")
		l := len(rangeParts)
		switch l {
		case 1:
			if val := rangeParts[0]; val == "" {
				return nil, fmt.Errorf("can't specify empty port")
			} else {
				port, err := strconv.ParseUint(val, 10, 0)
				if err != nil {
					return nil, err
				}
				ports[port] = struct{}{}
			}
		case 2:
			// We are parsing a range
			start, err := strconv.ParseUint(rangeParts[0], 10, 0)
			if err != nil {
				return nil, err
			}

			end, err := strconv.ParseUint(rangeParts[1], 10, 0)
			if err != nil {
				return nil, err
			}

			if end < start {
				return nil, fmt.Errorf("invalid range: ending value (%v) is less than starting value (%v)", end, start)
			}

			for i := start; i <= end; i++ {
				ports[i] = struct{}{}
			}
		default:
			return nil, fmt.Errorf("can only parse single port numbers or port ranges (ex. 80,100-120,150)")
		}
	}

	var results []uint64
	for port := range ports {
		results = append(results, port)
	}

	sort.Slice(results, func(i, j int) bool {
		return results[i] < results[j]
	})
	return results, nil
}
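// exampleParsePortRanges is an illustrative sketch, not part of the upstream
// file, showing ParsePortRanges on a mixed spec: whitespace is tolerated,
// duplicates are collapsed, and the result is sorted.
func exampleParsePortRanges() {
	ports, err := ParsePortRanges("22, 80, 8000-8003, 80")
	if err != nil {
		panic(err)
	}
	fmt.Println(ports) // [22 80 8000 8001 8002 8003]

	// Ranges must be ascending; a descending range yields an "invalid range" error.
	if _, err := ParsePortRanges("9000-8000"); err != nil {
		fmt.Println(err)
	}
}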