github.com/zoomfoo/nomad@v0.8.5-0.20180907175415-f28fd3a1a056/scheduler/util_test.go

package scheduler

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

// noErr is used to assert there are no errors
func noErr(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestMaterializeTaskGroups(t *testing.T) {
	job := mock.Job()
	index := materializeTaskGroups(job)
	if len(index) != 10 {
		t.Fatalf("Bad: %#v", index)
	}

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("my-job.web[%d]", i)
		tg, ok := index[name]
		if !ok {
			t.Fatalf("bad")
		}
		if tg != job.TaskGroups[0] {
			t.Fatalf("bad")
		}
	}
}

func TestDiffAllocs(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		"dead":      deadNode,
		"drainNode": drainNode,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		{
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		{
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Evict the 11th
		{
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
			Job:    oldJob,
		},

		// Migrate the 3rd
		{
			ID:     uuid.Generate(),
			NodeID: "drainNode",
			Name:   "my-job.web[2]",
			Job:    oldJob,
			DesiredTransition: structs.DesiredTransition{
				Migrate: helper.BoolToPtr(true),
			},
		},
		// Mark the 4th lost
		{
			ID:     uuid.Generate(),
			NodeID: "dead",
			Name:   "my-job.web[3]",
			Job:    oldJob,
		},
	}

	// Have three terminal allocs
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[4]": {
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[4]",
			Job:    job,
		},
		"my-job.web[5]": {
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[5]",
			Job:    job,
		},
		"my-job.web[6]": {
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[6]",
			Job:    job,
		},
	}

	diff := diffAllocs(job, tainted, required, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc, the surplus my-job.web[10]
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the fourth alloc
	if len(migrate) != 1 || migrate[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should mark the fifth alloc as lost
	if len(lost) != 1 || lost[0].Alloc != allocs[4] {
		t.Fatalf("bad: %#v", lost)
	}

	// We should place 6
	if len(place) != 6 {
		t.Fatalf("bad: %#v", place)
	}

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for name, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if name == allocTuple.Name {
				if !reflect.DeepEqual(alloc, allocTuple.Alloc) {
					t.Fatalf("expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
				}
			}
		}
	}
}

func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		deadNode.ID:  deadNode,
		drainNode.ID: drainNode,
	}

	// Create four healthy nodes, plus entries for the draining and dead nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"},
		{ID: "pipe"}, {ID: drainNode.ID}, {ID: deadNode.ID}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	allocs := []*structs.Allocation{
		// Update allocation on baz
		{
			ID:     uuid.Generate(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		{
			ID:     uuid.Generate(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Migrate allocation on the draining node.
		{
			ID:     uuid.Generate(),
			NodeID: drainNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
			DesiredTransition: structs.DesiredTransition{
				Migrate: helper.BoolToPtr(true),
			},
		},
		// Mark as lost on a dead node
		{
			ID:     uuid.Generate(),
			NodeID: deadNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
	}

	// Have one terminal alloc
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[0]": {
			ID:     uuid.Generate(),
			NodeID: "pipe",
			Name:   "my-job.web[0]",
			Job:    job,
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// Nothing should be stopped; the alloc on the draining node is migrated.
	if len(stop) != 0 {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the third alloc, which sits on the draining node.
	if len(migrate) != 1 || migrate[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should mark the fourth alloc as lost
	if len(lost) != 1 || lost[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", lost)
	}

	// We should place 2: a new alloc on "foo" and the replacement for the
	// terminal alloc on "pipe"
	if l := len(place); l != 2 {
		t.Fatalf("bad: %#v", l)
	}

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for _, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if alloc.NodeID == allocTuple.Alloc.NodeID {
				if !reflect.DeepEqual(alloc, allocTuple.Alloc) {
					t.Fatalf("expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
				}
			}
		}
	}
}

func TestReadyNodesInDCs(t *testing.T) {
	state := state.TestStateStore(t)
	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true

	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	nodes, dc, err := readyNodesInDCs(state, []string{"dc1", "dc2"})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(nodes) != 2 {
		t.Fatalf("bad: %v", nodes)
	}
	if nodes[0].ID == node3.ID || nodes[1].ID == node3.ID {
		t.Fatalf("Bad: %#v", nodes)
	}
	if count, ok := dc["dc1"]; !ok || count != 1 {
		t.Fatalf("Bad: dc1 count %v", count)
	}
	if count, ok := dc["dc2"]; !ok || count != 1 {
		t.Fatalf("Bad: dc2 count %v", count)
	}
}

func TestRetryMax(t *testing.T) {
	calls := 0
	bad := func() (bool, error) {
		calls += 1
		return false, nil
	}
	err := retryMax(3, bad, nil)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 3 {
		t.Fatalf("mismatch")
	}

	calls = 0
	first := true
	reset := func() bool {
		if calls == 3 && first {
			first = false
			return true
		}
		return false
	}
	err = retryMax(3, bad, reset)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 6 {
		t.Fatalf("mismatch")
	}

	calls = 0
	good := func() (bool, error) {
		calls += 1
		return true, nil
	}
	err = retryMax(3, good, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if calls != 1 {
		t.Fatalf("mismatch")
	}
}
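
// retryMaxUsageSketch is an editor's illustrative sketch, not part of the
// original test suite. It restates the contract exercised by TestRetryMax
// above: the callback reports whether it is done, retryMax gives up after the
// given number of attempts, and an optional reset callback may grant a fresh
// attempt budget. The callback names used here are hypothetical.
func retryMaxUsageSketch() error {
	attempts := 0
	attemptWork := func() (bool, error) {
		attempts++
		return attempts >= 2, nil // succeed on the second attempt
	}
	neverReset := func() bool { return false } // never extend the budget
	return retryMax(3, attemptWork, neverReset)
}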

func TestTaintedNodes(t *testing.T) {
	state := state.TestStateStore(t)
	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true
	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	allocs := []*structs.Allocation{
		{NodeID: node1.ID},
		{NodeID: node2.ID},
		{NodeID: node3.ID},
		{NodeID: node4.ID},
		{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
	}
	tainted, err := taintedNodes(state, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(tainted) != 3 {
		t.Fatalf("bad: %v", tainted)
	}

	if _, ok := tainted[node1.ID]; ok {
		t.Fatalf("Bad: %v", tainted)
	}
	if _, ok := tainted[node2.ID]; ok {
		t.Fatalf("Bad: %v", tainted)
	}

	if node, ok := tainted[node3.ID]; !ok || node == nil {
		t.Fatalf("Bad: %v", tainted)
	}

	if node, ok := tainted[node4.ID]; !ok || node == nil {
		t.Fatalf("Bad: %v", tainted)
	}

	if node, ok := tainted["12345678-abcd-efab-cdef-123456789abc"]; !ok || node != nil {
		t.Fatalf("Bad: %v", tainted)
	}
}

func TestShuffleNodes(t *testing.T) {
	// Use a large number of nodes to make the probability of shuffling to the
	// original order very low.
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	orig := make([]*structs.Node, len(nodes))
	copy(orig, nodes)
	shuffleNodes(nodes)
	if reflect.DeepEqual(nodes, orig) {
		t.Fatalf("should not match")
	}
}

func TestTasksUpdated(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name

	if tasksUpdated(j1, j2, name) {
		t.Fatalf("bad")
	}

	j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	if !tasksUpdated(j1, j2, name) {
		t.Fatalf("bad")
	}

	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Name = "foo"
	if !tasksUpdated(j1, j3, name) {
		t.Fatalf("bad")
	}

	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Driver = "foo"
	if !tasksUpdated(j1, j4, name) {
		t.Fatalf("bad")
	}

	j5 := mock.Job()
	j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks,
		j5.TaskGroups[0].Tasks[0])
	if !tasksUpdated(j1, j5, name) {
		t.Fatalf("bad")
	}

	j6 := mock.Job()
	j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{
		{Label: "http", Value: 0},
		{Label: "https", Value: 0},
		{Label: "admin", Value: 0},
	}
	if !tasksUpdated(j1, j6, name) {
		t.Fatalf("bad")
	}

	j7 := mock.Job()
	j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE"
	if !tasksUpdated(j1, j7, name) {
		t.Fatalf("bad")
	}

	j8 := mock.Job()
	j8.TaskGroups[0].Tasks[0].User = "foo"
	if !tasksUpdated(j1, j8, name) {
		t.Fatalf("bad")
	}

	j9 := mock.Job()
	j9.TaskGroups[0].Tasks[0].Artifacts = []*structs.TaskArtifact{
		{
			GetterSource: "http://foo.com/bar",
		},
	}
	if !tasksUpdated(j1, j9, name) {
		t.Fatalf("bad")
	}

	j10 := mock.Job()
	j10.TaskGroups[0].Tasks[0].Meta["baz"] = "boom"
	if !tasksUpdated(j1, j10, name) {
		t.Fatalf("bad")
	}

	j11 := mock.Job()
	j11.TaskGroups[0].Tasks[0].Resources.CPU = 1337
	if !tasksUpdated(j1, j11, name) {
		t.Fatalf("bad")
	}

	j12 := mock.Job()
	j12.TaskGroups[0].Tasks[0].Resources.Networks[0].MBits = 100
	if !tasksUpdated(j1, j12, name) {
		t.Fatalf("bad")
	}

	j13 := mock.Job()
	j13.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts[0].Label = "foobar"
	if !tasksUpdated(j1, j13, name) {
		t.Fatalf("bad")
	}

	j14 := mock.Job()
	j14.TaskGroups[0].Tasks[0].Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "foo", Value: 1312}}
	if !tasksUpdated(j1, j14, name) {
		t.Fatalf("bad")
	}

	j15 := mock.Job()
	j15.TaskGroups[0].Tasks[0].Vault = &structs.Vault{Policies: []string{"foo"}}
	if !tasksUpdated(j1, j15, name) {
		t.Fatalf("bad")
	}

	j16 := mock.Job()
	j16.TaskGroups[0].EphemeralDisk.Sticky = true
	if !tasksUpdated(j1, j16, name) {
		t.Fatal("bad")
	}

	// Change group meta
	j17 := mock.Job()
	j17.TaskGroups[0].Meta["j17_test"] = "roll_baby_roll"
	if !tasksUpdated(j1, j17, name) {
		t.Fatal("bad")
	}

	// Change job meta
	j18 := mock.Job()
	j18.Meta["j18_test"] = "roll_baby_roll"
	if !tasksUpdated(j1, j18, name) {
		t.Fatal("bad")
	}
}

func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
	}
	diff := &diffResult{}

	limit := 2
	if !evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned true")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 2 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
	}
	diff := &diffResult{}

	limit := 4
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestSetStatus(t *testing.T) {
	h := NewHarness(t)
	logger := testlog.Logger(t)
	eval := mock.Eval()
	status := "a"
	desc := "b"
	// Arguments, as verified by the assertions below: logger, harness, eval,
	// nextEval, blockedEval, failed task group metrics, status, description,
	// queued allocs, deployment ID.
	if err := setStatus(logger, h, eval, nil, nil, nil, status, desc, nil, ""); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval := h.Evals[0]
	if newEval.ID != eval.ID || newEval.Status != status || newEval.StatusDescription != desc {
		t.Fatalf("setStatus() submitted invalid eval: %v", newEval)
	}

	// Test next evals
	h = NewHarness(t)
	next := mock.Eval()
	if err := setStatus(logger, h, eval, next, nil, nil, status, desc, nil, ""); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.NextEval != next.ID {
		t.Fatalf("setStatus() didn't set nextEval correctly: %v", newEval)
	}

	// Test blocked evals
	h = NewHarness(t)
	blocked := mock.Eval()
	if err := setStatus(logger, h, eval, nil, blocked, nil, status, desc, nil, ""); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.BlockedEval != blocked.ID {
		t.Fatalf("setStatus() didn't set BlockedEval correctly: %v", newEval)
	}

	// Test metrics
	h = NewHarness(t)
	metrics := map[string]*structs.AllocMetric{"foo": nil}
	if err := setStatus(logger, h, eval, nil, nil, metrics, status, desc, nil, ""); err != nil {
t.Fatalf("setStatus() failed: %v", err) 679 } 680 681 if len(h.Evals) != 1 { 682 t.Fatalf("setStatus() didn't update plan: %v", h.Evals) 683 } 684 685 newEval = h.Evals[0] 686 if !reflect.DeepEqual(newEval.FailedTGAllocs, metrics) { 687 t.Fatalf("setStatus() didn't set failed task group metrics correctly: %v", newEval) 688 } 689 690 // Test queued allocations 691 h = NewHarness(t) 692 queuedAllocs := map[string]int{"web": 1} 693 694 if err := setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, ""); err != nil { 695 t.Fatalf("setStatus() failed: %v", err) 696 } 697 698 if len(h.Evals) != 1 { 699 t.Fatalf("setStatus() didn't update plan: %v", h.Evals) 700 } 701 702 newEval = h.Evals[0] 703 if !reflect.DeepEqual(newEval.QueuedAllocations, queuedAllocs) { 704 t.Fatalf("setStatus() didn't set failed task group metrics correctly: %v", newEval) 705 } 706 707 h = NewHarness(t) 708 dID := uuid.Generate() 709 if err := setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, dID); err != nil { 710 t.Fatalf("setStatus() failed: %v", err) 711 } 712 713 if len(h.Evals) != 1 { 714 t.Fatalf("setStatus() didn't update plan: %v", h.Evals) 715 } 716 717 newEval = h.Evals[0] 718 if newEval.DeploymentID != dID { 719 t.Fatalf("setStatus() didn't set deployment id correctly: %v", newEval) 720 } 721 } 722 723 func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) { 724 state, ctx := testContext(t) 725 eval := mock.Eval() 726 job := mock.Job() 727 728 node := mock.Node() 729 noErr(t, state.UpsertNode(900, node)) 730 731 // Register an alloc 732 alloc := &structs.Allocation{ 733 Namespace: structs.DefaultNamespace, 734 ID: uuid.Generate(), 735 EvalID: eval.ID, 736 NodeID: node.ID, 737 JobID: job.ID, 738 Job: job, 739 Resources: &structs.Resources{ 740 CPU: 2048, 741 MemoryMB: 2048, 742 }, 743 DesiredStatus: structs.AllocDesiredStatusRun, 744 TaskGroup: "web", 745 } 746 alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources} 747 noErr(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID))) 748 noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc})) 749 750 // Create a new task group that prevents in-place updates. 751 tg := &structs.TaskGroup{} 752 *tg = *job.TaskGroups[0] 753 task := &structs.Task{Name: "FOO"} 754 tg.Tasks = nil 755 tg.Tasks = append(tg.Tasks, task) 756 757 updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}} 758 stack := NewGenericStack(false, ctx) 759 760 // Do the inplace update. 
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 || len(inplace) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

func TestInplaceUpdate_NoMatch(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that requires too many resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 9999}
	tg.Tasks[0].Resources = resource

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 || len(inplace) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

func TestInplaceUpdate_Success(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		TaskGroup: job.TaskGroups[0].Name,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)))
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that updates the resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 737}
	tg.Tasks[0].Resources = resource
	newServices := []*structs.Service{
		{
			Name:      "dummy-service",
			PortLabel: "http",
		},
		{
			Name:      "dummy-service2",
			PortLabel: "http",
		},
	}

	// Delete service 2
	tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

	// Add the new services
	tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)
	stack.SetJob(job)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 0 || len(inplace) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if inplace[0].Alloc.ID != alloc.ID {
		t.Fatalf("inplaceUpdate returned the wrong in-place updated alloc: %#v", inplace)
	}

	// Get the alloc we inserted.
	a := inplace[0].Alloc // TODO(sean@): Verify this is correct vs: ctx.plan.NodeAllocation[alloc.NodeID][0]
	if a.Job == nil {
		t.Fatalf("bad")
	}

	if len(a.Job.TaskGroups) != 1 {
		t.Fatalf("bad")
	}

	if len(a.Job.TaskGroups[0].Tasks) != 1 {
		t.Fatalf("bad")
	}

	if len(a.Job.TaskGroups[0].Tasks[0].Services) != 3 {
		t.Fatalf("Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services))
	}

	serviceNames := make(map[string]struct{}, 3)
	for _, consulService := range a.Job.TaskGroups[0].Tasks[0].Services {
		serviceNames[consulService.Name] = struct{}{}
	}
	if len(serviceNames) != 3 {
		t.Fatalf("bad")
	}

	for _, name := range []string{"dummy-service", "dummy-service2", "web-frontend"} {
		if _, found := serviceNames[name]; !found {
			t.Errorf("Expected Consul service name missing: %v", name)
		}
	}
}

func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
	}
	diff := &diffResult{}

	limit := 6
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 2 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 2", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}
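
// Editor's note (not in the original file): taken together, the three
// TestEvictAndPlace_* cases pin down the limit semantics of evictAndPlace.
// It stops and re-places at most *limit of the given allocations, decrements
// *limit by the number it handled, and returns true only when the limit was
// exhausted before every allocation could be re-placed.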

func TestTaskGroupConstraints(t *testing.T) {
	constr := &structs.Constraint{RTarget: "bar"}
	constr2 := &structs.Constraint{LTarget: "foo"}
	constr3 := &structs.Constraint{Operand: "<"}

	tg := &structs.TaskGroup{
		Name:          "web",
		Count:         10,
		Constraints:   []*structs.Constraint{constr},
		EphemeralDisk: &structs.EphemeralDisk{},
		Tasks: []*structs.Task{
			{
				Driver: "exec",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr2},
			},
			{
				Driver: "docker",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr3},
			},
		},
	}

	// Build the expected values.
	expConstr := []*structs.Constraint{constr, constr2, constr3}
	expDrivers := map[string]struct{}{"exec": {}, "docker": {}}
	expSize := &structs.Resources{
		CPU:      1000,
		MemoryMB: 512,
	}

	actConstraints := taskGroupConstraints(tg)
	if !reflect.DeepEqual(actConstraints.constraints, expConstr) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.constraints, expConstr)
	}
	if !reflect.DeepEqual(actConstraints.drivers, expDrivers) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.drivers, expDrivers)
	}
	if !reflect.DeepEqual(actConstraints.size, expSize) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.size, expSize)
	}
}

func TestProgressMade(t *testing.T) {
	noopPlan := &structs.PlanResult{}
	if progressMade(nil) || progressMade(noopPlan) {
		t.Fatal("no progress plan marked as making progress")
	}

	m := map[string][]*structs.Allocation{
		"foo": {mock.Alloc()},
	}
	both := &structs.PlanResult{
		NodeAllocation: m,
		NodeUpdate:     m,
	}
	update := &structs.PlanResult{NodeUpdate: m}
	alloc := &structs.PlanResult{NodeAllocation: m}
	deployment := &structs.PlanResult{Deployment: mock.Deployment()}
	deploymentUpdates := &structs.PlanResult{
		DeploymentUpdates: []*structs.DeploymentStatusUpdate{
			{DeploymentID: uuid.Generate()},
		},
	}
	if !(progressMade(both) && progressMade(update) && progressMade(alloc) &&
		progressMade(deployment) && progressMade(deploymentUpdates)) {
		t.Fatal("bad")
	}
}

func TestDesiredUpdates(t *testing.T) {
	tg1 := &structs.TaskGroup{Name: "foo"}
	tg2 := &structs.TaskGroup{Name: "bar"}
	a2 := &structs.Allocation{TaskGroup: "bar"}

	place := []allocTuple{
		{TaskGroup: tg1},
		{TaskGroup: tg1},
		{TaskGroup: tg1},
		{TaskGroup: tg2},
	}
	stop := []allocTuple{
		{TaskGroup: tg2, Alloc: a2},
		{TaskGroup: tg2, Alloc: a2},
	}
	ignore := []allocTuple{
		{TaskGroup: tg1},
	}
	migrate := []allocTuple{
		{TaskGroup: tg2},
	}
	inplace := []allocTuple{
		{TaskGroup: tg1},
		{TaskGroup: tg1},
	}
	destructive := []allocTuple{
		{TaskGroup: tg1},
		{TaskGroup: tg2},
		{TaskGroup: tg2},
	}
	diff := &diffResult{
		place:   place,
		stop:    stop,
		ignore:  ignore,
		migrate: migrate,
	}

	expected := map[string]*structs.DesiredUpdates{
		"foo": {
			Place:             3,
			Ignore:            1,
			InPlaceUpdate:     2,
			DestructiveUpdate: 1,
		},
		"bar": {
			Place:             1,
			Stop:              2,
			Migrate:           1,
			DestructiveUpdate: 2,
		},
	}

	desired := desiredUpdates(diff, inplace, destructive)
	if !reflect.DeepEqual(desired, expected) {
		t.Fatalf("desiredUpdates() returned %#v; want %#v", desired, expected)
	}
}

func TestUtil_AdjustQueuedAllocations(t *testing.T) {
	logger := testlog.Logger(t)
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc2.CreateIndex = 4
	alloc2.ModifyIndex = 4
	alloc3 := mock.Alloc()
	alloc3.CreateIndex = 3
	alloc3.ModifyIndex = 5
	alloc4 := mock.Alloc()
	alloc4.CreateIndex = 6
	alloc4.ModifyIndex = 8

	planResult := structs.PlanResult{
		NodeUpdate: map[string][]*structs.Allocation{
			"node-1": {alloc1},
		},
		NodeAllocation: map[string][]*structs.Allocation{
			"node-1": {
				alloc2,
			},
			"node-2": {
				alloc3, alloc4,
			},
		},
		RefreshIndex: 3,
		AllocIndex:   16, // Should not be considered
	}

	queuedAllocs := map[string]int{"web": 2}
	adjustQueuedAllocations(logger, &planResult, queuedAllocs)

	if queuedAllocs["web"] != 1 {
		t.Fatalf("expected: %v, actual: %v", 1, queuedAllocs["web"])
	}
}

func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) {
	node := mock.Node()
	node.Status = structs.NodeStatusDown
	alloc1 := mock.Alloc()
	alloc1.NodeID = node.ID
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop

	alloc2 := mock.Alloc()
	alloc2.NodeID = node.ID
	alloc2.DesiredStatus = structs.AllocDesiredStatusStop
	alloc2.ClientStatus = structs.AllocClientStatusRunning

	alloc3 := mock.Alloc()
	alloc3.NodeID = node.ID
	alloc3.DesiredStatus = structs.AllocDesiredStatusStop
	alloc3.ClientStatus = structs.AllocClientStatusComplete

	alloc4 := mock.Alloc()
	alloc4.NodeID = node.ID
	alloc4.DesiredStatus = structs.AllocDesiredStatusStop
	alloc4.ClientStatus = structs.AllocClientStatusFailed

	allocs := []*structs.Allocation{alloc1, alloc2, alloc3, alloc4}
	plan := structs.Plan{
		NodeUpdate: make(map[string][]*structs.Allocation),
	}
	tainted := map[string]*structs.Node{node.ID: node}

	updateNonTerminalAllocsToLost(&plan, tainted, allocs)

	allocsLost := make([]string, 0, 2)
	for _, alloc := range plan.NodeUpdate[node.ID] {
		allocsLost = append(allocsLost, alloc.ID)
	}
	expected := []string{alloc1.ID, alloc2.ID}
	if !reflect.DeepEqual(allocsLost, expected) {
		t.Fatalf("actual: %v, expected: %v", allocsLost, expected)
	}

	// Update the node status to ready and try again
	plan = structs.Plan{
		NodeUpdate: make(map[string][]*structs.Allocation),
	}
	node.Status = structs.NodeStatusReady
	updateNonTerminalAllocsToLost(&plan, tainted, allocs)

	allocsLost = make([]string, 0, 2)
	for _, alloc := range plan.NodeUpdate[node.ID] {
		allocsLost = append(allocsLost, alloc.ID)
	}
	expected = []string{}
	if !reflect.DeepEqual(allocsLost, expected) {
		t.Fatalf("actual: %v, expected: %v", allocsLost, expected)
	}
}
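
// assertDiffCounts is an editor's sketch of a helper the diff tests above
// could share; it is not part of the original file and assumes diffAllocs
// and diffSystemAllocs hand back a *diffResult as those tests use it.
func assertDiffCounts(t *testing.T, diff *diffResult, place, update, migrate, stop, ignore, lost int) {
	t.Helper()
	if got := len(diff.place); got != place {
		t.Fatalf("expected %d place, got %d: %#v", place, got, diff.place)
	}
	if got := len(diff.update); got != update {
		t.Fatalf("expected %d update, got %d: %#v", update, got, diff.update)
	}
	if got := len(diff.migrate); got != migrate {
		t.Fatalf("expected %d migrate, got %d: %#v", migrate, got, diff.migrate)
	}
	if got := len(diff.stop); got != stop {
		t.Fatalf("expected %d stop, got %d: %#v", stop, got, diff.stop)
	}
	if got := len(diff.ignore); got != ignore {
		t.Fatalf("expected %d ignore, got %d: %#v", ignore, got, diff.ignore)
	}
	if got := len(diff.lost); got != lost {
		t.Fatalf("expected %d lost, got %d: %#v", lost, got, diff.lost)
	}
}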