github.com/ncodes/nomad@v0.5.7-0.20170403112158-97adf4a74fb3/scheduler/util_test.go

package scheduler

import (
	"fmt"
	"log"
	"os"
	"reflect"
	"testing"

	"github.com/ncodes/nomad/nomad/mock"
	"github.com/ncodes/nomad/nomad/state"
	"github.com/ncodes/nomad/nomad/structs"
)

// noErr is used to assert there are no errors
func noErr(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestMaterializeTaskGroups(t *testing.T) {
	job := mock.Job()
	index := materializeTaskGroups(job)
	if len(index) != 10 {
		t.Fatalf("Bad: %#v", index)
	}

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("my-job.web[%d]", i)
		tg, ok := index[name]
		if !ok {
			t.Fatalf("bad")
		}
		if tg != job.TaskGroups[0] {
			t.Fatalf("bad")
		}
	}
}

func TestDiffAllocs(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		"dead":      deadNode,
		"drainNode": drainNode,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Evict the 11th
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
			Job:    oldJob,
		},

		// Migrate the 3rd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "drainNode",
			Name:   "my-job.web[2]",
			Job:    oldJob,
		},
		// Mark the 4th lost
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[3]",
			Job:    oldJob,
		},
	}

	// Have three terminal allocs
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[4]": &structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[4]",
			Job:    job,
		},
		"my-job.web[5]": &structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[5]",
			Job:    job,
		},
		"my-job.web[6]": &structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[6]",
			Job:    job,
		},
	}

	diff := diffAllocs(job, tainted, required, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the 3rd alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the 4th alloc
	if len(migrate) != 1 || migrate[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should mark the 5th alloc as lost
	if len(lost) != 1 || lost[0].Alloc != allocs[4] {
		t.Fatalf("bad: %#v", lost)
	}

	// We should place 6
	if len(place) != 6 {
		t.Fatalf("bad: %#v", place)
	}

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for name, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if name == allocTuple.Name {
				if !reflect.DeepEqual(alloc, allocTuple.Alloc) {
					t.Fatalf("expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
				}
			}
		}
	}
}

func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		deadNode.ID:  deadNode,
		drainNode.ID: drainNode,
	}

	// Create four healthy nodes plus the drained and dead nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"},
		{ID: "pipe"}, {ID: drainNode.ID}, {ID: deadNode.ID}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	allocs := []*structs.Allocation{
		// Update allocation on baz
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Stop allocation on draining node.
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: drainNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
		// Mark as lost on a dead node
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: deadNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
	}

	// Have one terminal alloc
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[0]": &structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "pipe",
			Name:   "my-job.web[0]",
			Job:    job,
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// There should be no migrates.
	if len(migrate) != 0 {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should mark the fourth alloc as lost
	if len(lost) != 1 || lost[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", lost)
	}

	// We should place 2
	if l := len(place); l != 2 {
		t.Fatalf("bad: %#v", l)
	}

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for _, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if alloc.NodeID == allocTuple.Alloc.NodeID {
				if !reflect.DeepEqual(alloc, allocTuple.Alloc) {
					t.Fatalf("expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
				}
			}
		}
	}
}

func TestReadyNodesInDCs(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true

	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	nodes, dc, err := readyNodesInDCs(state, []string{"dc1", "dc2"})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(nodes) != 2 {
		t.Fatalf("bad: %v", nodes)
	}
	if nodes[0].ID == node3.ID || nodes[1].ID == node3.ID {
		t.Fatalf("Bad: %#v", nodes)
	}
	if count, ok := dc["dc1"]; !ok || count != 1 {
		t.Fatalf("Bad: dc1 count %v", count)
	}
	if count, ok := dc["dc2"]; !ok || count != 1 {
		t.Fatalf("Bad: dc2 count %v", count)
	}
}

func TestRetryMax(t *testing.T) {
	calls := 0
	bad := func() (bool, error) {
		calls += 1
		return false, nil
	}
	err := retryMax(3, bad, nil)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 3 {
		t.Fatalf("mismatch")
	}

	calls = 0
	first := true
	reset := func() bool {
		if calls == 3 && first {
			first = false
			return true
		}
		return false
	}
	err = retryMax(3, bad, reset)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 6 {
		t.Fatalf("mismatch")
	}

	calls = 0
	good := func() (bool, error) {
		calls += 1
		return true, nil
	}
	err = retryMax(3, good, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if calls != 1 {
		t.Fatalf("mismatch")
	}
}

func TestTaintedNodes(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true
	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	allocs := []*structs.Allocation{
		&structs.Allocation{NodeID: node1.ID},
		&structs.Allocation{NodeID: node2.ID},
		&structs.Allocation{NodeID: node3.ID},
		&structs.Allocation{NodeID: node4.ID},
		&structs.Allocation{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
	}
	tainted, err := taintedNodes(state, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(tainted) != 3 {
%v", tainted) 409 } 410 411 if _, ok := tainted[node1.ID]; ok { 412 t.Fatalf("Bad: %v", tainted) 413 } 414 if _, ok := tainted[node2.ID]; ok { 415 t.Fatalf("Bad: %v", tainted) 416 } 417 418 if node, ok := tainted[node3.ID]; !ok || node == nil { 419 t.Fatalf("Bad: %v", tainted) 420 } 421 422 if node, ok := tainted[node4.ID]; !ok || node == nil { 423 t.Fatalf("Bad: %v", tainted) 424 } 425 426 if node, ok := tainted["12345678-abcd-efab-cdef-123456789abc"]; !ok || node != nil { 427 t.Fatalf("Bad: %v", tainted) 428 } 429 } 430 431 func TestShuffleNodes(t *testing.T) { 432 // Use a large number of nodes to make the probability of shuffling to the 433 // original order very low. 434 nodes := []*structs.Node{ 435 mock.Node(), 436 mock.Node(), 437 mock.Node(), 438 mock.Node(), 439 mock.Node(), 440 mock.Node(), 441 mock.Node(), 442 mock.Node(), 443 mock.Node(), 444 mock.Node(), 445 } 446 orig := make([]*structs.Node, len(nodes)) 447 copy(orig, nodes) 448 shuffleNodes(nodes) 449 if reflect.DeepEqual(nodes, orig) { 450 t.Fatalf("should not match") 451 } 452 } 453 454 func TestTasksUpdated(t *testing.T) { 455 j1 := mock.Job() 456 j2 := mock.Job() 457 name := j1.TaskGroups[0].Name 458 459 if tasksUpdated(j1, j2, name) { 460 t.Fatalf("bad") 461 } 462 463 j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 464 if !tasksUpdated(j1, j2, name) { 465 t.Fatalf("bad") 466 } 467 468 j3 := mock.Job() 469 j3.TaskGroups[0].Tasks[0].Name = "foo" 470 if !tasksUpdated(j1, j3, name) { 471 t.Fatalf("bad") 472 } 473 474 j4 := mock.Job() 475 j4.TaskGroups[0].Tasks[0].Driver = "foo" 476 if !tasksUpdated(j1, j4, name) { 477 t.Fatalf("bad") 478 } 479 480 j5 := mock.Job() 481 j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks, 482 j5.TaskGroups[0].Tasks[0]) 483 if !tasksUpdated(j1, j5, name) { 484 t.Fatalf("bad") 485 } 486 487 j6 := mock.Job() 488 j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{ 489 {Label: "http", Value: 0}, 490 {Label: "https", Value: 0}, 491 {Label: "admin", Value: 0}, 492 } 493 if !tasksUpdated(j1, j6, name) { 494 t.Fatalf("bad") 495 } 496 497 j7 := mock.Job() 498 j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE" 499 if !tasksUpdated(j1, j7, name) { 500 t.Fatalf("bad") 501 } 502 503 j8 := mock.Job() 504 j8.TaskGroups[0].Tasks[0].User = "foo" 505 if !tasksUpdated(j1, j8, name) { 506 t.Fatalf("bad") 507 } 508 509 j9 := mock.Job() 510 j9.TaskGroups[0].Tasks[0].Artifacts = []*structs.TaskArtifact{ 511 { 512 GetterSource: "http://foo.com/bar", 513 }, 514 } 515 if !tasksUpdated(j1, j9, name) { 516 t.Fatalf("bad") 517 } 518 519 j10 := mock.Job() 520 j10.TaskGroups[0].Tasks[0].Meta["baz"] = "boom" 521 if !tasksUpdated(j1, j10, name) { 522 t.Fatalf("bad") 523 } 524 525 j11 := mock.Job() 526 j11.TaskGroups[0].Tasks[0].Resources.CPU = 1337 527 if !tasksUpdated(j1, j11, name) { 528 t.Fatalf("bad") 529 } 530 531 j12 := mock.Job() 532 j12.TaskGroups[0].Tasks[0].Resources.Networks[0].MBits = 100 533 if !tasksUpdated(j1, j12, name) { 534 t.Fatalf("bad") 535 } 536 537 j13 := mock.Job() 538 j13.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts[0].Label = "foobar" 539 if !tasksUpdated(j1, j13, name) { 540 t.Fatalf("bad") 541 } 542 543 j14 := mock.Job() 544 j14.TaskGroups[0].Tasks[0].Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "foo", Value: 1312}} 545 if !tasksUpdated(j1, j14, name) { 546 t.Fatalf("bad") 547 } 548 549 j15 := mock.Job() 550 j15.TaskGroups[0].Tasks[0].Vault = &structs.Vault{Policies: []string{"foo"}} 551 if !tasksUpdated(j1, j15, name) { 
		t.Fatalf("bad")
	}

	j16 := mock.Job()
	j16.TaskGroups[0].EphemeralDisk.Sticky = true
	if !tasksUpdated(j1, j16, name) {
		t.Fatal("bad")
	}

	// Change group meta
	j17 := mock.Job()
	j17.TaskGroups[0].Meta["j17_test"] = "roll_baby_roll"
	if !tasksUpdated(j1, j17, name) {
		t.Fatal("bad")
	}

	// Change job meta
	j18 := mock.Job()
	j18.Meta["j18_test"] = "roll_baby_roll"
	if !tasksUpdated(j1, j18, name) {
		t.Fatal("bad")
	}
}

func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 2
	if !evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned true")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 2 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 4
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestSetStatus(t *testing.T) {
	h := NewHarness(t)
	logger := log.New(os.Stderr, "", log.LstdFlags)
	eval := mock.Eval()
	status := "a"
	desc := "b"
	if err := setStatus(logger, h, eval, nil, nil, nil, status, desc, nil); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval := h.Evals[0]
	if newEval.ID != eval.ID || newEval.Status != status || newEval.StatusDescription != desc {
		t.Fatalf("setStatus() submitted invalid eval: %v", newEval)
	}

	// Test next evals
	h = NewHarness(t)
	next := mock.Eval()
	if err := setStatus(logger, h, eval, next, nil, nil, status, desc, nil); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.NextEval != next.ID {
		t.Fatalf("setStatus() didn't set nextEval correctly: %v", newEval)
	}

	// Test blocked evals
	h = NewHarness(t)
	blocked := mock.Eval()
	if err := setStatus(logger, h, eval, nil, blocked, nil, status, desc, nil); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.BlockedEval != blocked.ID {
		t.Fatalf("setStatus() didn't set BlockedEval correctly: %v", newEval)
	}

	// Test metrics
	h = NewHarness(t)
	metrics := map[string]*structs.AllocMetric{"foo": nil}
	if err := setStatus(logger, h, eval, nil, nil, metrics, status, desc, nil); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if !reflect.DeepEqual(newEval.FailedTGAllocs, metrics) {
		t.Fatalf("setStatus() didn't set failed task group metrics correctly: %v", newEval)
	}

	// Test queued allocations
	h = NewHarness(t)
	queuedAllocs := map[string]int{"web": 1}

	if err := setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if !reflect.DeepEqual(newEval.QueuedAllocations, queuedAllocs) {
		t.Fatalf("setStatus() didn't set queued allocations correctly: %v", newEval)
	}
}

func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that prevents in-place updates.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	task := &structs.Task{Name: "FOO"}
	tg.Tasks = nil
	tg.Tasks = append(tg.Tasks, task)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 || len(inplace) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

func TestInplaceUpdate_NoMatch(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that requires too many resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 9999}
	tg.Tasks[0].Resources = resource

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 || len(inplace) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

func TestInplaceUpdate_Success(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:        structs.GenerateUUID(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		TaskGroup: job.TaskGroups[0].Name,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)))
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that updates the resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 737}
	tg.Tasks[0].Resources = resource
	newServices := []*structs.Service{
		{
			Name:      "dummy-service",
			PortLabel: "http",
		},
		{
			Name:      "dummy-service2",
			PortLabel: "http",
		},
	}

	// Delete service 2
	tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

	// Add the new services
	tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)
	stack.SetJob(job)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 0 || len(inplace) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if inplace[0].Alloc.ID != alloc.ID {
		t.Fatalf("inplaceUpdate returned the wrong inplace-updated alloc: %#v", inplace)
	}

	// Get the alloc we inserted.
	a := inplace[0].Alloc // TODO(sean@): Verify this is correct vs: ctx.plan.NodeAllocation[alloc.NodeID][0]
	if a.Job == nil {
		t.Fatalf("bad")
	}

	if len(a.Job.TaskGroups) != 1 {
		t.Fatalf("bad")
	}

	if len(a.Job.TaskGroups[0].Tasks) != 1 {
		t.Fatalf("bad")
	}

	if len(a.Job.TaskGroups[0].Tasks[0].Services) != 3 {
		t.Fatalf("Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services))
	}

	serviceNames := make(map[string]struct{}, 3)
	for _, consulService := range a.Job.TaskGroups[0].Tasks[0].Services {
		serviceNames[consulService.Name] = struct{}{}
	}
	if len(serviceNames) != 3 {
		t.Fatalf("bad")
	}

	for _, name := range []string{"dummy-service", "dummy-service2", "web-frontend"} {
		if _, found := serviceNames[name]; !found {
			t.Errorf("Expected consul service name missing: %v", name)
		}
	}
}

func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 6
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 2 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 2", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestTaskGroupConstraints(t *testing.T) {
	constr := &structs.Constraint{RTarget: "bar"}
	constr2 := &structs.Constraint{LTarget: "foo"}
	constr3 := &structs.Constraint{Operand: "<"}

	tg := &structs.TaskGroup{
		Name:          "web",
		Count:         10,
		Constraints:   []*structs.Constraint{constr},
		EphemeralDisk: &structs.EphemeralDisk{},
		Tasks: []*structs.Task{
			&structs.Task{
				Driver: "exec",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr2},
			},
			&structs.Task{
				Driver: "docker",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr3},
			},
		},
	}

	// Build the expected values.
	expConstr := []*structs.Constraint{constr, constr2, constr3}
	expDrivers := map[string]struct{}{"exec": struct{}{}, "docker": struct{}{}}
	expSize := &structs.Resources{
		CPU:      1000,
		MemoryMB: 512,
	}

	actConstraints := taskGroupConstraints(tg)
	if !reflect.DeepEqual(actConstraints.constraints, expConstr) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.constraints, expConstr)
	}
	if !reflect.DeepEqual(actConstraints.drivers, expDrivers) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.drivers, expDrivers)
	}
	if !reflect.DeepEqual(actConstraints.size, expSize) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.size, expSize)
	}
}

func TestProgressMade(t *testing.T) {
	noopPlan := &structs.PlanResult{}
	if progressMade(nil) || progressMade(noopPlan) {
		t.Fatal("no progress plan marked as making progress")
	}

	m := map[string][]*structs.Allocation{
		"foo": []*structs.Allocation{mock.Alloc()},
	}
	both := &structs.PlanResult{
		NodeAllocation: m,
		NodeUpdate:     m,
	}
	update := &structs.PlanResult{NodeUpdate: m}
	alloc := &structs.PlanResult{NodeAllocation: m}
	if !(progressMade(both) && progressMade(update) && progressMade(alloc)) {
		t.Fatal("bad")
	}
}

func TestDesiredUpdates(t *testing.T) {
	tg1 := &structs.TaskGroup{Name: "foo"}
	tg2 := &structs.TaskGroup{Name: "bar"}
	a2 := &structs.Allocation{TaskGroup: "bar"}

	place := []allocTuple{
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg2},
	}
	stop := []allocTuple{
		allocTuple{TaskGroup: tg2, Alloc: a2},
		allocTuple{TaskGroup: tg2, Alloc: a2},
	}
	ignore := []allocTuple{
		allocTuple{TaskGroup: tg1},
	}
	migrate := []allocTuple{
		allocTuple{TaskGroup: tg2},
	}
	inplace := []allocTuple{
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg1},
	}
	destructive := []allocTuple{
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg2},
		allocTuple{TaskGroup: tg2},
	}
	diff := &diffResult{
		place:   place,
		stop:    stop,
		ignore:  ignore,
		migrate: migrate,
	}

	expected := map[string]*structs.DesiredUpdates{
		"foo": {
			Place:             3,
			Ignore:            1,
			InPlaceUpdate:     2,
			DestructiveUpdate: 1,
		},
		"bar": {
			Place:             1,
			Stop:              2,
			Migrate:           1,
			DestructiveUpdate: 2,
		},
	}

	desired := desiredUpdates(diff, inplace, destructive)
	if !reflect.DeepEqual(desired, expected) {
		t.Fatalf("desiredUpdates() returned %#v; want %#v", desired, expected)
	}
}

func TestUtil_AdjustQueuedAllocations(t *testing.T) {
	logger := log.New(os.Stderr, "", log.LstdFlags)
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc2.CreateIndex = 4
	alloc2.ModifyIndex = 4
	alloc3 := mock.Alloc()
	alloc3.CreateIndex = 3
	alloc3.ModifyIndex = 5
	alloc4 := mock.Alloc()
	alloc4.CreateIndex = 6
	alloc4.ModifyIndex = 8

	planResult := structs.PlanResult{
		NodeUpdate: map[string][]*structs.Allocation{
			"node-1": []*structs.Allocation{alloc1},
		},
		NodeAllocation: map[string][]*structs.Allocation{
			"node-1": []*structs.Allocation{
				alloc2,
			},
			"node-2": []*structs.Allocation{
				alloc3, alloc4,
			},
		},
		RefreshIndex: 3,
		AllocIndex:   16, // Should not be considered
	}

	queuedAllocs := map[string]int{"web": 2}
	adjustQueuedAllocations(logger, &planResult, queuedAllocs)

	if queuedAllocs["web"] != 1 {
		t.Fatalf("expected: %v, actual: %v", 1, queuedAllocs["web"])
	}
}

func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) {
	node := mock.Node()
	alloc1 := mock.Alloc()
	alloc1.NodeID = node.ID
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop

	alloc2 := mock.Alloc()
	alloc2.NodeID = node.ID
	alloc2.DesiredStatus = structs.AllocDesiredStatusStop
	alloc2.ClientStatus = structs.AllocClientStatusRunning

	alloc3 := mock.Alloc()
	alloc3.NodeID = node.ID
	alloc3.DesiredStatus = structs.AllocDesiredStatusStop
	alloc3.ClientStatus = structs.AllocClientStatusComplete

	alloc4 := mock.Alloc()
	alloc4.NodeID = node.ID
	alloc4.DesiredStatus = structs.AllocDesiredStatusStop
	alloc4.ClientStatus = structs.AllocClientStatusFailed

	allocs := []*structs.Allocation{alloc1, alloc2, alloc3, alloc4}
	plan := structs.Plan{
		NodeUpdate: make(map[string][]*structs.Allocation),
	}
	tainted := map[string]*structs.Node{node.ID: node}

	updateNonTerminalAllocsToLost(&plan, tainted, allocs)

	allocsLost := make([]string, 0, 2)
	for _, alloc := range plan.NodeUpdate[node.ID] {
		allocsLost = append(allocsLost, alloc.ID)
	}
	expected := []string{alloc1.ID, alloc2.ID}
	if !reflect.DeepEqual(allocsLost, expected) {
		t.Fatalf("actual: %v, expected: %v", allocsLost, expected)
	}
}