github.com/maier/nomad@v0.4.1-0.20161110003312-a9e3d0b8549d/scheduler/generic_sched_test.go

package scheduler

import (
	"fmt"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestServiceSched_JobRegister(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has no spawned blocked eval
	if len(h.CreateEvals) != 0 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}
	if h.Evals[0].BlockedEval != "" {
		t.Fatalf("bad: %#v", h.Evals[0])
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure different ports were used.
	used := make(map[int]struct{})
	for _, alloc := range out {
		for _, resource := range alloc.TaskResources {
			for _, port := range resource.Networks[0].DynamicPorts {
				if _, ok := used[port.Value]; ok {
					t.Fatalf("Port collision %v", port.Value)
				}
				used[port.Value] = struct{}{}
			}
		}
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	job.TaskGroups[0].EphemeralDisk.Sticky = true
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	if err := h.Process(NewServiceScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure the plan allocated
	plan := h.Plans[0]
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Get an allocation and mark it as failed
	alloc := planned[4].Copy()
	alloc.ClientStatus = structs.AllocClientStatusFailed
	noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to handle the update
	eval = &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
	}
	h1 := NewHarnessWithState(t, h.State)
	if err := h1.Process(NewServiceScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have created only one new allocation
	plan = h1.Plans[0]
	var newPlanned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		newPlanned = append(newPlanned, allocList...)
	}
	if len(newPlanned) != 1 {
		t.Fatalf("bad plan: %#v", plan)
	}
	// Ensure that the new allocation was placed on the same node as the older
	// one
	if newPlanned[0].NodeID != alloc.NodeID || newPlanned[0].PreviousAllocation != alloc.ID {
		t.Fatalf("expected: %#v, actual: %#v", alloc, newPlanned[0])
	}
}

func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job with count 2 and disk as 88GB so that only one allocation
	// can fit
	job := mock.Job()
	job.TaskGroups[0].Count = 2
	job.TaskGroups[0].EphemeralDisk.SizeMB = 88 * 1024
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has a blocked eval
	if len(h.CreateEvals) != 1 {
		t.Fatalf("bad: %#v", h.CreateEvals)
	}

	// Ensure the plan allocated only one allocation
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
212 } 213 if len(planned) != 1 { 214 t.Fatalf("bad: %#v", plan) 215 } 216 217 // Lookup the allocations by JobID 218 out, err := h.State.AllocsByJob(job.ID) 219 noErr(t, err) 220 221 // Ensure only one allocation was placed 222 if len(out) != 1 { 223 t.Fatalf("bad: %#v", out) 224 } 225 226 h.AssertEvalStatus(t, structs.EvalStatusComplete) 227 } 228 229 func TestServiceSched_JobRegister_Annotate(t *testing.T) { 230 h := NewHarness(t) 231 232 // Create some nodes 233 for i := 0; i < 10; i++ { 234 node := mock.Node() 235 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 236 } 237 238 // Create a job 239 job := mock.Job() 240 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 241 242 // Create a mock evaluation to register the job 243 eval := &structs.Evaluation{ 244 ID: structs.GenerateUUID(), 245 Priority: job.Priority, 246 TriggeredBy: structs.EvalTriggerJobRegister, 247 JobID: job.ID, 248 AnnotatePlan: true, 249 } 250 251 // Process the evaluation 252 err := h.Process(NewServiceScheduler, eval) 253 if err != nil { 254 t.Fatalf("err: %v", err) 255 } 256 257 // Ensure a single plan 258 if len(h.Plans) != 1 { 259 t.Fatalf("bad: %#v", h.Plans) 260 } 261 plan := h.Plans[0] 262 263 // Ensure the plan allocated 264 var planned []*structs.Allocation 265 for _, allocList := range plan.NodeAllocation { 266 planned = append(planned, allocList...) 267 } 268 if len(planned) != 10 { 269 t.Fatalf("bad: %#v", plan) 270 } 271 272 // Lookup the allocations by JobID 273 out, err := h.State.AllocsByJob(job.ID) 274 noErr(t, err) 275 276 // Ensure all allocations placed 277 if len(out) != 10 { 278 t.Fatalf("bad: %#v", out) 279 } 280 281 h.AssertEvalStatus(t, structs.EvalStatusComplete) 282 283 // Ensure the plan had annotations. 284 if plan.Annotations == nil { 285 t.Fatalf("expected annotations") 286 } 287 288 desiredTGs := plan.Annotations.DesiredTGUpdates 289 if l := len(desiredTGs); l != 1 { 290 t.Fatalf("incorrect number of task groups; got %v; want %v", l, 1) 291 } 292 293 desiredChanges, ok := desiredTGs["web"] 294 if !ok { 295 t.Fatalf("expected task group web to have desired changes") 296 } 297 298 expected := &structs.DesiredUpdates{Place: 10} 299 if !reflect.DeepEqual(desiredChanges, expected) { 300 t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected) 301 } 302 } 303 304 func TestServiceSched_JobRegister_CountZero(t *testing.T) { 305 h := NewHarness(t) 306 307 // Create some nodes 308 for i := 0; i < 10; i++ { 309 node := mock.Node() 310 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 311 } 312 313 // Create a job and set the task group count to zero. 
314 job := mock.Job() 315 job.TaskGroups[0].Count = 0 316 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 317 318 // Create a mock evaluation to register the job 319 eval := &structs.Evaluation{ 320 ID: structs.GenerateUUID(), 321 Priority: job.Priority, 322 TriggeredBy: structs.EvalTriggerJobRegister, 323 JobID: job.ID, 324 } 325 326 // Process the evaluation 327 err := h.Process(NewServiceScheduler, eval) 328 if err != nil { 329 t.Fatalf("err: %v", err) 330 } 331 332 // Ensure there was no plan 333 if len(h.Plans) != 0 { 334 t.Fatalf("bad: %#v", h.Plans) 335 } 336 337 // Lookup the allocations by JobID 338 out, err := h.State.AllocsByJob(job.ID) 339 noErr(t, err) 340 341 // Ensure no allocations placed 342 if len(out) != 0 { 343 t.Fatalf("bad: %#v", out) 344 } 345 346 h.AssertEvalStatus(t, structs.EvalStatusComplete) 347 } 348 349 func TestServiceSched_JobRegister_AllocFail(t *testing.T) { 350 h := NewHarness(t) 351 352 // Create NO nodes 353 // Create a job 354 job := mock.Job() 355 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 356 357 // Create a mock evaluation to register the job 358 eval := &structs.Evaluation{ 359 ID: structs.GenerateUUID(), 360 Priority: job.Priority, 361 TriggeredBy: structs.EvalTriggerJobRegister, 362 JobID: job.ID, 363 } 364 365 // Process the evaluation 366 err := h.Process(NewServiceScheduler, eval) 367 if err != nil { 368 t.Fatalf("err: %v", err) 369 } 370 371 // Ensure no plan 372 if len(h.Plans) != 0 { 373 t.Fatalf("bad: %#v", h.Plans) 374 } 375 376 // Ensure there is a follow up eval. 377 if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked { 378 t.Fatalf("bad: %#v", h.CreateEvals) 379 } 380 381 if len(h.Evals) != 1 { 382 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 383 } 384 outEval := h.Evals[0] 385 386 // Ensure the eval has its spawned blocked eval 387 if outEval.BlockedEval != h.CreateEvals[0].ID { 388 t.Fatalf("bad: %#v", outEval) 389 } 390 391 // Ensure the plan failed to alloc 392 if outEval == nil || len(outEval.FailedTGAllocs) != 1 { 393 t.Fatalf("bad: %#v", outEval) 394 } 395 396 metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name] 397 if !ok { 398 t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs) 399 } 400 401 // Check the coalesced failures 402 if metrics.CoalescedFailures != 9 { 403 t.Fatalf("bad: %#v", metrics) 404 } 405 406 // Check the available nodes 407 if count, ok := metrics.NodesAvailable["dc1"]; !ok || count != 0 { 408 t.Fatalf("bad: %#v", metrics) 409 } 410 411 // Check queued allocations 412 queued := outEval.QueuedAllocations["web"] 413 if queued != 10 { 414 t.Fatalf("expected queued: %v, actual: %v", 10, queued) 415 } 416 h.AssertEvalStatus(t, structs.EvalStatusComplete) 417 } 418 419 func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { 420 h := NewHarness(t) 421 422 // Create a full node 423 node := mock.Node() 424 node.Reserved = node.Resources 425 node.ComputeClass() 426 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 427 428 // Create an ineligible node 429 node2 := mock.Node() 430 node2.Attributes["kernel.name"] = "windows" 431 node2.ComputeClass() 432 noErr(t, h.State.UpsertNode(h.NextIndex(), node2)) 433 434 // Create a jobs 435 job := mock.Job() 436 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 437 438 // Create a mock evaluation to register the job 439 eval := &structs.Evaluation{ 440 ID: structs.GenerateUUID(), 441 Priority: job.Priority, 442 TriggeredBy: structs.EvalTriggerJobRegister, 443 JobID: job.ID, 444 } 445 446 // Process the 
evaluation 447 err := h.Process(NewServiceScheduler, eval) 448 if err != nil { 449 t.Fatalf("err: %v", err) 450 } 451 452 // Ensure no plan 453 if len(h.Plans) != 0 { 454 t.Fatalf("bad: %#v", h.Plans) 455 } 456 457 // Ensure the plan has created a follow up eval. 458 if len(h.CreateEvals) != 1 { 459 t.Fatalf("bad: %#v", h.CreateEvals) 460 } 461 462 created := h.CreateEvals[0] 463 if created.Status != structs.EvalStatusBlocked { 464 t.Fatalf("bad: %#v", created) 465 } 466 467 classes := created.ClassEligibility 468 if len(classes) != 2 || !classes[node.ComputedClass] || classes[node2.ComputedClass] { 469 t.Fatalf("bad: %#v", classes) 470 } 471 472 if created.EscapedComputedClass { 473 t.Fatalf("bad: %#v", created) 474 } 475 476 // Ensure there is a follow up eval. 477 if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked { 478 t.Fatalf("bad: %#v", h.CreateEvals) 479 } 480 481 if len(h.Evals) != 1 { 482 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 483 } 484 outEval := h.Evals[0] 485 486 // Ensure the plan failed to alloc 487 if outEval == nil || len(outEval.FailedTGAllocs) != 1 { 488 t.Fatalf("bad: %#v", outEval) 489 } 490 491 metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name] 492 if !ok { 493 t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs) 494 } 495 496 // Check the coalesced failures 497 if metrics.CoalescedFailures != 9 { 498 t.Fatalf("bad: %#v", metrics) 499 } 500 501 // Check the available nodes 502 if count, ok := metrics.NodesAvailable["dc1"]; !ok || count != 2 { 503 t.Fatalf("bad: %#v", metrics) 504 } 505 506 h.AssertEvalStatus(t, structs.EvalStatusComplete) 507 } 508 509 func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { 510 h := NewHarness(t) 511 512 // Create one node 513 node := mock.Node() 514 node.NodeClass = "class_0" 515 noErr(t, node.ComputeClass()) 516 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 517 518 // Create a job that constrains on a node class 519 job := mock.Job() 520 job.TaskGroups[0].Count = 2 521 job.TaskGroups[0].Constraints = append(job.Constraints, 522 &structs.Constraint{ 523 LTarget: "${node.class}", 524 RTarget: "class_0", 525 Operand: "=", 526 }, 527 ) 528 tg2 := job.TaskGroups[0].Copy() 529 tg2.Name = "web2" 530 tg2.Constraints[1].RTarget = "class_1" 531 job.TaskGroups = append(job.TaskGroups, tg2) 532 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 533 534 // Create a mock evaluation to register the job 535 eval := &structs.Evaluation{ 536 ID: structs.GenerateUUID(), 537 Priority: job.Priority, 538 TriggeredBy: structs.EvalTriggerJobRegister, 539 JobID: job.ID, 540 } 541 542 // Process the evaluation 543 err := h.Process(NewServiceScheduler, eval) 544 if err != nil { 545 t.Fatalf("err: %v", err) 546 } 547 548 // Ensure a single plan 549 if len(h.Plans) != 1 { 550 t.Fatalf("bad: %#v", h.Plans) 551 } 552 plan := h.Plans[0] 553 554 // Ensure the plan allocated 555 var planned []*structs.Allocation 556 for _, allocList := range plan.NodeAllocation { 557 planned = append(planned, allocList...) 
558 } 559 if len(planned) != 2 { 560 t.Fatalf("bad: %#v", plan) 561 } 562 563 // Ensure two allocations placed 564 out, err := h.State.AllocsByJob(job.ID) 565 noErr(t, err) 566 if len(out) != 2 { 567 t.Fatalf("bad: %#v", out) 568 } 569 570 if len(h.Evals) != 1 { 571 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 572 } 573 outEval := h.Evals[0] 574 575 // Ensure the eval has its spawned blocked eval 576 if outEval.BlockedEval != h.CreateEvals[0].ID { 577 t.Fatalf("bad: %#v", outEval) 578 } 579 580 // Ensure the plan failed to alloc one tg 581 if outEval == nil || len(outEval.FailedTGAllocs) != 1 { 582 t.Fatalf("bad: %#v", outEval) 583 } 584 585 metrics, ok := outEval.FailedTGAllocs[tg2.Name] 586 if !ok { 587 t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs) 588 } 589 590 // Check the coalesced failures 591 if metrics.CoalescedFailures != tg2.Count-1 { 592 t.Fatalf("bad: %#v", metrics) 593 } 594 595 h.AssertEvalStatus(t, structs.EvalStatusComplete) 596 } 597 598 // This test just ensures the scheduler handles the eval type to avoid 599 // regressions. 600 func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { 601 h := NewHarness(t) 602 603 // Create a job and set the task group count to zero. 604 job := mock.Job() 605 job.TaskGroups[0].Count = 0 606 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 607 608 // Create a mock blocked evaluation 609 eval := &structs.Evaluation{ 610 ID: structs.GenerateUUID(), 611 Status: structs.EvalStatusBlocked, 612 Priority: job.Priority, 613 TriggeredBy: structs.EvalTriggerMaxPlans, 614 JobID: job.ID, 615 } 616 617 // Insert it into the state store 618 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 619 620 // Process the evaluation 621 err := h.Process(NewServiceScheduler, eval) 622 if err != nil { 623 t.Fatalf("err: %v", err) 624 } 625 626 // Ensure there was no plan 627 if len(h.Plans) != 0 { 628 t.Fatalf("bad: %#v", h.Plans) 629 } 630 631 h.AssertEvalStatus(t, structs.EvalStatusComplete) 632 } 633 634 func TestServiceSched_Plan_Partial_Progress(t *testing.T) { 635 h := NewHarness(t) 636 637 // Create a node 638 node := mock.Node() 639 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 640 641 // Create a job with a high resource ask so that all the allocations can't 642 // be placed on a single node. 643 job := mock.Job() 644 job.TaskGroups[0].Count = 3 645 job.TaskGroups[0].Tasks[0].Resources.CPU = 3600 646 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 647 648 // Create a mock evaluation to register the job 649 eval := &structs.Evaluation{ 650 ID: structs.GenerateUUID(), 651 Priority: job.Priority, 652 TriggeredBy: structs.EvalTriggerJobRegister, 653 JobID: job.ID, 654 } 655 656 // Process the evaluation 657 err := h.Process(NewServiceScheduler, eval) 658 if err != nil { 659 t.Fatalf("err: %v", err) 660 } 661 662 // Ensure a single plan 663 if len(h.Plans) != 1 { 664 t.Fatalf("bad: %#v", h.Plans) 665 } 666 plan := h.Plans[0] 667 668 // Ensure the plan doesn't have annotations. 669 if plan.Annotations != nil { 670 t.Fatalf("expected no annotations") 671 } 672 673 // Ensure the plan allocated 674 var planned []*structs.Allocation 675 for _, allocList := range plan.NodeAllocation { 676 planned = append(planned, allocList...) 
)
	}
	if len(planned) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure only one allocation placed
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	queued := h.Evals[0].QueuedAllocations["web"]
	if queued != 2 {
		t.Fatalf("expected: %v, actual: %v", 2, queued)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_EvaluateBlockedEval(t *testing.T) {
	h := NewHarness(t)

	// Create a job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock blocked evaluation
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Status:      structs.EvalStatusBlocked,
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Insert it into the state store
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure there was no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Ensure that the eval was reblocked
	if len(h.ReblockEvals) != 1 {
		t.Fatalf("bad: %#v", h.ReblockEvals)
	}
	if h.ReblockEvals[0].ID != eval.ID {
		t.Fatalf("expect same eval to be reblocked; got %q; want %q", h.ReblockEvals[0].ID, eval.ID)
	}

	// Ensure the eval status was not updated
	if len(h.Evals) != 0 {
		t.Fatalf("Existing eval should not have status set")
	}
}

func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock blocked evaluation
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Status:      structs.EvalStatusBlocked,
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Insert it into the state store
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the eval has no spawned blocked eval
	if len(h.Evals) != 1 {
		t.Fatalf("bad: %#v", h.Evals)
	}
	if h.Evals[0].BlockedEval != "" {
		t.Fatalf("bad: %#v", h.Evals[0])
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
797 } 798 if len(planned) != 10 { 799 t.Fatalf("bad: %#v", plan) 800 } 801 802 // Lookup the allocations by JobID 803 out, err := h.State.AllocsByJob(job.ID) 804 noErr(t, err) 805 806 // Ensure all allocations placed 807 if len(out) != 10 { 808 t.Fatalf("bad: %#v", out) 809 } 810 811 // Ensure the eval was not reblocked 812 if len(h.ReblockEvals) != 0 { 813 t.Fatalf("Existing eval should not have been reblocked as it placed all allocations") 814 } 815 816 h.AssertEvalStatus(t, structs.EvalStatusComplete) 817 818 // Ensure queued allocations is zero 819 queued := h.Evals[0].QueuedAllocations["web"] 820 if queued != 0 { 821 t.Fatalf("expected queued: %v, actual: %v", 0, queued) 822 } 823 } 824 825 func TestServiceSched_JobModify(t *testing.T) { 826 h := NewHarness(t) 827 828 // Create some nodes 829 var nodes []*structs.Node 830 for i := 0; i < 10; i++ { 831 node := mock.Node() 832 nodes = append(nodes, node) 833 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 834 } 835 836 // Generate a fake job with allocations 837 job := mock.Job() 838 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 839 840 var allocs []*structs.Allocation 841 for i := 0; i < 10; i++ { 842 alloc := mock.Alloc() 843 alloc.Job = job 844 alloc.JobID = job.ID 845 alloc.NodeID = nodes[i].ID 846 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 847 allocs = append(allocs, alloc) 848 } 849 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 850 851 // Add a few terminal status allocations, these should be ignored 852 var terminal []*structs.Allocation 853 for i := 0; i < 5; i++ { 854 alloc := mock.Alloc() 855 alloc.Job = job 856 alloc.JobID = job.ID 857 alloc.NodeID = nodes[i].ID 858 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 859 alloc.DesiredStatus = structs.AllocDesiredStatusStop 860 terminal = append(terminal, alloc) 861 } 862 noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal)) 863 864 // Update the job 865 job2 := mock.Job() 866 job2.ID = job.ID 867 868 // Update the task, such that it cannot be done in-place 869 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 870 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 871 872 // Create a mock evaluation to deal with drain 873 eval := &structs.Evaluation{ 874 ID: structs.GenerateUUID(), 875 Priority: 50, 876 TriggeredBy: structs.EvalTriggerJobRegister, 877 JobID: job.ID, 878 } 879 880 // Process the evaluation 881 err := h.Process(NewServiceScheduler, eval) 882 if err != nil { 883 t.Fatalf("err: %v", err) 884 } 885 886 // Ensure a single plan 887 if len(h.Plans) != 1 { 888 t.Fatalf("bad: %#v", h.Plans) 889 } 890 plan := h.Plans[0] 891 892 // Ensure the plan evicted all allocs 893 var update []*structs.Allocation 894 for _, updateList := range plan.NodeUpdate { 895 update = append(update, updateList...) 896 } 897 if len(update) != len(allocs) { 898 t.Fatalf("bad: %#v", plan) 899 } 900 901 // Ensure the plan allocated 902 var planned []*structs.Allocation 903 for _, allocList := range plan.NodeAllocation { 904 planned = append(planned, allocList...) 905 } 906 if len(planned) != 10 { 907 t.Fatalf("bad: %#v", plan) 908 } 909 910 // Lookup the allocations by JobID 911 out, err := h.State.AllocsByJob(job.ID) 912 noErr(t, err) 913 914 // Ensure all allocations placed 915 out, _ = structs.FilterTerminalAllocs(out) 916 if len(out) != 10 { 917 t.Fatalf("bad: %#v", out) 918 } 919 920 h.AssertEvalStatus(t, structs.EvalStatusComplete) 921 } 922 923 // Have a single node and submit a job. 
Increment the count such that all fit
// on the node but the node doesn't have enough resources to fit the new count +
// 1. This tests that we properly discount the resources of existing allocs.
func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) {
	h := NewHarness(t)

	// Create one node
	node := mock.Node()
	node.Resources.CPU = 1000
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a fake job with one allocation
	job := mock.Job()
	job.TaskGroups[0].Tasks[0].Resources.CPU = 256
	job2 := job.Copy()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	alloc.Resources.CPU = 256
	allocs = append(allocs, alloc)
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Update the job to count 3
	job2.TaskGroups[0].Count = 3
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan didn't evict the alloc
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 3 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan had no failures
	if len(h.Evals) != 1 {
		t.Fatalf("incorrect number of updated eval: %#v", h.Evals)
	}
	outEval := h.Evals[0]
	if outEval == nil || len(outEval.FailedTGAllocs) != 0 {
		t.Fatalf("bad: %#v", outEval)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	out, _ = structs.FilterTerminalAllocs(out)
	if len(out) != 3 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobModify_CountZero(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Add a few terminal status allocations, these should be ignored
	var terminal []*structs.Allocation
	for i := 0; i < 5; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		alloc.DesiredStatus = structs.AllocDesiredStatusStop
		terminal = append(terminal, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal))

	// Update the job to be count zero
	job2 := mock.Job()
	job2.ID = job.ID
	job2.TaskGroups[0].Count = 0
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != len(allocs) {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan didn't allocate
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
1092 } 1093 if len(planned) != 0 { 1094 t.Fatalf("bad: %#v", plan) 1095 } 1096 1097 // Lookup the allocations by JobID 1098 out, err := h.State.AllocsByJob(job.ID) 1099 noErr(t, err) 1100 1101 // Ensure all allocations placed 1102 out, _ = structs.FilterTerminalAllocs(out) 1103 if len(out) != 0 { 1104 t.Fatalf("bad: %#v", out) 1105 } 1106 1107 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1108 } 1109 1110 func TestServiceSched_JobModify_Rolling(t *testing.T) { 1111 h := NewHarness(t) 1112 1113 // Create some nodes 1114 var nodes []*structs.Node 1115 for i := 0; i < 10; i++ { 1116 node := mock.Node() 1117 nodes = append(nodes, node) 1118 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1119 } 1120 1121 // Generate a fake job with allocations 1122 job := mock.Job() 1123 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1124 1125 var allocs []*structs.Allocation 1126 for i := 0; i < 10; i++ { 1127 alloc := mock.Alloc() 1128 alloc.Job = job 1129 alloc.JobID = job.ID 1130 alloc.NodeID = nodes[i].ID 1131 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1132 allocs = append(allocs, alloc) 1133 } 1134 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1135 1136 // Update the job 1137 job2 := mock.Job() 1138 job2.ID = job.ID 1139 job2.Update = structs.UpdateStrategy{ 1140 Stagger: 30 * time.Second, 1141 MaxParallel: 5, 1142 } 1143 1144 // Update the task, such that it cannot be done in-place 1145 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 1146 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1147 1148 // Create a mock evaluation to deal with drain 1149 eval := &structs.Evaluation{ 1150 ID: structs.GenerateUUID(), 1151 Priority: 50, 1152 TriggeredBy: structs.EvalTriggerJobRegister, 1153 JobID: job.ID, 1154 } 1155 1156 // Process the evaluation 1157 err := h.Process(NewServiceScheduler, eval) 1158 if err != nil { 1159 t.Fatalf("err: %v", err) 1160 } 1161 1162 // Ensure a single plan 1163 if len(h.Plans) != 1 { 1164 t.Fatalf("bad: %#v", h.Plans) 1165 } 1166 plan := h.Plans[0] 1167 1168 // Ensure the plan evicted only MaxParallel 1169 var update []*structs.Allocation 1170 for _, updateList := range plan.NodeUpdate { 1171 update = append(update, updateList...) 1172 } 1173 if len(update) != job2.Update.MaxParallel { 1174 t.Fatalf("bad: %#v", plan) 1175 } 1176 1177 // Ensure the plan allocated 1178 var planned []*structs.Allocation 1179 for _, allocList := range plan.NodeAllocation { 1180 planned = append(planned, allocList...) 
1181 } 1182 if len(planned) != job2.Update.MaxParallel { 1183 t.Fatalf("bad: %#v", plan) 1184 } 1185 1186 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1187 1188 // Ensure a follow up eval was created 1189 eval = h.Evals[0] 1190 if eval.NextEval == "" { 1191 t.Fatalf("missing next eval") 1192 } 1193 1194 // Check for create 1195 if len(h.CreateEvals) == 0 { 1196 t.Fatalf("missing created eval") 1197 } 1198 create := h.CreateEvals[0] 1199 if eval.NextEval != create.ID { 1200 t.Fatalf("ID mismatch") 1201 } 1202 if create.PreviousEval != eval.ID { 1203 t.Fatalf("missing previous eval") 1204 } 1205 1206 if create.TriggeredBy != structs.EvalTriggerRollingUpdate { 1207 t.Fatalf("bad: %#v", create) 1208 } 1209 } 1210 1211 func TestServiceSched_JobModify_InPlace(t *testing.T) { 1212 h := NewHarness(t) 1213 1214 // Create some nodes 1215 var nodes []*structs.Node 1216 for i := 0; i < 10; i++ { 1217 node := mock.Node() 1218 nodes = append(nodes, node) 1219 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1220 } 1221 1222 // Generate a fake job with allocations 1223 job := mock.Job() 1224 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1225 1226 var allocs []*structs.Allocation 1227 for i := 0; i < 10; i++ { 1228 alloc := mock.Alloc() 1229 alloc.Job = job 1230 alloc.JobID = job.ID 1231 alloc.NodeID = nodes[i].ID 1232 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1233 allocs = append(allocs, alloc) 1234 } 1235 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1236 1237 // Update the job 1238 job2 := mock.Job() 1239 job2.ID = job.ID 1240 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1241 1242 // Create a mock evaluation to deal with drain 1243 eval := &structs.Evaluation{ 1244 ID: structs.GenerateUUID(), 1245 Priority: 50, 1246 TriggeredBy: structs.EvalTriggerJobRegister, 1247 JobID: job.ID, 1248 } 1249 1250 // Process the evaluation 1251 err := h.Process(NewServiceScheduler, eval) 1252 if err != nil { 1253 t.Fatalf("err: %v", err) 1254 } 1255 1256 // Ensure a single plan 1257 if len(h.Plans) != 1 { 1258 t.Fatalf("bad: %#v", h.Plans) 1259 } 1260 plan := h.Plans[0] 1261 1262 // Ensure the plan did not evict any allocs 1263 var update []*structs.Allocation 1264 for _, updateList := range plan.NodeUpdate { 1265 update = append(update, updateList...) 1266 } 1267 if len(update) != 0 { 1268 t.Fatalf("bad: %#v", plan) 1269 } 1270 1271 // Ensure the plan updated the existing allocs 1272 var planned []*structs.Allocation 1273 for _, allocList := range plan.NodeAllocation { 1274 planned = append(planned, allocList...) 
1275 } 1276 if len(planned) != 10 { 1277 t.Fatalf("bad: %#v", plan) 1278 } 1279 for _, p := range planned { 1280 if p.Job != job2 { 1281 t.Fatalf("should update job") 1282 } 1283 } 1284 1285 // Lookup the allocations by JobID 1286 out, err := h.State.AllocsByJob(job.ID) 1287 noErr(t, err) 1288 1289 // Ensure all allocations placed 1290 if len(out) != 10 { 1291 for _, alloc := range out { 1292 t.Logf("%#v", alloc) 1293 } 1294 t.Fatalf("bad: %#v", out) 1295 } 1296 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1297 1298 // Verify the network did not change 1299 rp := structs.Port{Label: "main", Value: 5000} 1300 for _, alloc := range out { 1301 for _, resources := range alloc.TaskResources { 1302 if resources.Networks[0].ReservedPorts[0] != rp { 1303 t.Fatalf("bad: %#v", alloc) 1304 } 1305 } 1306 } 1307 } 1308 1309 func TestServiceSched_JobDeregister(t *testing.T) { 1310 h := NewHarness(t) 1311 1312 // Generate a fake job with allocations 1313 job := mock.Job() 1314 1315 var allocs []*structs.Allocation 1316 for i := 0; i < 10; i++ { 1317 alloc := mock.Alloc() 1318 alloc.Job = job 1319 alloc.JobID = job.ID 1320 allocs = append(allocs, alloc) 1321 } 1322 for _, alloc := range allocs { 1323 h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)) 1324 } 1325 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1326 1327 // Create a mock evaluation to deregister the job 1328 eval := &structs.Evaluation{ 1329 ID: structs.GenerateUUID(), 1330 Priority: 50, 1331 TriggeredBy: structs.EvalTriggerJobDeregister, 1332 JobID: job.ID, 1333 } 1334 1335 // Process the evaluation 1336 err := h.Process(NewServiceScheduler, eval) 1337 if err != nil { 1338 t.Fatalf("err: %v", err) 1339 } 1340 1341 // Ensure a single plan 1342 if len(h.Plans) != 1 { 1343 t.Fatalf("bad: %#v", h.Plans) 1344 } 1345 plan := h.Plans[0] 1346 1347 // Ensure the plan evicted all nodes 1348 if len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) != len(allocs) { 1349 t.Fatalf("bad: %#v", plan) 1350 } 1351 1352 // Lookup the allocations by JobID 1353 out, err := h.State.AllocsByJob(job.ID) 1354 noErr(t, err) 1355 1356 // Ensure that the job field on the allocation is still populated 1357 for _, alloc := range out { 1358 if alloc.Job == nil { 1359 t.Fatalf("bad: %#v", alloc) 1360 } 1361 } 1362 1363 // Ensure no remaining allocations 1364 out, _ = structs.FilterTerminalAllocs(out) 1365 if len(out) != 0 { 1366 t.Fatalf("bad: %#v", out) 1367 } 1368 1369 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1370 } 1371 1372 func TestServiceSched_NodeDown(t *testing.T) { 1373 h := NewHarness(t) 1374 1375 // Register a node 1376 node := mock.Node() 1377 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1378 1379 // Generate a fake job with allocations and an update policy. 
1380 job := mock.Job() 1381 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1382 1383 var allocs []*structs.Allocation 1384 for i := 0; i < 10; i++ { 1385 alloc := mock.Alloc() 1386 alloc.Job = job 1387 alloc.JobID = job.ID 1388 alloc.NodeID = node.ID 1389 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1390 allocs = append(allocs, alloc) 1391 } 1392 1393 // Cover each terminal case and ensure it doesn't change to lost 1394 allocs[7].DesiredStatus = structs.AllocDesiredStatusRun 1395 allocs[7].ClientStatus = structs.AllocClientStatusLost 1396 allocs[8].DesiredStatus = structs.AllocDesiredStatusRun 1397 allocs[8].ClientStatus = structs.AllocClientStatusFailed 1398 allocs[9].DesiredStatus = structs.AllocDesiredStatusRun 1399 allocs[9].ClientStatus = structs.AllocClientStatusComplete 1400 1401 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1402 1403 // Mark some allocs as running 1404 for i := 0; i < 4; i++ { 1405 out, _ := h.State.AllocByID(allocs[i].ID) 1406 out.ClientStatus = structs.AllocClientStatusRunning 1407 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{out})) 1408 } 1409 1410 // Mark the node as down 1411 noErr(t, h.State.UpdateNodeStatus(h.NextIndex(), node.ID, structs.NodeStatusDown)) 1412 1413 // Create a mock evaluation to deal with drain 1414 eval := &structs.Evaluation{ 1415 ID: structs.GenerateUUID(), 1416 Priority: 50, 1417 TriggeredBy: structs.EvalTriggerNodeUpdate, 1418 JobID: job.ID, 1419 NodeID: node.ID, 1420 } 1421 1422 // Process the evaluation 1423 err := h.Process(NewServiceScheduler, eval) 1424 if err != nil { 1425 t.Fatalf("err: %v", err) 1426 } 1427 1428 // Ensure a single plan 1429 if len(h.Plans) != 1 { 1430 t.Fatalf("bad: %#v", h.Plans) 1431 } 1432 plan := h.Plans[0] 1433 1434 // Test the scheduler marked all non-terminal allocations as lost 1435 if len(plan.NodeUpdate[node.ID]) != 7 { 1436 t.Fatalf("bad: %#v", plan) 1437 } 1438 1439 for _, out := range plan.NodeUpdate[node.ID] { 1440 if out.ClientStatus != structs.AllocClientStatusLost && out.DesiredStatus != structs.AllocDesiredStatusStop { 1441 t.Fatalf("bad alloc: %#v", out) 1442 } 1443 } 1444 1445 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1446 } 1447 1448 func TestServiceSched_NodeUpdate(t *testing.T) { 1449 h := NewHarness(t) 1450 1451 // Register a node 1452 node := mock.Node() 1453 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1454 1455 // Generate a fake job with allocations and an update policy. 
1456 job := mock.Job() 1457 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1458 1459 var allocs []*structs.Allocation 1460 for i := 0; i < 10; i++ { 1461 alloc := mock.Alloc() 1462 alloc.Job = job 1463 alloc.JobID = job.ID 1464 alloc.NodeID = node.ID 1465 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1466 allocs = append(allocs, alloc) 1467 } 1468 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1469 1470 // Mark some allocs as running 1471 for i := 0; i < 4; i++ { 1472 out, _ := h.State.AllocByID(allocs[i].ID) 1473 out.ClientStatus = structs.AllocClientStatusRunning 1474 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{out})) 1475 } 1476 1477 // Create a mock evaluation which won't trigger any new placements 1478 eval := &structs.Evaluation{ 1479 ID: structs.GenerateUUID(), 1480 Priority: 50, 1481 TriggeredBy: structs.EvalTriggerNodeUpdate, 1482 JobID: job.ID, 1483 NodeID: node.ID, 1484 } 1485 1486 // Process the evaluation 1487 err := h.Process(NewServiceScheduler, eval) 1488 if err != nil { 1489 t.Fatalf("err: %v", err) 1490 } 1491 if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 { 1492 t.Fatalf("bad queued allocations: %v", h.Evals[0].QueuedAllocations) 1493 } 1494 1495 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1496 } 1497 1498 func TestServiceSched_NodeDrain(t *testing.T) { 1499 h := NewHarness(t) 1500 1501 // Register a draining node 1502 node := mock.Node() 1503 node.Drain = true 1504 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1505 1506 // Create some nodes 1507 for i := 0; i < 10; i++ { 1508 node := mock.Node() 1509 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1510 } 1511 1512 // Generate a fake job with allocations and an update policy. 1513 job := mock.Job() 1514 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1515 1516 var allocs []*structs.Allocation 1517 for i := 0; i < 10; i++ { 1518 alloc := mock.Alloc() 1519 alloc.Job = job 1520 alloc.JobID = job.ID 1521 alloc.NodeID = node.ID 1522 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1523 allocs = append(allocs, alloc) 1524 } 1525 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1526 1527 // Create a mock evaluation to deal with drain 1528 eval := &structs.Evaluation{ 1529 ID: structs.GenerateUUID(), 1530 Priority: 50, 1531 TriggeredBy: structs.EvalTriggerNodeUpdate, 1532 JobID: job.ID, 1533 NodeID: node.ID, 1534 } 1535 1536 // Process the evaluation 1537 err := h.Process(NewServiceScheduler, eval) 1538 if err != nil { 1539 t.Fatalf("err: %v", err) 1540 } 1541 1542 // Ensure a single plan 1543 if len(h.Plans) != 1 { 1544 t.Fatalf("bad: %#v", h.Plans) 1545 } 1546 plan := h.Plans[0] 1547 1548 // Ensure the plan evicted all allocs 1549 if len(plan.NodeUpdate[node.ID]) != len(allocs) { 1550 t.Fatalf("bad: %#v", plan) 1551 } 1552 1553 // Ensure the plan allocated 1554 var planned []*structs.Allocation 1555 for _, allocList := range plan.NodeAllocation { 1556 planned = append(planned, allocList...) 
1557 } 1558 if len(planned) != 10 { 1559 t.Fatalf("bad: %#v", plan) 1560 } 1561 1562 // Lookup the allocations by JobID 1563 out, err := h.State.AllocsByJob(job.ID) 1564 noErr(t, err) 1565 1566 // Ensure all allocations placed 1567 out, _ = structs.FilterTerminalAllocs(out) 1568 if len(out) != 10 { 1569 t.Fatalf("bad: %#v", out) 1570 } 1571 1572 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1573 } 1574 1575 func TestServiceSched_NodeDrain_Down(t *testing.T) { 1576 h := NewHarness(t) 1577 1578 // Register a draining node 1579 node := mock.Node() 1580 node.Drain = true 1581 node.Status = structs.NodeStatusDown 1582 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1583 1584 // Generate a fake job with allocations 1585 job := mock.Job() 1586 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1587 1588 var allocs []*structs.Allocation 1589 for i := 0; i < 10; i++ { 1590 alloc := mock.Alloc() 1591 alloc.Job = job 1592 alloc.JobID = job.ID 1593 alloc.NodeID = node.ID 1594 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1595 allocs = append(allocs, alloc) 1596 } 1597 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1598 1599 // Set the desired state of the allocs to stop 1600 var stop []*structs.Allocation 1601 for i := 0; i < 10; i++ { 1602 newAlloc := allocs[i].Copy() 1603 newAlloc.ClientStatus = structs.AllocDesiredStatusStop 1604 stop = append(stop, newAlloc) 1605 } 1606 noErr(t, h.State.UpsertAllocs(h.NextIndex(), stop)) 1607 1608 // Mark some of the allocations as running 1609 var running []*structs.Allocation 1610 for i := 4; i < 6; i++ { 1611 newAlloc := stop[i].Copy() 1612 newAlloc.ClientStatus = structs.AllocClientStatusRunning 1613 running = append(running, newAlloc) 1614 } 1615 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), running)) 1616 1617 // Mark some of the allocations as complete 1618 var complete []*structs.Allocation 1619 for i := 6; i < 10; i++ { 1620 newAlloc := stop[i].Copy() 1621 newAlloc.ClientStatus = structs.AllocClientStatusComplete 1622 complete = append(complete, newAlloc) 1623 } 1624 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), complete)) 1625 1626 // Create a mock evaluation to deal with the node update 1627 eval := &structs.Evaluation{ 1628 ID: structs.GenerateUUID(), 1629 Priority: 50, 1630 TriggeredBy: structs.EvalTriggerNodeUpdate, 1631 JobID: job.ID, 1632 NodeID: node.ID, 1633 } 1634 1635 // Process the evaluation 1636 err := h.Process(NewServiceScheduler, eval) 1637 if err != nil { 1638 t.Fatalf("err: %v", err) 1639 } 1640 1641 // Ensure a single plan 1642 if len(h.Plans) != 1 { 1643 t.Fatalf("bad: %#v", h.Plans) 1644 } 1645 plan := h.Plans[0] 1646 1647 // Ensure the plan evicted non terminal allocs 1648 if len(plan.NodeUpdate[node.ID]) != 6 { 1649 t.Fatalf("bad: %#v", plan) 1650 } 1651 1652 // Ensure that all the allocations which were in running or pending state 1653 // has been marked as lost 1654 var lostAllocs []string 1655 for _, alloc := range plan.NodeUpdate[node.ID] { 1656 lostAllocs = append(lostAllocs, alloc.ID) 1657 } 1658 sort.Strings(lostAllocs) 1659 1660 var expectedLostAllocs []string 1661 for i := 0; i < 6; i++ { 1662 expectedLostAllocs = append(expectedLostAllocs, allocs[i].ID) 1663 } 1664 sort.Strings(expectedLostAllocs) 1665 1666 if !reflect.DeepEqual(expectedLostAllocs, lostAllocs) { 1667 t.Fatalf("expected: %v, actual: %v", expectedLostAllocs, lostAllocs) 1668 } 1669 1670 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1671 } 1672 1673 func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { 1674 h := 
NewHarness(t)

	// Register a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a fake job with allocations
	job := mock.Job()
	job.TaskGroups[0].Count = 2
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for i := 0; i < 2; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	node.Drain = true
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	queued := h.Evals[0].QueuedAllocations["web"]
	if queued != 2 {
		t.Fatalf("expected: %v, actual: %v", 2, queued)
	}
}

func TestServiceSched_NodeDrain_UpdateStrategy(t *testing.T) {
	h := NewHarness(t)

	// Register a draining node
	node := mock.Node()
	node.Drain = true
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations and an update policy.
	job := mock.Job()
	mp := 5
	job.Update = structs.UpdateStrategy{
		Stagger:     time.Second,
		MaxParallel: mp,
	}
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted only MaxParallel allocs
	if len(plan.NodeUpdate[node.ID]) != mp {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != mp {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure there is a followup eval.
1790 if len(h.CreateEvals) != 1 || 1791 h.CreateEvals[0].TriggeredBy != structs.EvalTriggerRollingUpdate { 1792 t.Fatalf("bad: %#v", h.CreateEvals) 1793 } 1794 1795 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1796 } 1797 1798 func TestServiceSched_RetryLimit(t *testing.T) { 1799 h := NewHarness(t) 1800 h.Planner = &RejectPlan{h} 1801 1802 // Create some nodes 1803 for i := 0; i < 10; i++ { 1804 node := mock.Node() 1805 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1806 } 1807 1808 // Create a job 1809 job := mock.Job() 1810 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1811 1812 // Create a mock evaluation to register the job 1813 eval := &structs.Evaluation{ 1814 ID: structs.GenerateUUID(), 1815 Priority: job.Priority, 1816 TriggeredBy: structs.EvalTriggerJobRegister, 1817 JobID: job.ID, 1818 } 1819 1820 // Process the evaluation 1821 err := h.Process(NewServiceScheduler, eval) 1822 if err != nil { 1823 t.Fatalf("err: %v", err) 1824 } 1825 1826 // Ensure multiple plans 1827 if len(h.Plans) == 0 { 1828 t.Fatalf("bad: %#v", h.Plans) 1829 } 1830 1831 // Lookup the allocations by JobID 1832 out, err := h.State.AllocsByJob(job.ID) 1833 noErr(t, err) 1834 1835 // Ensure no allocations placed 1836 if len(out) != 0 { 1837 t.Fatalf("bad: %#v", out) 1838 } 1839 1840 // Should hit the retry limit 1841 h.AssertEvalStatus(t, structs.EvalStatusFailed) 1842 } 1843 1844 func TestBatchSched_Run_CompleteAlloc(t *testing.T) { 1845 h := NewHarness(t) 1846 1847 // Create a node 1848 node := mock.Node() 1849 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1850 1851 // Create a job 1852 job := mock.Job() 1853 job.TaskGroups[0].Count = 1 1854 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1855 1856 // Create a complete alloc 1857 alloc := mock.Alloc() 1858 alloc.Job = job 1859 alloc.JobID = job.ID 1860 alloc.NodeID = node.ID 1861 alloc.Name = "my-job.web[0]" 1862 alloc.ClientStatus = structs.AllocClientStatusComplete 1863 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 1864 1865 // Create a mock evaluation to register the job 1866 eval := &structs.Evaluation{ 1867 ID: structs.GenerateUUID(), 1868 Priority: job.Priority, 1869 TriggeredBy: structs.EvalTriggerJobRegister, 1870 JobID: job.ID, 1871 } 1872 1873 // Process the evaluation 1874 err := h.Process(NewBatchScheduler, eval) 1875 if err != nil { 1876 t.Fatalf("err: %v", err) 1877 } 1878 1879 // Ensure no plan as it should be a no-op 1880 if len(h.Plans) != 0 { 1881 t.Fatalf("bad: %#v", h.Plans) 1882 } 1883 1884 // Lookup the allocations by JobID 1885 out, err := h.State.AllocsByJob(job.ID) 1886 noErr(t, err) 1887 1888 // Ensure no allocations placed 1889 if len(out) != 1 { 1890 t.Fatalf("bad: %#v", out) 1891 } 1892 1893 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1894 } 1895 1896 func TestBatchSched_Run_DrainedAlloc(t *testing.T) { 1897 h := NewHarness(t) 1898 1899 // Create a node 1900 node := mock.Node() 1901 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1902 1903 // Create a job 1904 job := mock.Job() 1905 job.TaskGroups[0].Count = 1 1906 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1907 1908 // Create a complete alloc 1909 alloc := mock.Alloc() 1910 alloc.Job = job 1911 alloc.JobID = job.ID 1912 alloc.NodeID = node.ID 1913 alloc.Name = "my-job.web[0]" 1914 alloc.DesiredStatus = structs.AllocDesiredStatusStop 1915 alloc.ClientStatus = structs.AllocClientStatusComplete 1916 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 1917 1918 // Create a mock evaluation to register the 
job 1919 eval := &structs.Evaluation{ 1920 ID: structs.GenerateUUID(), 1921 Priority: job.Priority, 1922 TriggeredBy: structs.EvalTriggerJobRegister, 1923 JobID: job.ID, 1924 } 1925 1926 // Process the evaluation 1927 err := h.Process(NewBatchScheduler, eval) 1928 if err != nil { 1929 t.Fatalf("err: %v", err) 1930 } 1931 1932 // Ensure a plan 1933 if len(h.Plans) != 1 { 1934 t.Fatalf("bad: %#v", h.Plans) 1935 } 1936 1937 // Lookup the allocations by JobID 1938 out, err := h.State.AllocsByJob(job.ID) 1939 noErr(t, err) 1940 1941 // Ensure a replacement alloc was placed. 1942 if len(out) != 2 { 1943 t.Fatalf("bad: %#v", out) 1944 } 1945 1946 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1947 } 1948 1949 func TestBatchSched_Run_FailedAlloc(t *testing.T) { 1950 h := NewHarness(t) 1951 1952 // Create a node 1953 node := mock.Node() 1954 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1955 1956 // Create a job 1957 job := mock.Job() 1958 job.TaskGroups[0].Count = 1 1959 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1960 1961 // Create a failed alloc 1962 alloc := mock.Alloc() 1963 alloc.Job = job 1964 alloc.JobID = job.ID 1965 alloc.NodeID = node.ID 1966 alloc.Name = "my-job.web[0]" 1967 alloc.ClientStatus = structs.AllocClientStatusFailed 1968 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 1969 1970 // Create a mock evaluation to register the job 1971 eval := &structs.Evaluation{ 1972 ID: structs.GenerateUUID(), 1973 Priority: job.Priority, 1974 TriggeredBy: structs.EvalTriggerJobRegister, 1975 JobID: job.ID, 1976 } 1977 1978 // Process the evaluation 1979 err := h.Process(NewBatchScheduler, eval) 1980 if err != nil { 1981 t.Fatalf("err: %v", err) 1982 } 1983 1984 // Ensure a plan 1985 if len(h.Plans) != 1 { 1986 t.Fatalf("bad: %#v", h.Plans) 1987 } 1988 1989 // Lookup the allocations by JobID 1990 out, err := h.State.AllocsByJob(job.ID) 1991 noErr(t, err) 1992 1993 // Ensure a replacement alloc was placed. 
	if len(out) != 2 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure that the scheduler is recording the correct number of queued
	// allocations
	queued := h.Evals[0].QueuedAllocations["web"]
	if queued != 0 {
		t.Fatalf("expected: %v, actual: %v", 0, queued)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) {
	h := NewHarness(t)

	node := mock.Node()
	node.Drain = true
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job
	job := mock.Job()
	job.TaskGroups[0].Count = 1
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a failed alloc
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	alloc.ClientStatus = structs.AllocClientStatusFailed
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the scheduler is recording the correct number of queued
	// allocations
	queued := h.Evals[0].QueuedAllocations["web"]
	if queued != 1 {
		t.Fatalf("expected: %v, actual: %v", 1, queued)
	}
}

func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) {
	h := NewHarness(t)

	// Create two nodes, one that is drained and has a successfully finished
	// alloc and a fresh undrained one
	node := mock.Node()
	node.Drain = true
	node2 := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))

	// Create a job
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.TaskGroups[0].Count = 1
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a successful alloc
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	alloc.ClientStatus = structs.AllocClientStatusComplete
	alloc.TaskStates = map[string]*structs.TaskState{
		"web": &structs.TaskState{
			State: structs.TaskStateDead,
			Events: []*structs.TaskEvent{
				{
					Type:     structs.TaskTerminated,
					ExitCode: 0,
				},
			},
		},
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to rerun the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure no replacement alloc was placed.
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestGenericSched_FilterCompleteAllocs(t *testing.T) {
	running := mock.Alloc()
	desiredStop := mock.Alloc()
	desiredStop.DesiredStatus = structs.AllocDesiredStatusStop

	new := mock.Alloc()
	new.CreateIndex = 10000

	oldSuccessful := mock.Alloc()
	oldSuccessful.CreateIndex = 30
	oldSuccessful.DesiredStatus = structs.AllocDesiredStatusStop
	oldSuccessful.ClientStatus = structs.AllocClientStatusComplete
	oldSuccessful.TaskStates = make(map[string]*structs.TaskState, 1)
	oldSuccessful.TaskStates["foo"] = &structs.TaskState{
		State:  structs.TaskStateDead,
		Events: []*structs.TaskEvent{{Type: structs.TaskTerminated, ExitCode: 0}},
	}

	unsuccessful := mock.Alloc()
	unsuccessful.DesiredStatus = structs.AllocDesiredStatusRun
	unsuccessful.ClientStatus = structs.AllocClientStatusFailed
	unsuccessful.TaskStates = make(map[string]*structs.TaskState, 1)
	unsuccessful.TaskStates["foo"] = &structs.TaskState{
		State:  structs.TaskStateDead,
		Events: []*structs.TaskEvent{{Type: structs.TaskTerminated, ExitCode: 1}},
	}

	cases := []struct {
		Batch          bool
		Input, Output  []*structs.Allocation
		TerminalAllocs map[string]*structs.Allocation
	}{
		{
			Input:          []*structs.Allocation{running},
			Output:         []*structs.Allocation{running},
			TerminalAllocs: map[string]*structs.Allocation{},
		},
		{
			Input:  []*structs.Allocation{running, desiredStop},
			Output: []*structs.Allocation{running},
			TerminalAllocs: map[string]*structs.Allocation{
				desiredStop.Name: desiredStop,
			},
		},
		{
			Batch:          true,
			Input:          []*structs.Allocation{running},
			Output:         []*structs.Allocation{running},
			TerminalAllocs: map[string]*structs.Allocation{},
		},
		{
			Batch:          true,
			Input:          []*structs.Allocation{new, oldSuccessful},
			Output:         []*structs.Allocation{new},
			TerminalAllocs: map[string]*structs.Allocation{},
		},
		{
			Batch:  true,
			Input:  []*structs.Allocation{unsuccessful},
			Output: []*structs.Allocation{},
			TerminalAllocs: map[string]*structs.Allocation{
				unsuccessful.Name: unsuccessful,
			},
		},
	}

	for i, c := range cases {
		g := &GenericScheduler{batch: c.Batch}
		out, terminalAllocs := g.filterCompleteAllocs(c.Input)

		if !reflect.DeepEqual(out, c.Output) {
			t.Log("Got:")
			for i, a := range out {
				t.Logf("%d: %#v", i, a)
			}
			t.Log("Want:")
			for i, a := range c.Output {
				t.Logf("%d: %#v", i, a)
			}
			t.Fatalf("Case %d failed", i+1)
		}

		if !reflect.DeepEqual(terminalAllocs, c.TerminalAllocs) {
			t.Log("Got:")
			for n, a := range terminalAllocs {
				t.Logf("%v: %#v", n, a)
			}
			t.Log("Want:")
			for n, a := range c.TerminalAllocs {
				t.Logf("%v: %#v", n, a)
			}
			t.Fatalf("Case %d failed", i+1)
		}
	}
}

func TestGenericSched_ChainedAlloc(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}
	// Process the evaluation
	if err := h.Process(NewServiceScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	var allocIDs []string
	for _, allocList := range h.Plans[0].NodeAllocation {
		for _, alloc := range allocList {
			allocIDs = append(allocIDs, alloc.ID)
		}
	}
	sort.Strings(allocIDs)

	// Create a new harness to invoke the scheduler again
	h1 := NewHarnessWithState(t, h.State)
	job1 := mock.Job()
	job1.ID = job.ID
	job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
	job1.TaskGroups[0].Count = 12
	noErr(t, h1.State.UpsertJob(h1.NextIndex(), job1))

	// Create a mock evaluation to update the job
	eval1 := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job1.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job1.ID,
	}
	// Process the evaluation
	if err := h1.Process(NewServiceScheduler, eval1); err != nil {
		t.Fatalf("err: %v", err)
	}

	plan := h1.Plans[0]

	// Collect all the chained allocation ids and the new allocations which
	// don't have any chained allocations
	var prevAllocs []string
	var newAllocs []string
	for _, allocList := range plan.NodeAllocation {
		for _, alloc := range allocList {
			if alloc.PreviousAllocation == "" {
				newAllocs = append(newAllocs, alloc.ID)
				continue
			}
			prevAllocs = append(prevAllocs, alloc.PreviousAllocation)
		}
	}
	sort.Strings(prevAllocs)

	// Ensure that the new allocations have their corresponding original
	// allocation ids
	if !reflect.DeepEqual(prevAllocs, allocIDs) {
		t.Fatalf("expected: %v, actual: %v", allocIDs, prevAllocs)
	}

	// Ensure that the two new allocations don't have any chained allocations
	if len(newAllocs) != 2 {
		t.Fatalf("expected: %v, actual: %v", 2, len(newAllocs))
	}
}

func TestServiceSched_NodeDrain_Sticky(t *testing.T) {
	h := NewHarness(t)

	// Register a draining node
	node := mock.Node()
	node.Drain = true
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create an alloc on the draining node
	alloc := mock.Alloc()
	alloc.Name = "my-job.web[0]"
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	alloc.NodeID = node.ID
	alloc.Job.TaskGroups[0].Count = 1
	alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true
	noErr(t, h.State.UpsertJob(h.NextIndex(), alloc.Job))
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       alloc.Job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs
	if len(plan.NodeUpdate[node.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan didn't create any new allocations
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}
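
// These tests can be run with the standard Go tooling, for example:
//
//	go test ./scheduler -run 'TestBatchSched|TestGenericSched|TestServiceSched_NodeDrain_Sticky' -v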