github.com/blixtra/nomad@v0.7.2-0.20171221000451-da9a1d7bb050/scheduler/system_sched_test.go

package scheduler

import (
	"reflect"
	"sort"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestSystemSched_JobRegister(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan doesn't have annotations.
	if plan.Annotations != nil {
		t.Fatalf("expected no annotations")
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Check the available nodes
	if count, ok := out[0].Metrics.NodesAvailable["dc1"]; !ok || count != 10 {
		t.Fatalf("bad: %#v", out[0].Metrics)
	}

	// Ensure no allocations are queued
	queued := h.Evals[0].QueuedAllocations["web"]
	if queued != 0 {
		t.Fatalf("expected queued allocations: %v, actual: %v", 0, queued)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.SystemJob()
	job.TaskGroups[0].EphemeralDisk.Sticky = true
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	if err := h.Process(NewSystemScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure the plan allocated
	plan := h.Plans[0]
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Get an allocation and mark it as failed
	alloc := planned[4].Copy()
	alloc.ClientStatus = structs.AllocClientStatusFailed
	noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to handle the update
	eval = &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
	h1 := NewHarnessWithState(t, h.State)
	if err := h1.Process(NewSystemScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure we have created only one new allocation
	plan = h1.Plans[0]
	var newPlanned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		newPlanned = append(newPlanned, allocList...)
	}
	if len(newPlanned) != 1 {
		t.Fatalf("bad plan: %#v", plan)
	}
	// Ensure that the new allocation was placed on the same node as the older
	// one
	if newPlanned[0].NodeID != alloc.NodeID || newPlanned[0].PreviousAllocation != alloc.ID {
		t.Fatalf("expected: %#v, actual: %#v", alloc, newPlanned[0])
	}
}

func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job
	job := mock.SystemJob()
	job.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create another job with a large enough disk resource ask that it doesn't
	// fit on the node
	job1 := mock.SystemJob()
	job1.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024
	noErr(t, h.State.UpsertJob(h.NextIndex(), job1))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	if err := h.Process(NewSystemScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	// Create a new harness to test the scheduling result for the second job
	h1 := NewHarnessWithState(t, h.State)
	// Create a mock evaluation to register the job
	eval1 := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job1.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job1.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))

	// Process the evaluation
	if err := h1.Process(NewSystemScheduler, eval1); err != nil {
		t.Fatalf("err: %v", err)
	}

	out, err = h1.State.AllocsByJob(ws, job.Namespace, job1.ID, false)
	noErr(t, err)
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}
}
func TestSystemSched_ExhaustResources(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a service job which consumes most of the system resources
	svcJob := mock.Job()
	svcJob.TaskGroups[0].Count = 1
	svcJob.TaskGroups[0].Tasks[0].Resources.CPU = 3600
	noErr(t, h.State.UpsertJob(h.NextIndex(), svcJob))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    svcJob.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       svcJob.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a system job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval1 := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
	// Process the evaluation
	if err := h.Process(NewSystemScheduler, eval1); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that we have one allocation queued from the system job eval
	queued := h.Evals[1].QueuedAllocations["web"]
	if queued != 1 {
		t.Fatalf("expected: %v, actual: %v", 1, queued)
	}
}

func TestSystemSched_JobRegister_Annotate(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		if i < 9 {
			node.NodeClass = "foo"
		} else {
			node.NodeClass = "bar"
		}
		node.ComputeClass()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job constraining on node class
	job := mock.SystemJob()
	fooConstraint := &structs.Constraint{
		LTarget: "${node.class}",
		RTarget: "foo",
		Operand: "==",
	}
	job.Constraints = append(job.Constraints, fooConstraint)
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:    structs.DefaultNamespace,
		ID:           uuid.Generate(),
		Priority:     job.Priority,
		TriggeredBy:  structs.EvalTriggerJobRegister,
		JobID:        job.ID,
		AnnotatePlan: true,
		Status:       structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 9 {
		t.Fatalf("bad: %#v %d", planned, len(planned))
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 9 {
		t.Fatalf("bad: %#v", out)
	}

	// Check the available nodes
	if count, ok := out[0].Metrics.NodesAvailable["dc1"]; !ok || count != 10 {
		t.Fatalf("bad: %#v", out[0].Metrics)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Ensure the plan had annotations.
	if plan.Annotations == nil {
		t.Fatalf("expected annotations")
	}

	desiredTGs := plan.Annotations.DesiredTGUpdates
	if l := len(desiredTGs); l != 1 {
		t.Fatalf("incorrect number of task groups; got %v; want %v", l, 1)
	}

	desiredChanges, ok := desiredTGs["web"]
	if !ok {
		t.Fatalf("expected task group web to have desired changes")
	}

	expected := &structs.DesiredUpdates{Place: 9}
	if !reflect.DeepEqual(desiredChanges, expected) {
		t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected)
	}
}

func TestSystemSched_JobRegister_AddNode(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Add a new node.
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan had no node updates
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != 0 {
		t.Log(len(update))
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated on the new node
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure it allocated on the right node
	if _, ok := plan.NodeAllocation[node.ID]; !ok {
		t.Fatalf("allocated on wrong node: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure all allocations placed
	out, _ = structs.FilterTerminalAllocs(out)
	if len(out) != 11 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_JobRegister_AllocFail(t *testing.T) {
	h := NewHarness(t)

	// Create NO nodes
	// Create a job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan as this should be a no-op.
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_JobModify(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Add a few terminal status allocations; these should be ignored
	var terminal []*structs.Allocation
	for i := 0; i < 5; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = "my-job.web[0]"
		alloc.DesiredStatus = structs.AllocDesiredStatusStop
		terminal = append(terminal, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal))

	// Update the job
	job2 := mock.SystemJob()
	job2.ID = job.ID

	// Update the task, such that it cannot be done in-place
	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with the update
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != len(allocs) {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure all allocations placed
	out, _ = structs.FilterTerminalAllocs(out)
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_JobModify_Rolling(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Update the job
	job2 := mock.SystemJob()
	job2.ID = job.ID
	job2.Update = structs.UpdateStrategy{
		Stagger:     30 * time.Second,
		MaxParallel: 5,
	}

	// Update the task, such that it cannot be done in-place
	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with the update
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted only MaxParallel
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != job2.Update.MaxParallel {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != job2.Update.MaxParallel {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Ensure a follow up eval was created
	eval = h.Evals[0]
	if eval.NextEval == "" {
		t.Fatalf("missing next eval")
	}

	// Check for create
	if len(h.CreateEvals) == 0 {
		t.Fatalf("missing created eval")
	}
	create := h.CreateEvals[0]
	if eval.NextEval != create.ID {
		t.Fatalf("ID mismatch")
	}
	if create.PreviousEval != eval.ID {
		t.Fatalf("missing previous eval")
	}

	if create.TriggeredBy != structs.EvalTriggerRollingUpdate {
		t.Fatalf("bad: %#v", create)
	}
}

func TestSystemSched_JobModify_InPlace(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Update the job
	job2 := mock.SystemJob()
	job2.ID = job.ID
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with the update
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan did not evict any allocs
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan updated the existing allocs
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}
	for _, p := range planned {
		if p.Job != job2 {
			t.Fatalf("should update job")
		}
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}
	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Verify the network did not change
	rp := structs.Port{Label: "admin", Value: 5000}
	for _, alloc := range out {
		for _, resources := range alloc.TaskResources {
			if resources.Networks[0].ReservedPorts[0] != rp {
				t.Fatalf("bad: %#v", alloc)
			}
		}
	}
}

func TestSystemSched_JobDeregister_Purged(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	for _, alloc := range allocs {
		noErr(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Create a mock evaluation to deregister the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobDeregister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted the job from all nodes.
	for _, node := range nodes {
		if len(plan.NodeUpdate[node.ID]) != 1 {
			t.Fatalf("bad: %#v", plan)
		}
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure no remaining allocations
	out, _ = structs.FilterTerminalAllocs(out)
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.SystemJob()
	job.Stop = true
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for _, node := range nodes {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		allocs = append(allocs, alloc)
	}
	for _, alloc := range allocs {
		noErr(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Create a mock evaluation to deregister the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobDeregister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted the job from all nodes.
	for _, node := range nodes {
		if len(plan.NodeUpdate[node.ID]) != 1 {
			t.Fatalf("bad: %#v", plan)
		}
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure no remaining allocations
	out, _ = structs.FilterTerminalAllocs(out)
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_NodeDown(t *testing.T) {
	h := NewHarness(t)

	// Register a down node
	node := mock.Node()
	node.Status = structs.NodeStatusDown
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a fake job allocated on that node.
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs
	if len(plan.NodeUpdate[node.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan updated the allocation.
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeUpdate {
		planned = append(planned, allocList...)
	}
	if len(planned) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the allocation is stopped
	if p := planned[0]; p.DesiredStatus != structs.AllocDesiredStatusStop &&
		p.ClientStatus != structs.AllocClientStatusLost {
		t.Fatalf("bad: %#v", planned[0])
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_NodeDrain_Down(t *testing.T) {
	h := NewHarness(t)

	// Register a draining node
	node := mock.Node()
	node.Drain = true
	node.Status = structs.NodeStatusDown
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a fake job allocated on that node.
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted non-terminal allocs
	if len(plan.NodeUpdate[node.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure that the allocation is marked as lost
	var lostAllocs []string
	for _, alloc := range plan.NodeUpdate[node.ID] {
		lostAllocs = append(lostAllocs, alloc.ID)
	}
	expected := []string{alloc.ID}

	if !reflect.DeepEqual(lostAllocs, expected) {
		t.Fatalf("expected: %v, actual: %v", expected, lostAllocs)
	}
	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_NodeDrain(t *testing.T) {
	h := NewHarness(t)

	// Register a draining node
	node := mock.Node()
	node.Drain = true
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a fake job allocated on that node.
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs
	if len(plan.NodeUpdate[node.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan updated the allocation.
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeUpdate {
		planned = append(planned, allocList...)
	}
	if len(planned) != 1 {
		t.Log(len(planned))
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the allocation is stopped
	if planned[0].DesiredStatus != structs.AllocDesiredStatusStop {
		t.Fatalf("bad: %#v", planned[0])
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_NodeUpdate(t *testing.T) {
	h := NewHarness(t)

	// Register a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a fake job allocated on that node.
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that queued allocations are zero
	if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 {
		t.Fatalf("bad queued allocations: %#v", h.Evals[0].QueuedAllocations)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestSystemSched_RetryLimit(t *testing.T) {
	h := NewHarness(t)
	h.Planner = &RejectPlan{h}

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure multiple plans
	if len(h.Plans) == 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure no allocations placed
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}

	// Should hit the retry limit
	h.AssertEvalStatus(t, structs.EvalStatusFailed)
}

// This test ensures that the scheduler doesn't increment the queued allocation
// count for a task group when allocations can't be created on currently
// available nodes because of constraint mismatches.
func TestSystemSched_Queued_With_Constraints(t *testing.T) {
	h := NewHarness(t)

	// Register a node
	node := mock.Node()
	node.Attributes["kernel.name"] = "darwin"
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Generate a system job which can't be placed on the node
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that queued allocations are zero
	if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 {
		t.Fatalf("bad queued allocations: %#v", h.Evals[0].QueuedAllocations)
	}
}

func TestSystemSched_ChainedAlloc(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.SystemJob()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
	// Process the evaluation
	if err := h.Process(NewSystemScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	var allocIDs []string
	for _, allocList := range h.Plans[0].NodeAllocation {
		for _, alloc := range allocList {
			allocIDs = append(allocIDs, alloc.ID)
		}
	}
	sort.Strings(allocIDs)

	// Create a new harness to invoke the scheduler again
	h1 := NewHarnessWithState(t, h.State)
	job1 := mock.SystemJob()
	job1.ID = job.ID
	job1.TaskGroups[0].Tasks[0].Env = make(map[string]string)
	job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
	noErr(t, h1.State.UpsertJob(h1.NextIndex(), job1))

	// Insert two more nodes
	for i := 0; i < 2; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a mock evaluation to update the job
	eval1 := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job1.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job1.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
	// Process the evaluation
	if err := h1.Process(NewSystemScheduler, eval1); err != nil {
		t.Fatalf("err: %v", err)
	}

	plan := h1.Plans[0]

	// Collect all the chained allocation ids and the new allocations which
	// don't have any chained allocations
	var prevAllocs []string
	var newAllocs []string
	for _, allocList := range plan.NodeAllocation {
		for _, alloc := range allocList {
			if alloc.PreviousAllocation == "" {
== "" { 1358 newAllocs = append(newAllocs, alloc.ID) 1359 continue 1360 } 1361 prevAllocs = append(prevAllocs, alloc.PreviousAllocation) 1362 } 1363 } 1364 sort.Strings(prevAllocs) 1365 1366 // Ensure that the new allocations has their corresponging original 1367 // allocation ids 1368 if !reflect.DeepEqual(prevAllocs, allocIDs) { 1369 t.Fatalf("expected: %v, actual: %v", len(allocIDs), len(prevAllocs)) 1370 } 1371 1372 // Ensuring two new allocations don't have any chained allocations 1373 if len(newAllocs) != 2 { 1374 t.Fatalf("expected: %v, actual: %v", 2, len(newAllocs)) 1375 } 1376 } 1377 1378 func TestSystemSched_PlanWithDrainedNode(t *testing.T) { 1379 h := NewHarness(t) 1380 1381 // Register two nodes with two different classes 1382 node := mock.Node() 1383 node.NodeClass = "green" 1384 node.Drain = true 1385 node.ComputeClass() 1386 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1387 1388 node2 := mock.Node() 1389 node2.NodeClass = "blue" 1390 node2.ComputeClass() 1391 noErr(t, h.State.UpsertNode(h.NextIndex(), node2)) 1392 1393 // Create a Job with two task groups, each constrianed on node class 1394 job := mock.SystemJob() 1395 tg1 := job.TaskGroups[0] 1396 tg1.Constraints = append(tg1.Constraints, 1397 &structs.Constraint{ 1398 LTarget: "${node.class}", 1399 RTarget: "green", 1400 Operand: "==", 1401 }) 1402 1403 tg2 := tg1.Copy() 1404 tg2.Name = "web2" 1405 tg2.Constraints[0].RTarget = "blue" 1406 job.TaskGroups = append(job.TaskGroups, tg2) 1407 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1408 1409 // Create an allocation on each node 1410 alloc := mock.Alloc() 1411 alloc.Job = job 1412 alloc.JobID = job.ID 1413 alloc.NodeID = node.ID 1414 alloc.Name = "my-job.web[0]" 1415 alloc.TaskGroup = "web" 1416 1417 alloc2 := mock.Alloc() 1418 alloc2.Job = job 1419 alloc2.JobID = job.ID 1420 alloc2.NodeID = node2.ID 1421 alloc2.Name = "my-job.web2[0]" 1422 alloc2.TaskGroup = "web2" 1423 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc, alloc2})) 1424 1425 // Create a mock evaluation to deal with drain 1426 eval := &structs.Evaluation{ 1427 Namespace: structs.DefaultNamespace, 1428 ID: uuid.Generate(), 1429 Priority: 50, 1430 TriggeredBy: structs.EvalTriggerNodeUpdate, 1431 JobID: job.ID, 1432 NodeID: node.ID, 1433 Status: structs.EvalStatusPending, 1434 } 1435 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1436 1437 // Process the evaluation 1438 err := h.Process(NewSystemScheduler, eval) 1439 if err != nil { 1440 t.Fatalf("err: %v", err) 1441 } 1442 1443 // Ensure a single plan 1444 if len(h.Plans) != 1 { 1445 t.Fatalf("bad: %#v", h.Plans) 1446 } 1447 plan := h.Plans[0] 1448 1449 // Ensure the plan evicted the alloc on the failed node 1450 planned := plan.NodeUpdate[node.ID] 1451 if len(planned) != 1 { 1452 t.Fatalf("bad: %#v", plan) 1453 } 1454 1455 // Ensure the plan didn't place 1456 if len(plan.NodeAllocation) != 0 { 1457 t.Fatalf("bad: %#v", plan) 1458 } 1459 1460 // Ensure the allocations is stopped 1461 if planned[0].DesiredStatus != structs.AllocDesiredStatusStop { 1462 t.Fatalf("bad: %#v", planned[0]) 1463 } 1464 1465 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1466 } 1467 1468 func TestSystemSched_QueuedAllocsMultTG(t *testing.T) { 1469 h := NewHarness(t) 1470 1471 // Register two nodes with two different classes 1472 node := mock.Node() 1473 node.NodeClass = "green" 1474 node.ComputeClass() 1475 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1476 1477 node2 := mock.Node() 1478 node2.NodeClass = "blue" 1479 
	node2.ComputeClass()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))

	// Create a Job with two task groups, each constrained on node class
	job := mock.SystemJob()
	tg1 := job.TaskGroups[0]
	tg1.Constraints = append(tg1.Constraints,
		&structs.Constraint{
			LTarget: "${node.class}",
			RTarget: "green",
			Operand: "==",
		})

	tg2 := tg1.Copy()
	tg2.Name = "web2"
	tg2.Constraints[0].RTarget = "blue"
	job.TaskGroups = append(job.TaskGroups, tg2)
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to deal with the node update
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewSystemScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	qa := h.Evals[0].QueuedAllocations
	if qa["web"] != 0 || qa["web2"] != 0 {
		t.Fatalf("bad queued allocations %#v", qa)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}