github.com/hspak/nomad@v0.7.2-0.20180309000617-bc4ae22a39a5/scheduler/generic_sched_test.go (about) 1 package scheduler 2 3 import ( 4 "fmt" 5 "reflect" 6 "sort" 7 "testing" 8 "time" 9 10 memdb "github.com/hashicorp/go-memdb" 11 "github.com/hashicorp/nomad/helper" 12 "github.com/hashicorp/nomad/helper/uuid" 13 "github.com/hashicorp/nomad/nomad/mock" 14 "github.com/hashicorp/nomad/nomad/structs" 15 "github.com/stretchr/testify/assert" 16 "github.com/stretchr/testify/require" 17 ) 18 19 func TestServiceSched_JobRegister(t *testing.T) { 20 h := NewHarness(t) 21 22 // Create some nodes 23 for i := 0; i < 10; i++ { 24 node := mock.Node() 25 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 26 } 27 28 // Create a job 29 job := mock.Job() 30 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 31 32 // Create a mock evaluation to register the job 33 eval := &structs.Evaluation{ 34 Namespace: structs.DefaultNamespace, 35 ID: uuid.Generate(), 36 Priority: job.Priority, 37 TriggeredBy: structs.EvalTriggerJobRegister, 38 JobID: job.ID, 39 Status: structs.EvalStatusPending, 40 } 41 42 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 43 44 // Process the evaluation 45 err := h.Process(NewServiceScheduler, eval) 46 if err != nil { 47 t.Fatalf("err: %v", err) 48 } 49 50 // Ensure a single plan 51 if len(h.Plans) != 1 { 52 t.Fatalf("bad: %#v", h.Plans) 53 } 54 plan := h.Plans[0] 55 56 // Ensure the plan doesn't have annotations. 57 if plan.Annotations != nil { 58 t.Fatalf("expected no annotations") 59 } 60 61 // Ensure the eval has no spawned blocked eval 62 if len(h.CreateEvals) != 0 { 63 t.Fatalf("bad: %#v", h.CreateEvals) 64 if h.Evals[0].BlockedEval != "" { 65 t.Fatalf("bad: %#v", h.Evals[0]) 66 } 67 } 68 69 // Ensure the plan allocated 70 var planned []*structs.Allocation 71 for _, allocList := range plan.NodeAllocation { 72 planned = append(planned, allocList...) 
73 } 74 if len(planned) != 10 { 75 t.Fatalf("bad: %#v", plan) 76 } 77 78 // Lookup the allocations by JobID 79 ws := memdb.NewWatchSet() 80 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 81 noErr(t, err) 82 83 // Ensure all allocations placed 84 if len(out) != 10 { 85 t.Fatalf("bad: %#v", out) 86 } 87 88 // Ensure different ports were used. 89 used := make(map[int]struct{}) 90 for _, alloc := range out { 91 for _, resource := range alloc.TaskResources { 92 for _, port := range resource.Networks[0].DynamicPorts { 93 if _, ok := used[port.Value]; ok { 94 t.Fatalf("Port collision %v", port.Value) 95 } 96 used[port.Value] = struct{}{} 97 } 98 } 99 } 100 101 h.AssertEvalStatus(t, structs.EvalStatusComplete) 102 } 103 104 func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { 105 h := NewHarness(t) 106 107 // Create some nodes 108 for i := 0; i < 10; i++ { 109 node := mock.Node() 110 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 111 } 112 113 // Create a job 114 job := mock.Job() 115 job.TaskGroups[0].EphemeralDisk.Sticky = true 116 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 117 118 // Create a mock evaluation to register the job 119 eval := &structs.Evaluation{ 120 Namespace: structs.DefaultNamespace, 121 ID: uuid.Generate(), 122 Priority: job.Priority, 123 TriggeredBy: structs.EvalTriggerJobRegister, 124 JobID: job.ID, 125 Status: structs.EvalStatusPending, 126 } 127 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 128 129 // Process the evaluation 130 if err := h.Process(NewServiceScheduler, eval); err != nil { 131 t.Fatalf("err: %v", err) 132 } 133 134 // Ensure the plan allocated 135 plan := h.Plans[0] 136 planned := make(map[string]*structs.Allocation) 137 for _, allocList := range plan.NodeAllocation { 138 for _, alloc := range allocList { 139 planned[alloc.ID] = alloc 140 } 141 } 142 if len(planned) != 10 { 143 t.Fatalf("bad: %#v", plan) 144 } 145 146 // Update the job to force a rolling upgrade 147 
updated := job.Copy() 148 updated.TaskGroups[0].Tasks[0].Resources.CPU += 10 149 noErr(t, h.State.UpsertJob(h.NextIndex(), updated)) 150 151 // Create a mock evaluation to handle the update 152 eval = &structs.Evaluation{ 153 Namespace: structs.DefaultNamespace, 154 ID: uuid.Generate(), 155 Priority: job.Priority, 156 TriggeredBy: structs.EvalTriggerNodeUpdate, 157 JobID: job.ID, 158 Status: structs.EvalStatusPending, 159 } 160 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 161 h1 := NewHarnessWithState(t, h.State) 162 if err := h1.Process(NewServiceScheduler, eval); err != nil { 163 t.Fatalf("err: %v", err) 164 } 165 166 // Ensure we have created only one new allocation 167 // Ensure a single plan 168 if len(h1.Plans) != 1 { 169 t.Fatalf("bad: %#v", h1.Plans) 170 } 171 plan = h1.Plans[0] 172 var newPlanned []*structs.Allocation 173 for _, allocList := range plan.NodeAllocation { 174 newPlanned = append(newPlanned, allocList...) 175 } 176 if len(newPlanned) != 10 { 177 t.Fatalf("bad plan: %#v", plan) 178 } 179 // Ensure that the new allocations were placed on the same node as the older 180 // ones 181 for _, new := range newPlanned { 182 if new.PreviousAllocation == "" { 183 t.Fatalf("new alloc %q doesn't have a previous allocation", new.ID) 184 } 185 186 old, ok := planned[new.PreviousAllocation] 187 if !ok { 188 t.Fatalf("new alloc %q previous allocation doesn't match any prior placed alloc (%q)", new.ID, new.PreviousAllocation) 189 } 190 if new.NodeID != old.NodeID { 191 t.Fatalf("new alloc and old alloc node doesn't match; got %q; want %q", new.NodeID, old.NodeID) 192 } 193 } 194 } 195 196 func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { 197 h := NewHarness(t) 198 199 // Create a node 200 node := mock.Node() 201 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 202 203 // Create a job with count 2 and disk as 60GB so that only one allocation 204 // can fit 205 job := mock.Job() 206 job.TaskGroups[0].Count = 2 207 
job.TaskGroups[0].EphemeralDisk.SizeMB = 88 * 1024 208 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 209 210 // Create a mock evaluation to register the job 211 eval := &structs.Evaluation{ 212 Namespace: structs.DefaultNamespace, 213 ID: uuid.Generate(), 214 Priority: job.Priority, 215 TriggeredBy: structs.EvalTriggerJobRegister, 216 JobID: job.ID, 217 Status: structs.EvalStatusPending, 218 } 219 220 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 221 222 // Process the evaluation 223 err := h.Process(NewServiceScheduler, eval) 224 if err != nil { 225 t.Fatalf("err: %v", err) 226 } 227 228 // Ensure a single plan 229 if len(h.Plans) != 1 { 230 t.Fatalf("bad: %#v", h.Plans) 231 } 232 plan := h.Plans[0] 233 234 // Ensure the plan doesn't have annotations. 235 if plan.Annotations != nil { 236 t.Fatalf("expected no annotations") 237 } 238 239 // Ensure the eval has a blocked eval 240 if len(h.CreateEvals) != 1 { 241 t.Fatalf("bad: %#v", h.CreateEvals) 242 } 243 244 // Ensure the plan allocated only one allocation 245 var planned []*structs.Allocation 246 for _, allocList := range plan.NodeAllocation { 247 planned = append(planned, allocList...) 248 } 249 if len(planned) != 1 { 250 t.Fatalf("bad: %#v", plan) 251 } 252 253 // Lookup the allocations by JobID 254 ws := memdb.NewWatchSet() 255 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 256 noErr(t, err) 257 258 // Ensure only one allocation was placed 259 if len(out) != 1 { 260 t.Fatalf("bad: %#v", out) 261 } 262 263 h.AssertEvalStatus(t, structs.EvalStatusComplete) 264 } 265 266 func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { 267 h := NewHarness(t) 268 269 // Create some nodes 270 for i := 0; i < 10; i++ { 271 node := mock.Node() 272 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 273 } 274 275 // Create a job that uses distinct host and has count 1 higher than what is 276 // possible. 
277 job := mock.Job() 278 job.TaskGroups[0].Count = 11 279 job.Constraints = append(job.Constraints, &structs.Constraint{Operand: structs.ConstraintDistinctHosts}) 280 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 281 282 // Create a mock evaluation to register the job 283 eval := &structs.Evaluation{ 284 Namespace: structs.DefaultNamespace, 285 ID: uuid.Generate(), 286 Priority: job.Priority, 287 TriggeredBy: structs.EvalTriggerJobRegister, 288 JobID: job.ID, 289 Status: structs.EvalStatusPending, 290 } 291 292 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 293 294 // Process the evaluation 295 err := h.Process(NewServiceScheduler, eval) 296 if err != nil { 297 t.Fatalf("err: %v", err) 298 } 299 300 // Ensure a single plan 301 if len(h.Plans) != 1 { 302 t.Fatalf("bad: %#v", h.Plans) 303 } 304 plan := h.Plans[0] 305 306 // Ensure the eval has spawned blocked eval 307 if len(h.CreateEvals) != 1 { 308 t.Fatalf("bad: %#v", h.CreateEvals) 309 } 310 311 // Ensure the plan failed to alloc 312 outEval := h.Evals[0] 313 if len(outEval.FailedTGAllocs) != 1 { 314 t.Fatalf("bad: %+v", outEval) 315 } 316 317 // Ensure the plan allocated 318 var planned []*structs.Allocation 319 for _, allocList := range plan.NodeAllocation { 320 planned = append(planned, allocList...) 321 } 322 if len(planned) != 10 { 323 t.Fatalf("bad: %#v", plan) 324 } 325 326 // Lookup the allocations by JobID 327 ws := memdb.NewWatchSet() 328 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 329 noErr(t, err) 330 331 // Ensure all allocations placed 332 if len(out) != 10 { 333 t.Fatalf("bad: %#v", out) 334 } 335 336 // Ensure different node was used per. 
337 used := make(map[string]struct{}) 338 for _, alloc := range out { 339 if _, ok := used[alloc.NodeID]; ok { 340 t.Fatalf("Node collision %v", alloc.NodeID) 341 } 342 used[alloc.NodeID] = struct{}{} 343 } 344 345 h.AssertEvalStatus(t, structs.EvalStatusComplete) 346 } 347 348 func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { 349 h := NewHarness(t) 350 351 // Create some nodes 352 for i := 0; i < 10; i++ { 353 node := mock.Node() 354 rack := "rack2" 355 if i < 5 { 356 rack = "rack1" 357 } 358 node.Meta["rack"] = rack 359 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 360 } 361 362 // Create a job that uses distinct property and has count higher than what is 363 // possible. 364 job := mock.Job() 365 job.TaskGroups[0].Count = 8 366 job.Constraints = append(job.Constraints, 367 &structs.Constraint{ 368 Operand: structs.ConstraintDistinctProperty, 369 LTarget: "${meta.rack}", 370 RTarget: "2", 371 }) 372 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 373 374 // Create a mock evaluation to register the job 375 eval := &structs.Evaluation{ 376 Namespace: structs.DefaultNamespace, 377 ID: uuid.Generate(), 378 Priority: job.Priority, 379 TriggeredBy: structs.EvalTriggerJobRegister, 380 JobID: job.ID, 381 Status: structs.EvalStatusPending, 382 } 383 384 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 385 386 // Process the evaluation 387 err := h.Process(NewServiceScheduler, eval) 388 if err != nil { 389 t.Fatalf("err: %v", err) 390 } 391 392 // Ensure a single plan 393 if len(h.Plans) != 1 { 394 t.Fatalf("bad: %#v", h.Plans) 395 } 396 plan := h.Plans[0] 397 398 // Ensure the plan doesn't have annotations. 
399 if plan.Annotations != nil { 400 t.Fatalf("expected no annotations") 401 } 402 403 // Ensure the eval has spawned blocked eval 404 if len(h.CreateEvals) != 1 { 405 t.Fatalf("bad: %#v", h.CreateEvals) 406 } 407 408 // Ensure the plan failed to alloc 409 outEval := h.Evals[0] 410 if len(outEval.FailedTGAllocs) != 1 { 411 t.Fatalf("bad: %+v", outEval) 412 } 413 414 // Ensure the plan allocated 415 var planned []*structs.Allocation 416 for _, allocList := range plan.NodeAllocation { 417 planned = append(planned, allocList...) 418 } 419 if len(planned) != 4 { 420 t.Fatalf("bad: %#v", plan) 421 } 422 423 // Lookup the allocations by JobID 424 ws := memdb.NewWatchSet() 425 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 426 noErr(t, err) 427 428 // Ensure all allocations placed 429 if len(out) != 4 { 430 t.Fatalf("bad: %#v", out) 431 } 432 433 // Ensure each node was only used twice 434 used := make(map[string]uint64) 435 for _, alloc := range out { 436 if count, _ := used[alloc.NodeID]; count > 2 { 437 t.Fatalf("Node %v used too much: %d", alloc.NodeID, count) 438 } 439 used[alloc.NodeID]++ 440 } 441 442 h.AssertEvalStatus(t, structs.EvalStatusComplete) 443 } 444 445 func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { 446 h := NewHarness(t) 447 448 // Create some nodes 449 for i := 0; i < 2; i++ { 450 node := mock.Node() 451 node.Meta["ssd"] = "true" 452 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 453 } 454 455 // Create a job that uses distinct property only on one task group. 
456 job := mock.Job() 457 job.TaskGroups = append(job.TaskGroups, job.TaskGroups[0].Copy()) 458 job.TaskGroups[0].Count = 1 459 job.TaskGroups[0].Constraints = append(job.TaskGroups[0].Constraints, 460 &structs.Constraint{ 461 Operand: structs.ConstraintDistinctProperty, 462 LTarget: "${meta.ssd}", 463 }) 464 465 job.TaskGroups[1].Name = "tg2" 466 job.TaskGroups[1].Count = 2 467 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 468 469 // Create a mock evaluation to register the job 470 eval := &structs.Evaluation{ 471 Namespace: structs.DefaultNamespace, 472 ID: uuid.Generate(), 473 Priority: job.Priority, 474 TriggeredBy: structs.EvalTriggerJobRegister, 475 JobID: job.ID, 476 Status: structs.EvalStatusPending, 477 } 478 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 479 480 // Process the evaluation 481 err := h.Process(NewServiceScheduler, eval) 482 if err != nil { 483 t.Fatalf("err: %v", err) 484 } 485 486 // Ensure a single plan 487 if len(h.Plans) != 1 { 488 t.Fatalf("bad: %#v", h.Plans) 489 } 490 plan := h.Plans[0] 491 492 // Ensure the plan doesn't have annotations. 493 if plan.Annotations != nil { 494 t.Fatalf("expected no annotations") 495 } 496 497 // Ensure the eval hasn't spawned blocked eval 498 if len(h.CreateEvals) != 0 { 499 t.Fatalf("bad: %#v", h.CreateEvals[0]) 500 } 501 502 // Ensure the plan allocated 503 var planned []*structs.Allocation 504 for _, allocList := range plan.NodeAllocation { 505 planned = append(planned, allocList...) 
506 } 507 if len(planned) != 3 { 508 t.Fatalf("bad: %#v", plan) 509 } 510 511 // Lookup the allocations by JobID 512 ws := memdb.NewWatchSet() 513 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 514 noErr(t, err) 515 516 // Ensure all allocations placed 517 if len(out) != 3 { 518 t.Fatalf("bad: %#v", out) 519 } 520 521 h.AssertEvalStatus(t, structs.EvalStatusComplete) 522 } 523 524 func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) { 525 h := NewHarness(t) 526 assert := assert.New(t) 527 528 // Create a job that uses distinct property over the node-id 529 job := mock.Job() 530 job.TaskGroups[0].Count = 3 531 job.TaskGroups[0].Constraints = append(job.TaskGroups[0].Constraints, 532 &structs.Constraint{ 533 Operand: structs.ConstraintDistinctProperty, 534 LTarget: "${node.unique.id}", 535 }) 536 assert.Nil(h.State.UpsertJob(h.NextIndex(), job), "UpsertJob") 537 538 // Create some nodes 539 var nodes []*structs.Node 540 for i := 0; i < 6; i++ { 541 node := mock.Node() 542 nodes = append(nodes, node) 543 assert.Nil(h.State.UpsertNode(h.NextIndex(), node), "UpsertNode") 544 } 545 546 // Create some allocations 547 var allocs []*structs.Allocation 548 for i := 0; i < 3; i++ { 549 alloc := mock.Alloc() 550 alloc.Job = job 551 alloc.JobID = job.ID 552 alloc.NodeID = nodes[i].ID 553 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 554 allocs = append(allocs, alloc) 555 } 556 assert.Nil(h.State.UpsertAllocs(h.NextIndex(), allocs), "UpsertAllocs") 557 558 // Update the count 559 job2 := job.Copy() 560 job2.TaskGroups[0].Count = 6 561 assert.Nil(h.State.UpsertJob(h.NextIndex(), job2), "UpsertJob") 562 563 // Create a mock evaluation to register the job 564 eval := &structs.Evaluation{ 565 Namespace: structs.DefaultNamespace, 566 ID: uuid.Generate(), 567 Priority: job.Priority, 568 TriggeredBy: structs.EvalTriggerJobRegister, 569 JobID: job.ID, 570 Status: structs.EvalStatusPending, 571 } 572 noErr(t, 
h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 573 574 // Process the evaluation 575 assert.Nil(h.Process(NewServiceScheduler, eval), "Process") 576 577 // Ensure a single plan 578 assert.Len(h.Plans, 1, "Number of plans") 579 plan := h.Plans[0] 580 581 // Ensure the plan doesn't have annotations. 582 assert.Nil(plan.Annotations, "Plan.Annotations") 583 584 // Ensure the eval hasn't spawned blocked eval 585 assert.Len(h.CreateEvals, 0, "Created Evals") 586 587 // Ensure the plan allocated 588 var planned []*structs.Allocation 589 for _, allocList := range plan.NodeAllocation { 590 planned = append(planned, allocList...) 591 } 592 assert.Len(planned, 6, "Planned Allocations") 593 594 // Lookup the allocations by JobID 595 ws := memdb.NewWatchSet() 596 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 597 assert.Nil(err, "AllocsByJob") 598 599 // Ensure all allocations placed 600 assert.Len(out, 6, "Placed Allocations") 601 602 h.AssertEvalStatus(t, structs.EvalStatusComplete) 603 } 604 605 func TestServiceSched_JobRegister_Annotate(t *testing.T) { 606 h := NewHarness(t) 607 608 // Create some nodes 609 for i := 0; i < 10; i++ { 610 node := mock.Node() 611 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 612 } 613 614 // Create a job 615 job := mock.Job() 616 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 617 618 // Create a mock evaluation to register the job 619 eval := &structs.Evaluation{ 620 Namespace: structs.DefaultNamespace, 621 ID: uuid.Generate(), 622 Priority: job.Priority, 623 TriggeredBy: structs.EvalTriggerJobRegister, 624 JobID: job.ID, 625 AnnotatePlan: true, 626 Status: structs.EvalStatusPending, 627 } 628 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 629 630 // Process the evaluation 631 err := h.Process(NewServiceScheduler, eval) 632 if err != nil { 633 t.Fatalf("err: %v", err) 634 } 635 636 // Ensure a single plan 637 if len(h.Plans) != 1 { 638 t.Fatalf("bad: %#v", h.Plans) 639 } 640 
plan := h.Plans[0] 641 642 // Ensure the plan allocated 643 var planned []*structs.Allocation 644 for _, allocList := range plan.NodeAllocation { 645 planned = append(planned, allocList...) 646 } 647 if len(planned) != 10 { 648 t.Fatalf("bad: %#v", plan) 649 } 650 651 // Lookup the allocations by JobID 652 ws := memdb.NewWatchSet() 653 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 654 noErr(t, err) 655 656 // Ensure all allocations placed 657 if len(out) != 10 { 658 t.Fatalf("bad: %#v", out) 659 } 660 661 h.AssertEvalStatus(t, structs.EvalStatusComplete) 662 663 // Ensure the plan had annotations. 664 if plan.Annotations == nil { 665 t.Fatalf("expected annotations") 666 } 667 668 desiredTGs := plan.Annotations.DesiredTGUpdates 669 if l := len(desiredTGs); l != 1 { 670 t.Fatalf("incorrect number of task groups; got %v; want %v", l, 1) 671 } 672 673 desiredChanges, ok := desiredTGs["web"] 674 if !ok { 675 t.Fatalf("expected task group web to have desired changes") 676 } 677 678 expected := &structs.DesiredUpdates{Place: 10} 679 if !reflect.DeepEqual(desiredChanges, expected) { 680 t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected) 681 } 682 } 683 684 func TestServiceSched_JobRegister_CountZero(t *testing.T) { 685 h := NewHarness(t) 686 687 // Create some nodes 688 for i := 0; i < 10; i++ { 689 node := mock.Node() 690 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 691 } 692 693 // Create a job and set the task group count to zero. 
694 job := mock.Job() 695 job.TaskGroups[0].Count = 0 696 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 697 698 // Create a mock evaluation to register the job 699 eval := &structs.Evaluation{ 700 Namespace: structs.DefaultNamespace, 701 ID: uuid.Generate(), 702 Priority: job.Priority, 703 TriggeredBy: structs.EvalTriggerJobRegister, 704 JobID: job.ID, 705 Status: structs.EvalStatusPending, 706 } 707 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 708 709 // Process the evaluation 710 err := h.Process(NewServiceScheduler, eval) 711 if err != nil { 712 t.Fatalf("err: %v", err) 713 } 714 715 // Ensure there was no plan 716 if len(h.Plans) != 0 { 717 t.Fatalf("bad: %#v", h.Plans) 718 } 719 720 // Lookup the allocations by JobID 721 ws := memdb.NewWatchSet() 722 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 723 noErr(t, err) 724 725 // Ensure no allocations placed 726 if len(out) != 0 { 727 t.Fatalf("bad: %#v", out) 728 } 729 730 h.AssertEvalStatus(t, structs.EvalStatusComplete) 731 } 732 733 func TestServiceSched_JobRegister_AllocFail(t *testing.T) { 734 h := NewHarness(t) 735 736 // Create NO nodes 737 // Create a job 738 job := mock.Job() 739 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 740 741 // Create a mock evaluation to register the job 742 eval := &structs.Evaluation{ 743 Namespace: structs.DefaultNamespace, 744 ID: uuid.Generate(), 745 Priority: job.Priority, 746 TriggeredBy: structs.EvalTriggerJobRegister, 747 JobID: job.ID, 748 Status: structs.EvalStatusPending, 749 } 750 751 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 752 753 // Process the evaluation 754 err := h.Process(NewServiceScheduler, eval) 755 if err != nil { 756 t.Fatalf("err: %v", err) 757 } 758 759 // Ensure no plan 760 if len(h.Plans) != 0 { 761 t.Fatalf("bad: %#v", h.Plans) 762 } 763 764 // Ensure there is a follow up eval. 
765 if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked { 766 t.Fatalf("bad: %#v", h.CreateEvals) 767 } 768 769 if len(h.Evals) != 1 { 770 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 771 } 772 outEval := h.Evals[0] 773 774 // Ensure the eval has its spawned blocked eval 775 if outEval.BlockedEval != h.CreateEvals[0].ID { 776 t.Fatalf("bad: %#v", outEval) 777 } 778 779 // Ensure the plan failed to alloc 780 if outEval == nil || len(outEval.FailedTGAllocs) != 1 { 781 t.Fatalf("bad: %#v", outEval) 782 } 783 784 metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name] 785 if !ok { 786 t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs) 787 } 788 789 // Check the coalesced failures 790 if metrics.CoalescedFailures != 9 { 791 t.Fatalf("bad: %#v", metrics) 792 } 793 794 // Check the available nodes 795 if count, ok := metrics.NodesAvailable["dc1"]; !ok || count != 0 { 796 t.Fatalf("bad: %#v", metrics) 797 } 798 799 // Check queued allocations 800 queued := outEval.QueuedAllocations["web"] 801 if queued != 10 { 802 t.Fatalf("expected queued: %v, actual: %v", 10, queued) 803 } 804 h.AssertEvalStatus(t, structs.EvalStatusComplete) 805 } 806 807 func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { 808 h := NewHarness(t) 809 810 // Create a full node 811 node := mock.Node() 812 node.Reserved = node.Resources 813 node.ComputeClass() 814 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 815 816 // Create an ineligible node 817 node2 := mock.Node() 818 node2.Attributes["kernel.name"] = "windows" 819 node2.ComputeClass() 820 noErr(t, h.State.UpsertNode(h.NextIndex(), node2)) 821 822 // Create a jobs 823 job := mock.Job() 824 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 825 826 // Create a mock evaluation to register the job 827 eval := &structs.Evaluation{ 828 Namespace: structs.DefaultNamespace, 829 ID: uuid.Generate(), 830 Priority: job.Priority, 831 TriggeredBy: structs.EvalTriggerJobRegister, 832 
JobID: job.ID, 833 Status: structs.EvalStatusPending, 834 } 835 836 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 837 838 // Process the evaluation 839 err := h.Process(NewServiceScheduler, eval) 840 if err != nil { 841 t.Fatalf("err: %v", err) 842 } 843 844 // Ensure no plan 845 if len(h.Plans) != 0 { 846 t.Fatalf("bad: %#v", h.Plans) 847 } 848 849 // Ensure the plan has created a follow up eval. 850 if len(h.CreateEvals) != 1 { 851 t.Fatalf("bad: %#v", h.CreateEvals) 852 } 853 854 created := h.CreateEvals[0] 855 if created.Status != structs.EvalStatusBlocked { 856 t.Fatalf("bad: %#v", created) 857 } 858 859 classes := created.ClassEligibility 860 if len(classes) != 2 || !classes[node.ComputedClass] || classes[node2.ComputedClass] { 861 t.Fatalf("bad: %#v", classes) 862 } 863 864 if created.EscapedComputedClass { 865 t.Fatalf("bad: %#v", created) 866 } 867 868 // Ensure there is a follow up eval. 869 if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked { 870 t.Fatalf("bad: %#v", h.CreateEvals) 871 } 872 873 if len(h.Evals) != 1 { 874 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 875 } 876 outEval := h.Evals[0] 877 878 // Ensure the plan failed to alloc 879 if outEval == nil || len(outEval.FailedTGAllocs) != 1 { 880 t.Fatalf("bad: %#v", outEval) 881 } 882 883 metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name] 884 if !ok { 885 t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs) 886 } 887 888 // Check the coalesced failures 889 if metrics.CoalescedFailures != 9 { 890 t.Fatalf("bad: %#v", metrics) 891 } 892 893 // Check the available nodes 894 if count, ok := metrics.NodesAvailable["dc1"]; !ok || count != 2 { 895 t.Fatalf("bad: %#v", metrics) 896 } 897 898 h.AssertEvalStatus(t, structs.EvalStatusComplete) 899 } 900 901 func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { 902 h := NewHarness(t) 903 904 // Create one node 905 node := mock.Node() 906 
node.NodeClass = "class_0" 907 noErr(t, node.ComputeClass()) 908 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 909 910 // Create a job that constrains on a node class 911 job := mock.Job() 912 job.TaskGroups[0].Count = 2 913 job.TaskGroups[0].Constraints = append(job.Constraints, 914 &structs.Constraint{ 915 LTarget: "${node.class}", 916 RTarget: "class_0", 917 Operand: "=", 918 }, 919 ) 920 tg2 := job.TaskGroups[0].Copy() 921 tg2.Name = "web2" 922 tg2.Constraints[1].RTarget = "class_1" 923 job.TaskGroups = append(job.TaskGroups, tg2) 924 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 925 926 // Create a mock evaluation to register the job 927 eval := &structs.Evaluation{ 928 Namespace: structs.DefaultNamespace, 929 ID: uuid.Generate(), 930 Priority: job.Priority, 931 TriggeredBy: structs.EvalTriggerJobRegister, 932 JobID: job.ID, 933 Status: structs.EvalStatusPending, 934 } 935 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 936 // Process the evaluation 937 err := h.Process(NewServiceScheduler, eval) 938 if err != nil { 939 t.Fatalf("err: %v", err) 940 } 941 942 // Ensure a single plan 943 if len(h.Plans) != 1 { 944 t.Fatalf("bad: %#v", h.Plans) 945 } 946 plan := h.Plans[0] 947 948 // Ensure the plan allocated 949 var planned []*structs.Allocation 950 for _, allocList := range plan.NodeAllocation { 951 planned = append(planned, allocList...) 
952 } 953 if len(planned) != 2 { 954 t.Fatalf("bad: %#v", plan) 955 } 956 957 // Ensure two allocations placed 958 ws := memdb.NewWatchSet() 959 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 960 noErr(t, err) 961 if len(out) != 2 { 962 t.Fatalf("bad: %#v", out) 963 } 964 965 if len(h.Evals) != 1 { 966 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 967 } 968 outEval := h.Evals[0] 969 970 // Ensure the eval has its spawned blocked eval 971 if outEval.BlockedEval != h.CreateEvals[0].ID { 972 t.Fatalf("bad: %#v", outEval) 973 } 974 975 // Ensure the plan failed to alloc one tg 976 if outEval == nil || len(outEval.FailedTGAllocs) != 1 { 977 t.Fatalf("bad: %#v", outEval) 978 } 979 980 metrics, ok := outEval.FailedTGAllocs[tg2.Name] 981 if !ok { 982 t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs) 983 } 984 985 // Check the coalesced failures 986 if metrics.CoalescedFailures != tg2.Count-1 { 987 t.Fatalf("bad: %#v", metrics) 988 } 989 990 h.AssertEvalStatus(t, structs.EvalStatusComplete) 991 } 992 993 // This test just ensures the scheduler handles the eval type to avoid 994 // regressions. 995 func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { 996 h := NewHarness(t) 997 998 // Create a job and set the task group count to zero. 
999 job := mock.Job() 1000 job.TaskGroups[0].Count = 0 1001 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1002 1003 // Create a mock blocked evaluation 1004 eval := &structs.Evaluation{ 1005 Namespace: structs.DefaultNamespace, 1006 ID: uuid.Generate(), 1007 Status: structs.EvalStatusBlocked, 1008 Priority: job.Priority, 1009 TriggeredBy: structs.EvalTriggerMaxPlans, 1010 JobID: job.ID, 1011 } 1012 1013 // Insert it into the state store 1014 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1015 1016 // Process the evaluation 1017 err := h.Process(NewServiceScheduler, eval) 1018 if err != nil { 1019 t.Fatalf("err: %v", err) 1020 } 1021 1022 // Ensure there was no plan 1023 if len(h.Plans) != 0 { 1024 t.Fatalf("bad: %#v", h.Plans) 1025 } 1026 1027 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1028 } 1029 1030 func TestServiceSched_Plan_Partial_Progress(t *testing.T) { 1031 h := NewHarness(t) 1032 1033 // Create a node 1034 node := mock.Node() 1035 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1036 1037 // Create a job with a high resource ask so that all the allocations can't 1038 // be placed on a single node. 
1039 job := mock.Job() 1040 job.TaskGroups[0].Count = 3 1041 job.TaskGroups[0].Tasks[0].Resources.CPU = 3600 1042 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1043 1044 // Create a mock evaluation to register the job 1045 eval := &structs.Evaluation{ 1046 Namespace: structs.DefaultNamespace, 1047 ID: uuid.Generate(), 1048 Priority: job.Priority, 1049 TriggeredBy: structs.EvalTriggerJobRegister, 1050 JobID: job.ID, 1051 Status: structs.EvalStatusPending, 1052 } 1053 1054 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1055 1056 // Process the evaluation 1057 err := h.Process(NewServiceScheduler, eval) 1058 if err != nil { 1059 t.Fatalf("err: %v", err) 1060 } 1061 1062 // Ensure a single plan 1063 if len(h.Plans) != 1 { 1064 t.Fatalf("bad: %#v", h.Plans) 1065 } 1066 plan := h.Plans[0] 1067 1068 // Ensure the plan doesn't have annotations. 1069 if plan.Annotations != nil { 1070 t.Fatalf("expected no annotations") 1071 } 1072 1073 // Ensure the plan allocated 1074 var planned []*structs.Allocation 1075 for _, allocList := range plan.NodeAllocation { 1076 planned = append(planned, allocList...) 
1077 } 1078 if len(planned) != 1 { 1079 t.Fatalf("bad: %#v", plan) 1080 } 1081 1082 // Lookup the allocations by JobID 1083 ws := memdb.NewWatchSet() 1084 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1085 noErr(t, err) 1086 1087 // Ensure only one allocations placed 1088 if len(out) != 1 { 1089 t.Fatalf("bad: %#v", out) 1090 } 1091 1092 queued := h.Evals[0].QueuedAllocations["web"] 1093 if queued != 2 { 1094 t.Fatalf("expected: %v, actual: %v", 2, queued) 1095 } 1096 1097 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1098 } 1099 1100 func TestServiceSched_EvaluateBlockedEval(t *testing.T) { 1101 h := NewHarness(t) 1102 1103 // Create a job 1104 job := mock.Job() 1105 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1106 1107 // Create a mock blocked evaluation 1108 eval := &structs.Evaluation{ 1109 Namespace: structs.DefaultNamespace, 1110 ID: uuid.Generate(), 1111 Status: structs.EvalStatusBlocked, 1112 Priority: job.Priority, 1113 TriggeredBy: structs.EvalTriggerJobRegister, 1114 JobID: job.ID, 1115 } 1116 1117 // Insert it into the state store 1118 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1119 1120 // Process the evaluation 1121 err := h.Process(NewServiceScheduler, eval) 1122 if err != nil { 1123 t.Fatalf("err: %v", err) 1124 } 1125 1126 // Ensure there was no plan 1127 if len(h.Plans) != 0 { 1128 t.Fatalf("bad: %#v", h.Plans) 1129 } 1130 1131 // Ensure that the eval was reblocked 1132 if len(h.ReblockEvals) != 1 { 1133 t.Fatalf("bad: %#v", h.ReblockEvals) 1134 } 1135 if h.ReblockEvals[0].ID != eval.ID { 1136 t.Fatalf("expect same eval to be reblocked; got %q; want %q", h.ReblockEvals[0].ID, eval.ID) 1137 } 1138 1139 // Ensure the eval status was not updated 1140 if len(h.Evals) != 0 { 1141 t.Fatalf("Existing eval should not have status set") 1142 } 1143 } 1144 1145 func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { 1146 h := NewHarness(t) 1147 1148 // Create some nodes 1149 for i := 0; i 
< 10; i++ { 1150 node := mock.Node() 1151 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1152 } 1153 1154 // Create a job and set the task group count to zero. 1155 job := mock.Job() 1156 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1157 1158 // Create a mock blocked evaluation 1159 eval := &structs.Evaluation{ 1160 Namespace: structs.DefaultNamespace, 1161 ID: uuid.Generate(), 1162 Status: structs.EvalStatusBlocked, 1163 Priority: job.Priority, 1164 TriggeredBy: structs.EvalTriggerJobRegister, 1165 JobID: job.ID, 1166 } 1167 1168 // Insert it into the state store 1169 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1170 1171 // Process the evaluation 1172 err := h.Process(NewServiceScheduler, eval) 1173 if err != nil { 1174 t.Fatalf("err: %v", err) 1175 } 1176 1177 // Ensure a single plan 1178 if len(h.Plans) != 1 { 1179 t.Fatalf("bad: %#v", h.Plans) 1180 } 1181 plan := h.Plans[0] 1182 1183 // Ensure the plan doesn't have annotations. 1184 if plan.Annotations != nil { 1185 t.Fatalf("expected no annotations") 1186 } 1187 1188 // Ensure the eval has no spawned blocked eval 1189 if len(h.Evals) != 1 { 1190 t.Fatalf("bad: %#v", h.Evals) 1191 if h.Evals[0].BlockedEval != "" { 1192 t.Fatalf("bad: %#v", h.Evals[0]) 1193 } 1194 } 1195 1196 // Ensure the plan allocated 1197 var planned []*structs.Allocation 1198 for _, allocList := range plan.NodeAllocation { 1199 planned = append(planned, allocList...) 
1200 } 1201 if len(planned) != 10 { 1202 t.Fatalf("bad: %#v", plan) 1203 } 1204 1205 // Lookup the allocations by JobID 1206 ws := memdb.NewWatchSet() 1207 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1208 noErr(t, err) 1209 1210 // Ensure all allocations placed 1211 if len(out) != 10 { 1212 t.Fatalf("bad: %#v", out) 1213 } 1214 1215 // Ensure the eval was not reblocked 1216 if len(h.ReblockEvals) != 0 { 1217 t.Fatalf("Existing eval should not have been reblocked as it placed all allocations") 1218 } 1219 1220 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1221 1222 // Ensure queued allocations is zero 1223 queued := h.Evals[0].QueuedAllocations["web"] 1224 if queued != 0 { 1225 t.Fatalf("expected queued: %v, actual: %v", 0, queued) 1226 } 1227 } 1228 1229 func TestServiceSched_JobModify(t *testing.T) { 1230 h := NewHarness(t) 1231 1232 // Create some nodes 1233 var nodes []*structs.Node 1234 for i := 0; i < 10; i++ { 1235 node := mock.Node() 1236 nodes = append(nodes, node) 1237 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1238 } 1239 1240 // Generate a fake job with allocations 1241 job := mock.Job() 1242 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1243 1244 var allocs []*structs.Allocation 1245 for i := 0; i < 10; i++ { 1246 alloc := mock.Alloc() 1247 alloc.Job = job 1248 alloc.JobID = job.ID 1249 alloc.NodeID = nodes[i].ID 1250 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1251 allocs = append(allocs, alloc) 1252 } 1253 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1254 1255 // Add a few terminal status allocations, these should be ignored 1256 var terminal []*structs.Allocation 1257 for i := 0; i < 5; i++ { 1258 alloc := mock.Alloc() 1259 alloc.Job = job 1260 alloc.JobID = job.ID 1261 alloc.NodeID = nodes[i].ID 1262 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1263 alloc.DesiredStatus = structs.AllocDesiredStatusStop 1264 terminal = append(terminal, alloc) 1265 } 1266 noErr(t, h.State.UpsertAllocs(h.NextIndex(), 
terminal)) 1267 1268 // Update the job 1269 job2 := mock.Job() 1270 job2.ID = job.ID 1271 1272 // Update the task, such that it cannot be done in-place 1273 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 1274 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1275 1276 // Create a mock evaluation to deal with drain 1277 eval := &structs.Evaluation{ 1278 Namespace: structs.DefaultNamespace, 1279 ID: uuid.Generate(), 1280 Priority: 50, 1281 TriggeredBy: structs.EvalTriggerJobRegister, 1282 JobID: job.ID, 1283 Status: structs.EvalStatusPending, 1284 } 1285 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1286 1287 // Process the evaluation 1288 err := h.Process(NewServiceScheduler, eval) 1289 if err != nil { 1290 t.Fatalf("err: %v", err) 1291 } 1292 1293 // Ensure a single plan 1294 if len(h.Plans) != 1 { 1295 t.Fatalf("bad: %#v", h.Plans) 1296 } 1297 plan := h.Plans[0] 1298 1299 // Ensure the plan evicted all allocs 1300 var update []*structs.Allocation 1301 for _, updateList := range plan.NodeUpdate { 1302 update = append(update, updateList...) 1303 } 1304 if len(update) != len(allocs) { 1305 t.Fatalf("bad: %#v", plan) 1306 } 1307 1308 // Ensure the plan allocated 1309 var planned []*structs.Allocation 1310 for _, allocList := range plan.NodeAllocation { 1311 planned = append(planned, allocList...) 1312 } 1313 if len(planned) != 10 { 1314 t.Fatalf("bad: %#v", plan) 1315 } 1316 1317 // Lookup the allocations by JobID 1318 ws := memdb.NewWatchSet() 1319 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1320 noErr(t, err) 1321 1322 // Ensure all allocations placed 1323 out, _ = structs.FilterTerminalAllocs(out) 1324 if len(out) != 10 { 1325 t.Fatalf("bad: %#v", out) 1326 } 1327 1328 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1329 } 1330 1331 // Have a single node and submit a job. 
Increment the count such that all fit 1332 // on the node but the node doesn't have enough resources to fit the new count + 1333 // 1. This tests that we properly discount the resources of existing allocs. 1334 func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { 1335 h := NewHarness(t) 1336 1337 // Create one node 1338 node := mock.Node() 1339 node.Resources.CPU = 1000 1340 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1341 1342 // Generate a fake job with one allocation 1343 job := mock.Job() 1344 job.TaskGroups[0].Tasks[0].Resources.CPU = 256 1345 job2 := job.Copy() 1346 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1347 1348 var allocs []*structs.Allocation 1349 alloc := mock.Alloc() 1350 alloc.Job = job 1351 alloc.JobID = job.ID 1352 alloc.NodeID = node.ID 1353 alloc.Name = "my-job.web[0]" 1354 alloc.Resources.CPU = 256 1355 allocs = append(allocs, alloc) 1356 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1357 1358 // Update the job to count 3 1359 job2.TaskGroups[0].Count = 3 1360 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1361 1362 // Create a mock evaluation to deal with drain 1363 eval := &structs.Evaluation{ 1364 Namespace: structs.DefaultNamespace, 1365 ID: uuid.Generate(), 1366 Priority: 50, 1367 TriggeredBy: structs.EvalTriggerJobRegister, 1368 JobID: job.ID, 1369 Status: structs.EvalStatusPending, 1370 } 1371 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1372 1373 // Process the evaluation 1374 err := h.Process(NewServiceScheduler, eval) 1375 if err != nil { 1376 t.Fatalf("err: %v", err) 1377 } 1378 1379 // Ensure a single plan 1380 if len(h.Plans) != 1 { 1381 t.Fatalf("bad: %#v", h.Plans) 1382 } 1383 plan := h.Plans[0] 1384 1385 // Ensure the plan didn't evicted the alloc 1386 var update []*structs.Allocation 1387 for _, updateList := range plan.NodeUpdate { 1388 update = append(update, updateList...) 
1389 } 1390 if len(update) != 0 { 1391 t.Fatalf("bad: %#v", plan) 1392 } 1393 1394 // Ensure the plan allocated 1395 var planned []*structs.Allocation 1396 for _, allocList := range plan.NodeAllocation { 1397 planned = append(planned, allocList...) 1398 } 1399 if len(planned) != 3 { 1400 t.Fatalf("bad: %#v", plan) 1401 } 1402 1403 // Ensure the plan had no failures 1404 if len(h.Evals) != 1 { 1405 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 1406 } 1407 outEval := h.Evals[0] 1408 if outEval == nil || len(outEval.FailedTGAllocs) != 0 { 1409 t.Fatalf("bad: %#v", outEval) 1410 } 1411 1412 // Lookup the allocations by JobID 1413 ws := memdb.NewWatchSet() 1414 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1415 noErr(t, err) 1416 1417 // Ensure all allocations placed 1418 out, _ = structs.FilterTerminalAllocs(out) 1419 if len(out) != 3 { 1420 t.Fatalf("bad: %#v", out) 1421 } 1422 1423 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1424 } 1425 1426 func TestServiceSched_JobModify_CountZero(t *testing.T) { 1427 h := NewHarness(t) 1428 1429 // Create some nodes 1430 var nodes []*structs.Node 1431 for i := 0; i < 10; i++ { 1432 node := mock.Node() 1433 nodes = append(nodes, node) 1434 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1435 } 1436 1437 // Generate a fake job with allocations 1438 job := mock.Job() 1439 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1440 1441 var allocs []*structs.Allocation 1442 for i := 0; i < 10; i++ { 1443 alloc := mock.Alloc() 1444 alloc.Job = job 1445 alloc.JobID = job.ID 1446 alloc.NodeID = nodes[i].ID 1447 alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i)) 1448 allocs = append(allocs, alloc) 1449 } 1450 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1451 1452 // Add a few terminal status allocations, these should be ignored 1453 var terminal []*structs.Allocation 1454 for i := 0; i < 5; i++ { 1455 alloc := mock.Alloc() 1456 alloc.Job = job 1457 alloc.JobID = job.ID 1458 
alloc.NodeID = nodes[i].ID 1459 alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i)) 1460 alloc.DesiredStatus = structs.AllocDesiredStatusStop 1461 terminal = append(terminal, alloc) 1462 } 1463 noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal)) 1464 1465 // Update the job to be count zero 1466 job2 := mock.Job() 1467 job2.ID = job.ID 1468 job2.TaskGroups[0].Count = 0 1469 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1470 1471 // Create a mock evaluation to deal with drain 1472 eval := &structs.Evaluation{ 1473 Namespace: structs.DefaultNamespace, 1474 ID: uuid.Generate(), 1475 Priority: 50, 1476 TriggeredBy: structs.EvalTriggerJobRegister, 1477 JobID: job.ID, 1478 Status: structs.EvalStatusPending, 1479 } 1480 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1481 1482 // Process the evaluation 1483 err := h.Process(NewServiceScheduler, eval) 1484 if err != nil { 1485 t.Fatalf("err: %v", err) 1486 } 1487 1488 // Ensure a single plan 1489 if len(h.Plans) != 1 { 1490 t.Fatalf("bad: %#v", h.Plans) 1491 } 1492 plan := h.Plans[0] 1493 1494 // Ensure the plan evicted all allocs 1495 var update []*structs.Allocation 1496 for _, updateList := range plan.NodeUpdate { 1497 update = append(update, updateList...) 1498 } 1499 if len(update) != len(allocs) { 1500 t.Fatalf("bad: %#v", plan) 1501 } 1502 1503 // Ensure the plan didn't allocated 1504 var planned []*structs.Allocation 1505 for _, allocList := range plan.NodeAllocation { 1506 planned = append(planned, allocList...) 
1507 } 1508 if len(planned) != 0 { 1509 t.Fatalf("bad: %#v", plan) 1510 } 1511 1512 // Lookup the allocations by JobID 1513 ws := memdb.NewWatchSet() 1514 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1515 noErr(t, err) 1516 1517 // Ensure all allocations placed 1518 out, _ = structs.FilterTerminalAllocs(out) 1519 if len(out) != 0 { 1520 t.Fatalf("bad: %#v", out) 1521 } 1522 1523 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1524 } 1525 1526 func TestServiceSched_JobModify_Rolling(t *testing.T) { 1527 h := NewHarness(t) 1528 1529 // Create some nodes 1530 var nodes []*structs.Node 1531 for i := 0; i < 10; i++ { 1532 node := mock.Node() 1533 nodes = append(nodes, node) 1534 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1535 } 1536 1537 // Generate a fake job with allocations 1538 job := mock.Job() 1539 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1540 1541 var allocs []*structs.Allocation 1542 for i := 0; i < 10; i++ { 1543 alloc := mock.Alloc() 1544 alloc.Job = job 1545 alloc.JobID = job.ID 1546 alloc.NodeID = nodes[i].ID 1547 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1548 allocs = append(allocs, alloc) 1549 } 1550 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1551 1552 // Update the job 1553 job2 := mock.Job() 1554 job2.ID = job.ID 1555 desiredUpdates := 4 1556 job2.TaskGroups[0].Update = &structs.UpdateStrategy{ 1557 MaxParallel: desiredUpdates, 1558 HealthCheck: structs.UpdateStrategyHealthCheck_Checks, 1559 MinHealthyTime: 10 * time.Second, 1560 HealthyDeadline: 10 * time.Minute, 1561 } 1562 1563 // Update the task, such that it cannot be done in-place 1564 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 1565 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1566 1567 // Create a mock evaluation to deal with drain 1568 eval := &structs.Evaluation{ 1569 Namespace: structs.DefaultNamespace, 1570 ID: uuid.Generate(), 1571 Priority: 50, 1572 TriggeredBy: structs.EvalTriggerJobRegister, 1573 JobID: job.ID, 1574 
Status: structs.EvalStatusPending, 1575 } 1576 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1577 1578 // Process the evaluation 1579 err := h.Process(NewServiceScheduler, eval) 1580 if err != nil { 1581 t.Fatalf("err: %v", err) 1582 } 1583 1584 // Ensure a single plan 1585 if len(h.Plans) != 1 { 1586 t.Fatalf("bad: %#v", h.Plans) 1587 } 1588 plan := h.Plans[0] 1589 1590 // Ensure the plan evicted only MaxParallel 1591 var update []*structs.Allocation 1592 for _, updateList := range plan.NodeUpdate { 1593 update = append(update, updateList...) 1594 } 1595 if len(update) != desiredUpdates { 1596 t.Fatalf("bad: got %d; want %d: %#v", len(update), desiredUpdates, plan) 1597 } 1598 1599 // Ensure the plan allocated 1600 var planned []*structs.Allocation 1601 for _, allocList := range plan.NodeAllocation { 1602 planned = append(planned, allocList...) 1603 } 1604 if len(planned) != desiredUpdates { 1605 t.Fatalf("bad: %#v", plan) 1606 } 1607 1608 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1609 1610 // Check that the deployment id is attached to the eval 1611 if h.Evals[0].DeploymentID == "" { 1612 t.Fatalf("Eval not annotated with deployment id") 1613 } 1614 1615 // Ensure a deployment was created 1616 if plan.Deployment == nil { 1617 t.Fatalf("bad: %#v", plan) 1618 } 1619 state, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name] 1620 if !ok { 1621 t.Fatalf("bad: %#v", plan) 1622 } 1623 if state.DesiredTotal != 10 && state.DesiredCanaries != 0 { 1624 t.Fatalf("bad: %#v", state) 1625 } 1626 } 1627 1628 // This tests that the old allocation is stopped before placing. 1629 // It is critical to test that the updated job attempts to place more 1630 // allocations as this allows us to assert that destructive changes are done 1631 // first. 
1632 func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { 1633 h := NewHarness(t) 1634 1635 // Create a node 1636 node := mock.Node() 1637 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1638 1639 resourceAsk := node.Resources.Copy() 1640 resourceAsk.CPU -= node.Reserved.CPU 1641 resourceAsk.MemoryMB -= node.Reserved.MemoryMB 1642 resourceAsk.DiskMB -= node.Reserved.DiskMB 1643 resourceAsk.Networks = nil 1644 1645 // Generate a fake job with one alloc that consumes the whole node 1646 job := mock.Job() 1647 job.TaskGroups[0].Count = 1 1648 job.TaskGroups[0].Tasks[0].Resources = resourceAsk 1649 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1650 1651 alloc := mock.Alloc() 1652 alloc.Resources = resourceAsk 1653 alloc.Job = job 1654 alloc.JobID = job.ID 1655 alloc.NodeID = node.ID 1656 alloc.Name = "my-job.web[0]" 1657 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 1658 1659 // Update the job to place more versions of the task group, drop the count 1660 // and force destructive updates 1661 job2 := job.Copy() 1662 job2.TaskGroups[0].Count = 5 1663 job2.TaskGroups[0].Update = &structs.UpdateStrategy{ 1664 MaxParallel: 5, 1665 HealthCheck: structs.UpdateStrategyHealthCheck_Checks, 1666 MinHealthyTime: 10 * time.Second, 1667 HealthyDeadline: 10 * time.Minute, 1668 } 1669 job2.TaskGroups[0].Tasks[0].Resources = mock.Alloc().Resources 1670 1671 // Update the task, such that it cannot be done in-place 1672 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 1673 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1674 1675 eval := &structs.Evaluation{ 1676 Namespace: structs.DefaultNamespace, 1677 ID: uuid.Generate(), 1678 Priority: 50, 1679 TriggeredBy: structs.EvalTriggerJobRegister, 1680 JobID: job.ID, 1681 Status: structs.EvalStatusPending, 1682 } 1683 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1684 1685 // Process the evaluation 1686 err := h.Process(NewServiceScheduler, eval) 1687 if 
err != nil { 1688 t.Fatalf("err: %v", err) 1689 } 1690 1691 // Ensure a single plan 1692 if len(h.Plans) != 1 { 1693 t.Fatalf("bad: %#v", h.Plans) 1694 } 1695 plan := h.Plans[0] 1696 1697 // Ensure the plan evicted only MaxParallel 1698 var update []*structs.Allocation 1699 for _, updateList := range plan.NodeUpdate { 1700 update = append(update, updateList...) 1701 } 1702 if len(update) != 1 { 1703 t.Fatalf("bad: got %d; want %d: %#v", len(update), 1, plan) 1704 } 1705 1706 // Ensure the plan allocated 1707 var planned []*structs.Allocation 1708 for _, allocList := range plan.NodeAllocation { 1709 planned = append(planned, allocList...) 1710 } 1711 if len(planned) != 1 { 1712 t.Fatalf("bad: %#v", plan) 1713 } 1714 1715 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1716 1717 // Check that the deployment id is attached to the eval 1718 if h.Evals[0].DeploymentID == "" { 1719 t.Fatalf("Eval not annotated with deployment id") 1720 } 1721 1722 // Ensure a deployment was created 1723 if plan.Deployment == nil { 1724 t.Fatalf("bad: %#v", plan) 1725 } 1726 state, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name] 1727 if !ok { 1728 t.Fatalf("bad: %#v", plan) 1729 } 1730 if state.DesiredTotal != 1 && state.DesiredCanaries != 0 { 1731 t.Fatalf("bad: %#v", state) 1732 } 1733 } 1734 1735 func TestServiceSched_JobModify_Canaries(t *testing.T) { 1736 h := NewHarness(t) 1737 1738 // Create some nodes 1739 var nodes []*structs.Node 1740 for i := 0; i < 10; i++ { 1741 node := mock.Node() 1742 nodes = append(nodes, node) 1743 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1744 } 1745 1746 // Generate a fake job with allocations 1747 job := mock.Job() 1748 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1749 1750 var allocs []*structs.Allocation 1751 for i := 0; i < 10; i++ { 1752 alloc := mock.Alloc() 1753 alloc.Job = job 1754 alloc.JobID = job.ID 1755 alloc.NodeID = nodes[i].ID 1756 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1757 allocs = append(allocs, alloc) 1758 
} 1759 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1760 1761 // Update the job 1762 job2 := mock.Job() 1763 job2.ID = job.ID 1764 desiredUpdates := 2 1765 job2.TaskGroups[0].Update = &structs.UpdateStrategy{ 1766 MaxParallel: desiredUpdates, 1767 Canary: desiredUpdates, 1768 HealthCheck: structs.UpdateStrategyHealthCheck_Checks, 1769 MinHealthyTime: 10 * time.Second, 1770 HealthyDeadline: 10 * time.Minute, 1771 } 1772 1773 // Update the task, such that it cannot be done in-place 1774 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 1775 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1776 1777 // Create a mock evaluation to deal with drain 1778 eval := &structs.Evaluation{ 1779 Namespace: structs.DefaultNamespace, 1780 ID: uuid.Generate(), 1781 Priority: 50, 1782 TriggeredBy: structs.EvalTriggerJobRegister, 1783 JobID: job.ID, 1784 Status: structs.EvalStatusPending, 1785 } 1786 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1787 1788 // Process the evaluation 1789 err := h.Process(NewServiceScheduler, eval) 1790 if err != nil { 1791 t.Fatalf("err: %v", err) 1792 } 1793 1794 // Ensure a single plan 1795 if len(h.Plans) != 1 { 1796 t.Fatalf("bad: %#v", h.Plans) 1797 } 1798 plan := h.Plans[0] 1799 1800 // Ensure the plan evicted nothing 1801 var update []*structs.Allocation 1802 for _, updateList := range plan.NodeUpdate { 1803 update = append(update, updateList...) 1804 } 1805 if len(update) != 0 { 1806 t.Fatalf("bad: got %d; want %d: %#v", len(update), 0, plan) 1807 } 1808 1809 // Ensure the plan allocated 1810 var planned []*structs.Allocation 1811 for _, allocList := range plan.NodeAllocation { 1812 planned = append(planned, allocList...) 
1813 } 1814 if len(planned) != desiredUpdates { 1815 t.Fatalf("bad: %#v", plan) 1816 } 1817 1818 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1819 1820 // Check that the deployment id is attached to the eval 1821 if h.Evals[0].DeploymentID == "" { 1822 t.Fatalf("Eval not annotated with deployment id") 1823 } 1824 1825 // Ensure a deployment was created 1826 if plan.Deployment == nil { 1827 t.Fatalf("bad: %#v", plan) 1828 } 1829 state, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name] 1830 if !ok { 1831 t.Fatalf("bad: %#v", plan) 1832 } 1833 if state.DesiredTotal != 10 && state.DesiredCanaries != desiredUpdates { 1834 t.Fatalf("bad: %#v", state) 1835 } 1836 1837 // Assert the canaries were added to the placed list 1838 if len(state.PlacedCanaries) != desiredUpdates { 1839 t.Fatalf("bad: %#v", state) 1840 } 1841 } 1842 1843 func TestServiceSched_JobModify_InPlace(t *testing.T) { 1844 h := NewHarness(t) 1845 1846 // Create some nodes 1847 var nodes []*structs.Node 1848 for i := 0; i < 10; i++ { 1849 node := mock.Node() 1850 nodes = append(nodes, node) 1851 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1852 } 1853 1854 // Generate a fake job with allocations and create an older deployment 1855 job := mock.Job() 1856 d := mock.Deployment() 1857 d.JobID = job.ID 1858 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1859 noErr(t, h.State.UpsertDeployment(h.NextIndex(), d)) 1860 1861 // Create allocs that are part of the old deployment 1862 var allocs []*structs.Allocation 1863 for i := 0; i < 10; i++ { 1864 alloc := mock.Alloc() 1865 alloc.Job = job 1866 alloc.JobID = job.ID 1867 alloc.NodeID = nodes[i].ID 1868 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1869 alloc.DeploymentID = d.ID 1870 alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} 1871 allocs = append(allocs, alloc) 1872 } 1873 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1874 1875 // Update the job 1876 job2 := mock.Job() 1877 job2.ID = job.ID 1878 
desiredUpdates := 4 1879 job2.TaskGroups[0].Update = &structs.UpdateStrategy{ 1880 MaxParallel: desiredUpdates, 1881 HealthCheck: structs.UpdateStrategyHealthCheck_Checks, 1882 MinHealthyTime: 10 * time.Second, 1883 HealthyDeadline: 10 * time.Minute, 1884 } 1885 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1886 1887 // Create a mock evaluation to deal with drain 1888 eval := &structs.Evaluation{ 1889 Namespace: structs.DefaultNamespace, 1890 ID: uuid.Generate(), 1891 Priority: 50, 1892 TriggeredBy: structs.EvalTriggerJobRegister, 1893 JobID: job.ID, 1894 Status: structs.EvalStatusPending, 1895 } 1896 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1897 1898 // Process the evaluation 1899 err := h.Process(NewServiceScheduler, eval) 1900 if err != nil { 1901 t.Fatalf("err: %v", err) 1902 } 1903 1904 // Ensure a single plan 1905 if len(h.Plans) != 1 { 1906 t.Fatalf("bad: %#v", h.Plans) 1907 } 1908 plan := h.Plans[0] 1909 1910 // Ensure the plan did not evict any allocs 1911 var update []*structs.Allocation 1912 for _, updateList := range plan.NodeUpdate { 1913 update = append(update, updateList...) 1914 } 1915 if len(update) != 0 { 1916 t.Fatalf("bad: %#v", plan) 1917 } 1918 1919 // Ensure the plan updated the existing allocs 1920 var planned []*structs.Allocation 1921 for _, allocList := range plan.NodeAllocation { 1922 planned = append(planned, allocList...) 
1923 } 1924 if len(planned) != 10 { 1925 t.Fatalf("bad: %#v", plan) 1926 } 1927 for _, p := range planned { 1928 if p.Job != job2 { 1929 t.Fatalf("should update job") 1930 } 1931 } 1932 1933 // Lookup the allocations by JobID 1934 ws := memdb.NewWatchSet() 1935 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1936 noErr(t, err) 1937 1938 // Ensure all allocations placed 1939 if len(out) != 10 { 1940 t.Fatalf("bad: %#v", out) 1941 } 1942 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1943 1944 // Verify the network did not change 1945 rp := structs.Port{Label: "admin", Value: 5000} 1946 for _, alloc := range out { 1947 for _, resources := range alloc.TaskResources { 1948 if resources.Networks[0].ReservedPorts[0] != rp { 1949 t.Fatalf("bad: %#v", alloc) 1950 } 1951 } 1952 } 1953 1954 // Verify the deployment id was changed and health cleared 1955 for _, alloc := range out { 1956 if alloc.DeploymentID == d.ID { 1957 t.Fatalf("bad: deployment id not cleared") 1958 } else if alloc.DeploymentStatus != nil { 1959 t.Fatalf("bad: deployment status not cleared") 1960 } 1961 } 1962 } 1963 1964 func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { 1965 h := NewHarness(t) 1966 1967 // Create some nodes 1968 var nodes []*structs.Node 1969 for i := 0; i < 10; i++ { 1970 node := mock.Node() 1971 node.Meta["rack"] = fmt.Sprintf("rack%d", i) 1972 nodes = append(nodes, node) 1973 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1974 } 1975 1976 // Create a job that uses distinct property and has count higher than what is 1977 // possible. 
1978 job := mock.Job() 1979 job.TaskGroups[0].Count = 11 1980 job.Constraints = append(job.Constraints, 1981 &structs.Constraint{ 1982 Operand: structs.ConstraintDistinctProperty, 1983 LTarget: "${meta.rack}", 1984 }) 1985 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1986 1987 oldJob := job.Copy() 1988 oldJob.JobModifyIndex -= 1 1989 oldJob.TaskGroups[0].Count = 4 1990 1991 // Place 4 of 10 1992 var allocs []*structs.Allocation 1993 for i := 0; i < 4; i++ { 1994 alloc := mock.Alloc() 1995 alloc.Job = oldJob 1996 alloc.JobID = job.ID 1997 alloc.NodeID = nodes[i].ID 1998 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1999 allocs = append(allocs, alloc) 2000 } 2001 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2002 2003 // Create a mock evaluation to register the job 2004 eval := &structs.Evaluation{ 2005 Namespace: structs.DefaultNamespace, 2006 ID: uuid.Generate(), 2007 Priority: job.Priority, 2008 TriggeredBy: structs.EvalTriggerJobRegister, 2009 JobID: job.ID, 2010 Status: structs.EvalStatusPending, 2011 } 2012 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2013 2014 // Process the evaluation 2015 err := h.Process(NewServiceScheduler, eval) 2016 if err != nil { 2017 t.Fatalf("err: %v", err) 2018 } 2019 2020 // Ensure a single plan 2021 if len(h.Plans) != 1 { 2022 t.Fatalf("bad: %#v", h.Plans) 2023 } 2024 plan := h.Plans[0] 2025 2026 // Ensure the plan doesn't have annotations. 2027 if plan.Annotations != nil { 2028 t.Fatalf("expected no annotations") 2029 } 2030 2031 // Ensure the eval hasn't spawned blocked eval 2032 if len(h.CreateEvals) != 1 { 2033 t.Fatalf("bad: %#v", h.CreateEvals) 2034 } 2035 2036 // Ensure the plan failed to alloc 2037 outEval := h.Evals[0] 2038 if len(outEval.FailedTGAllocs) != 1 { 2039 t.Fatalf("bad: %+v", outEval) 2040 } 2041 2042 // Ensure the plan allocated 2043 var planned []*structs.Allocation 2044 for _, allocList := range plan.NodeAllocation { 2045 planned = append(planned, allocList...) 
2046 } 2047 if len(planned) != 10 { 2048 t.Fatalf("bad: %#v", planned) 2049 } 2050 2051 // Lookup the allocations by JobID 2052 ws := memdb.NewWatchSet() 2053 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2054 noErr(t, err) 2055 2056 // Ensure all allocations placed 2057 if len(out) != 10 { 2058 t.Fatalf("bad: %#v", out) 2059 } 2060 2061 // Ensure different node was used per. 2062 used := make(map[string]struct{}) 2063 for _, alloc := range out { 2064 if _, ok := used[alloc.NodeID]; ok { 2065 t.Fatalf("Node collision %v", alloc.NodeID) 2066 } 2067 used[alloc.NodeID] = struct{}{} 2068 } 2069 2070 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2071 } 2072 2073 func TestServiceSched_JobDeregister_Purged(t *testing.T) { 2074 h := NewHarness(t) 2075 2076 // Generate a fake job with allocations 2077 job := mock.Job() 2078 2079 var allocs []*structs.Allocation 2080 for i := 0; i < 10; i++ { 2081 alloc := mock.Alloc() 2082 alloc.Job = job 2083 alloc.JobID = job.ID 2084 allocs = append(allocs, alloc) 2085 } 2086 for _, alloc := range allocs { 2087 h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)) 2088 } 2089 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2090 2091 // Create a mock evaluation to deregister the job 2092 eval := &structs.Evaluation{ 2093 Namespace: structs.DefaultNamespace, 2094 ID: uuid.Generate(), 2095 Priority: 50, 2096 TriggeredBy: structs.EvalTriggerJobDeregister, 2097 JobID: job.ID, 2098 Status: structs.EvalStatusPending, 2099 } 2100 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2101 2102 // Process the evaluation 2103 err := h.Process(NewServiceScheduler, eval) 2104 if err != nil { 2105 t.Fatalf("err: %v", err) 2106 } 2107 2108 // Ensure a single plan 2109 if len(h.Plans) != 1 { 2110 t.Fatalf("bad: %#v", h.Plans) 2111 } 2112 plan := h.Plans[0] 2113 2114 // Ensure the plan evicted all nodes 2115 if len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) != len(allocs) { 
2116 t.Fatalf("bad: %#v", plan) 2117 } 2118 2119 // Lookup the allocations by JobID 2120 ws := memdb.NewWatchSet() 2121 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2122 noErr(t, err) 2123 2124 // Ensure that the job field on the allocation is still populated 2125 for _, alloc := range out { 2126 if alloc.Job == nil { 2127 t.Fatalf("bad: %#v", alloc) 2128 } 2129 } 2130 2131 // Ensure no remaining allocations 2132 out, _ = structs.FilterTerminalAllocs(out) 2133 if len(out) != 0 { 2134 t.Fatalf("bad: %#v", out) 2135 } 2136 2137 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2138 } 2139 2140 func TestServiceSched_JobDeregister_Stopped(t *testing.T) { 2141 h := NewHarness(t) 2142 2143 // Generate a fake job with allocations 2144 job := mock.Job() 2145 job.Stop = true 2146 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2147 2148 var allocs []*structs.Allocation 2149 for i := 0; i < 10; i++ { 2150 alloc := mock.Alloc() 2151 alloc.Job = job 2152 alloc.JobID = job.ID 2153 allocs = append(allocs, alloc) 2154 } 2155 for _, alloc := range allocs { 2156 h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)) 2157 } 2158 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2159 2160 // Create a mock evaluation to deregister the job 2161 eval := &structs.Evaluation{ 2162 Namespace: structs.DefaultNamespace, 2163 ID: uuid.Generate(), 2164 Priority: 50, 2165 TriggeredBy: structs.EvalTriggerJobDeregister, 2166 JobID: job.ID, 2167 Status: structs.EvalStatusPending, 2168 } 2169 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2170 2171 // Process the evaluation 2172 err := h.Process(NewServiceScheduler, eval) 2173 if err != nil { 2174 t.Fatalf("err: %v", err) 2175 } 2176 2177 // Ensure a single plan 2178 if len(h.Plans) != 1 { 2179 t.Fatalf("bad: %#v", h.Plans) 2180 } 2181 plan := h.Plans[0] 2182 2183 // Ensure the plan evicted all nodes 2184 if len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) != len(allocs) 
{ 2185 t.Fatalf("bad: %#v", plan) 2186 } 2187 2188 // Lookup the allocations by JobID 2189 ws := memdb.NewWatchSet() 2190 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2191 noErr(t, err) 2192 2193 // Ensure that the job field on the allocation is still populated 2194 for _, alloc := range out { 2195 if alloc.Job == nil { 2196 t.Fatalf("bad: %#v", alloc) 2197 } 2198 } 2199 2200 // Ensure no remaining allocations 2201 out, _ = structs.FilterTerminalAllocs(out) 2202 if len(out) != 0 { 2203 t.Fatalf("bad: %#v", out) 2204 } 2205 2206 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2207 } 2208 2209 func TestServiceSched_NodeDown(t *testing.T) { 2210 h := NewHarness(t) 2211 2212 // Register a node 2213 node := mock.Node() 2214 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2215 2216 // Generate a fake job with allocations and an update policy. 2217 job := mock.Job() 2218 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2219 2220 var allocs []*structs.Allocation 2221 for i := 0; i < 10; i++ { 2222 alloc := mock.Alloc() 2223 alloc.Job = job 2224 alloc.JobID = job.ID 2225 alloc.NodeID = node.ID 2226 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2227 allocs = append(allocs, alloc) 2228 } 2229 2230 // Cover each terminal case and ensure it doesn't change to lost 2231 allocs[7].DesiredStatus = structs.AllocDesiredStatusRun 2232 allocs[7].ClientStatus = structs.AllocClientStatusLost 2233 allocs[8].DesiredStatus = structs.AllocDesiredStatusRun 2234 allocs[8].ClientStatus = structs.AllocClientStatusFailed 2235 allocs[9].DesiredStatus = structs.AllocDesiredStatusRun 2236 allocs[9].ClientStatus = structs.AllocClientStatusComplete 2237 2238 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2239 2240 // Mark some allocs as running 2241 ws := memdb.NewWatchSet() 2242 for i := 0; i < 4; i++ { 2243 out, _ := h.State.AllocByID(ws, allocs[i].ID) 2244 out.ClientStatus = structs.AllocClientStatusRunning 2245 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), 
[]*structs.Allocation{out})) 2246 } 2247 2248 // Mark the node as down 2249 noErr(t, h.State.UpdateNodeStatus(h.NextIndex(), node.ID, structs.NodeStatusDown)) 2250 2251 // Create a mock evaluation to deal with drain 2252 eval := &structs.Evaluation{ 2253 Namespace: structs.DefaultNamespace, 2254 ID: uuid.Generate(), 2255 Priority: 50, 2256 TriggeredBy: structs.EvalTriggerNodeUpdate, 2257 JobID: job.ID, 2258 NodeID: node.ID, 2259 Status: structs.EvalStatusPending, 2260 } 2261 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2262 2263 // Process the evaluation 2264 err := h.Process(NewServiceScheduler, eval) 2265 if err != nil { 2266 t.Fatalf("err: %v", err) 2267 } 2268 2269 // Ensure a single plan 2270 if len(h.Plans) != 1 { 2271 t.Fatalf("bad: %#v", h.Plans) 2272 } 2273 plan := h.Plans[0] 2274 2275 // Test the scheduler marked all non-terminal allocations as lost 2276 if len(plan.NodeUpdate[node.ID]) != 7 { 2277 t.Fatalf("bad: %#v", plan) 2278 } 2279 2280 for _, out := range plan.NodeUpdate[node.ID] { 2281 if out.ClientStatus != structs.AllocClientStatusLost && out.DesiredStatus != structs.AllocDesiredStatusStop { 2282 t.Fatalf("bad alloc: %#v", out) 2283 } 2284 } 2285 2286 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2287 } 2288 2289 func TestServiceSched_NodeUpdate(t *testing.T) { 2290 h := NewHarness(t) 2291 2292 // Register a node 2293 node := mock.Node() 2294 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2295 2296 // Generate a fake job with allocations and an update policy. 
2297 job := mock.Job() 2298 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2299 2300 var allocs []*structs.Allocation 2301 for i := 0; i < 10; i++ { 2302 alloc := mock.Alloc() 2303 alloc.Job = job 2304 alloc.JobID = job.ID 2305 alloc.NodeID = node.ID 2306 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2307 allocs = append(allocs, alloc) 2308 } 2309 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2310 2311 // Mark some allocs as running 2312 ws := memdb.NewWatchSet() 2313 for i := 0; i < 4; i++ { 2314 out, _ := h.State.AllocByID(ws, allocs[i].ID) 2315 out.ClientStatus = structs.AllocClientStatusRunning 2316 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{out})) 2317 } 2318 2319 // Create a mock evaluation which won't trigger any new placements 2320 eval := &structs.Evaluation{ 2321 Namespace: structs.DefaultNamespace, 2322 ID: uuid.Generate(), 2323 Priority: 50, 2324 TriggeredBy: structs.EvalTriggerNodeUpdate, 2325 JobID: job.ID, 2326 NodeID: node.ID, 2327 Status: structs.EvalStatusPending, 2328 } 2329 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2330 2331 // Process the evaluation 2332 err := h.Process(NewServiceScheduler, eval) 2333 if err != nil { 2334 t.Fatalf("err: %v", err) 2335 } 2336 if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 { 2337 t.Fatalf("bad queued allocations: %v", h.Evals[0].QueuedAllocations) 2338 } 2339 2340 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2341 } 2342 2343 func TestServiceSched_NodeDrain(t *testing.T) { 2344 h := NewHarness(t) 2345 2346 // Register a draining node 2347 node := mock.Node() 2348 node.Drain = true 2349 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2350 2351 // Create some nodes 2352 for i := 0; i < 10; i++ { 2353 node := mock.Node() 2354 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2355 } 2356 2357 // Generate a fake job with allocations and an update policy. 
2358 job := mock.Job() 2359 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2360 2361 var allocs []*structs.Allocation 2362 for i := 0; i < 10; i++ { 2363 alloc := mock.Alloc() 2364 alloc.Job = job 2365 alloc.JobID = job.ID 2366 alloc.NodeID = node.ID 2367 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2368 allocs = append(allocs, alloc) 2369 } 2370 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2371 2372 // Create a mock evaluation to deal with drain 2373 eval := &structs.Evaluation{ 2374 Namespace: structs.DefaultNamespace, 2375 ID: uuid.Generate(), 2376 Priority: 50, 2377 TriggeredBy: structs.EvalTriggerNodeUpdate, 2378 JobID: job.ID, 2379 NodeID: node.ID, 2380 Status: structs.EvalStatusPending, 2381 } 2382 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2383 2384 // Process the evaluation 2385 err := h.Process(NewServiceScheduler, eval) 2386 if err != nil { 2387 t.Fatalf("err: %v", err) 2388 } 2389 2390 // Ensure a single plan 2391 if len(h.Plans) != 1 { 2392 t.Fatalf("bad: %#v", h.Plans) 2393 } 2394 plan := h.Plans[0] 2395 2396 // Ensure the plan evicted all allocs 2397 if len(plan.NodeUpdate[node.ID]) != len(allocs) { 2398 t.Fatalf("bad: %#v", plan) 2399 } 2400 2401 // Ensure the plan allocated 2402 var planned []*structs.Allocation 2403 for _, allocList := range plan.NodeAllocation { 2404 planned = append(planned, allocList...) 
2405 } 2406 if len(planned) != 10 { 2407 t.Fatalf("bad: %#v", plan) 2408 } 2409 2410 // Lookup the allocations by JobID 2411 ws := memdb.NewWatchSet() 2412 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2413 noErr(t, err) 2414 2415 // Ensure all allocations placed 2416 out, _ = structs.FilterTerminalAllocs(out) 2417 if len(out) != 10 { 2418 t.Fatalf("bad: %#v", out) 2419 } 2420 2421 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2422 } 2423 2424 func TestServiceSched_NodeDrain_Down(t *testing.T) { 2425 h := NewHarness(t) 2426 2427 // Register a draining node 2428 node := mock.Node() 2429 node.Drain = true 2430 node.Status = structs.NodeStatusDown 2431 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2432 2433 // Generate a fake job with allocations 2434 job := mock.Job() 2435 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2436 2437 var allocs []*structs.Allocation 2438 for i := 0; i < 10; i++ { 2439 alloc := mock.Alloc() 2440 alloc.Job = job 2441 alloc.JobID = job.ID 2442 alloc.NodeID = node.ID 2443 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2444 allocs = append(allocs, alloc) 2445 } 2446 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2447 2448 // Set the desired state of the allocs to stop 2449 var stop []*structs.Allocation 2450 for i := 0; i < 10; i++ { 2451 newAlloc := allocs[i].Copy() 2452 newAlloc.ClientStatus = structs.AllocDesiredStatusStop 2453 stop = append(stop, newAlloc) 2454 } 2455 noErr(t, h.State.UpsertAllocs(h.NextIndex(), stop)) 2456 2457 // Mark some of the allocations as running 2458 var running []*structs.Allocation 2459 for i := 4; i < 6; i++ { 2460 newAlloc := stop[i].Copy() 2461 newAlloc.ClientStatus = structs.AllocClientStatusRunning 2462 running = append(running, newAlloc) 2463 } 2464 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), running)) 2465 2466 // Mark some of the allocations as complete 2467 var complete []*structs.Allocation 2468 for i := 6; i < 10; i++ { 2469 newAlloc := stop[i].Copy() 2470 
newAlloc.TaskStates = make(map[string]*structs.TaskState) 2471 newAlloc.TaskStates["web"] = &structs.TaskState{ 2472 State: structs.TaskStateDead, 2473 Events: []*structs.TaskEvent{ 2474 { 2475 Type: structs.TaskTerminated, 2476 ExitCode: 0, 2477 }, 2478 }, 2479 } 2480 newAlloc.ClientStatus = structs.AllocClientStatusComplete 2481 complete = append(complete, newAlloc) 2482 } 2483 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), complete)) 2484 2485 // Create a mock evaluation to deal with the node update 2486 eval := &structs.Evaluation{ 2487 Namespace: structs.DefaultNamespace, 2488 ID: uuid.Generate(), 2489 Priority: 50, 2490 TriggeredBy: structs.EvalTriggerNodeUpdate, 2491 JobID: job.ID, 2492 NodeID: node.ID, 2493 Status: structs.EvalStatusPending, 2494 } 2495 2496 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2497 2498 // Process the evaluation 2499 err := h.Process(NewServiceScheduler, eval) 2500 if err != nil { 2501 t.Fatalf("err: %v", err) 2502 } 2503 2504 // Ensure a single plan 2505 if len(h.Plans) != 1 { 2506 t.Fatalf("bad: %#v", h.Plans) 2507 } 2508 plan := h.Plans[0] 2509 2510 // Ensure the plan evicted non terminal allocs 2511 if len(plan.NodeUpdate[node.ID]) != 6 { 2512 t.Fatalf("bad: %#v", plan) 2513 } 2514 2515 // Ensure that all the allocations which were in running or pending state 2516 // has been marked as lost 2517 var lostAllocs []string 2518 for _, alloc := range plan.NodeUpdate[node.ID] { 2519 lostAllocs = append(lostAllocs, alloc.ID) 2520 } 2521 sort.Strings(lostAllocs) 2522 2523 var expectedLostAllocs []string 2524 for i := 0; i < 6; i++ { 2525 expectedLostAllocs = append(expectedLostAllocs, allocs[i].ID) 2526 } 2527 sort.Strings(expectedLostAllocs) 2528 2529 if !reflect.DeepEqual(expectedLostAllocs, lostAllocs) { 2530 t.Fatalf("expected: %v, actual: %v", expectedLostAllocs, lostAllocs) 2531 } 2532 2533 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2534 } 2535 2536 func 
TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { 2537 h := NewHarness(t) 2538 2539 // Register a draining node 2540 node := mock.Node() 2541 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2542 2543 // Generate a fake job with allocations and an update policy. 2544 job := mock.Job() 2545 job.TaskGroups[0].Count = 2 2546 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2547 2548 var allocs []*structs.Allocation 2549 for i := 0; i < 2; i++ { 2550 alloc := mock.Alloc() 2551 alloc.Job = job 2552 alloc.JobID = job.ID 2553 alloc.NodeID = node.ID 2554 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2555 allocs = append(allocs, alloc) 2556 } 2557 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2558 2559 node.Drain = true 2560 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2561 2562 // Create a mock evaluation to deal with drain 2563 eval := &structs.Evaluation{ 2564 Namespace: structs.DefaultNamespace, 2565 ID: uuid.Generate(), 2566 Priority: 50, 2567 TriggeredBy: structs.EvalTriggerNodeUpdate, 2568 JobID: job.ID, 2569 NodeID: node.ID, 2570 Status: structs.EvalStatusPending, 2571 } 2572 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2573 2574 // Process the evaluation 2575 err := h.Process(NewServiceScheduler, eval) 2576 if err != nil { 2577 t.Fatalf("err: %v", err) 2578 } 2579 2580 queued := h.Evals[0].QueuedAllocations["web"] 2581 if queued != 2 { 2582 t.Fatalf("expected: %v, actual: %v", 2, queued) 2583 } 2584 } 2585 2586 func TestServiceSched_NodeDrain_UpdateStrategy(t *testing.T) { 2587 h := NewHarness(t) 2588 2589 // Register a draining node 2590 node := mock.Node() 2591 node.Drain = true 2592 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2593 2594 // Create some nodes 2595 for i := 0; i < 10; i++ { 2596 node := mock.Node() 2597 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2598 } 2599 2600 // Generate a fake job with allocations and an update policy. 
2601 job := mock.Job() 2602 mp := 5 2603 u := structs.DefaultUpdateStrategy.Copy() 2604 u.MaxParallel = mp 2605 u.Stagger = time.Second 2606 job.TaskGroups[0].Update = u 2607 2608 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2609 2610 var allocs []*structs.Allocation 2611 for i := 0; i < 10; i++ { 2612 alloc := mock.Alloc() 2613 alloc.Job = job 2614 alloc.JobID = job.ID 2615 alloc.NodeID = node.ID 2616 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2617 allocs = append(allocs, alloc) 2618 } 2619 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2620 2621 // Create a mock evaluation to deal with drain 2622 eval := &structs.Evaluation{ 2623 Namespace: structs.DefaultNamespace, 2624 ID: uuid.Generate(), 2625 Priority: 50, 2626 TriggeredBy: structs.EvalTriggerNodeUpdate, 2627 JobID: job.ID, 2628 NodeID: node.ID, 2629 Status: structs.EvalStatusPending, 2630 } 2631 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2632 2633 // Process the evaluation 2634 err := h.Process(NewServiceScheduler, eval) 2635 if err != nil { 2636 t.Fatalf("err: %v", err) 2637 } 2638 2639 // Ensure a single plan 2640 if len(h.Plans) != 1 { 2641 t.Fatalf("bad: %#v", h.Plans) 2642 } 2643 plan := h.Plans[0] 2644 2645 // Ensure the plan evicted all allocs 2646 if len(plan.NodeUpdate[node.ID]) != mp { 2647 t.Fatalf("bad: %#v", plan) 2648 } 2649 2650 // Ensure the plan allocated 2651 var planned []*structs.Allocation 2652 for _, allocList := range plan.NodeAllocation { 2653 planned = append(planned, allocList...) 2654 } 2655 if len(planned) != mp { 2656 t.Fatalf("bad: %#v", plan) 2657 } 2658 2659 // Ensure there is a followup eval. 
2660 if len(h.CreateEvals) != 1 || 2661 h.CreateEvals[0].TriggeredBy != structs.EvalTriggerRollingUpdate { 2662 t.Fatalf("bad: %#v", h.CreateEvals) 2663 } 2664 2665 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2666 } 2667 2668 func TestServiceSched_RetryLimit(t *testing.T) { 2669 h := NewHarness(t) 2670 h.Planner = &RejectPlan{h} 2671 2672 // Create some nodes 2673 for i := 0; i < 10; i++ { 2674 node := mock.Node() 2675 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2676 } 2677 2678 // Create a job 2679 job := mock.Job() 2680 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2681 2682 // Create a mock evaluation to register the job 2683 eval := &structs.Evaluation{ 2684 Namespace: structs.DefaultNamespace, 2685 ID: uuid.Generate(), 2686 Priority: job.Priority, 2687 TriggeredBy: structs.EvalTriggerJobRegister, 2688 JobID: job.ID, 2689 Status: structs.EvalStatusPending, 2690 } 2691 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2692 2693 // Process the evaluation 2694 err := h.Process(NewServiceScheduler, eval) 2695 if err != nil { 2696 t.Fatalf("err: %v", err) 2697 } 2698 2699 // Ensure multiple plans 2700 if len(h.Plans) == 0 { 2701 t.Fatalf("bad: %#v", h.Plans) 2702 } 2703 2704 // Lookup the allocations by JobID 2705 ws := memdb.NewWatchSet() 2706 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2707 noErr(t, err) 2708 2709 // Ensure no allocations placed 2710 if len(out) != 0 { 2711 t.Fatalf("bad: %#v", out) 2712 } 2713 2714 // Should hit the retry limit 2715 h.AssertEvalStatus(t, structs.EvalStatusFailed) 2716 } 2717 2718 func TestServiceSched_Reschedule_Once(t *testing.T) { 2719 h := NewHarness(t) 2720 2721 // Create some nodes 2722 var nodes []*structs.Node 2723 for i := 0; i < 10; i++ { 2724 node := mock.Node() 2725 nodes = append(nodes, node) 2726 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2727 } 2728 2729 // Generate a fake job with allocations and an update policy. 
2730 job := mock.Job() 2731 job.TaskGroups[0].Count = 2 2732 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ 2733 Attempts: 1, 2734 Interval: 15 * time.Minute, 2735 } 2736 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2737 2738 var allocs []*structs.Allocation 2739 for i := 0; i < 2; i++ { 2740 alloc := mock.Alloc() 2741 alloc.Job = job 2742 alloc.JobID = job.ID 2743 alloc.NodeID = nodes[i].ID 2744 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2745 allocs = append(allocs, alloc) 2746 } 2747 // Mark one of the allocations as failed 2748 allocs[1].ClientStatus = structs.AllocClientStatusFailed 2749 failedAllocID := allocs[1].ID 2750 successAllocID := allocs[0].ID 2751 2752 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2753 2754 // Create a mock evaluation 2755 eval := &structs.Evaluation{ 2756 Namespace: structs.DefaultNamespace, 2757 ID: uuid.Generate(), 2758 Priority: 50, 2759 TriggeredBy: structs.EvalTriggerNodeUpdate, 2760 JobID: job.ID, 2761 Status: structs.EvalStatusPending, 2762 } 2763 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2764 2765 // Process the evaluation 2766 err := h.Process(NewServiceScheduler, eval) 2767 if err != nil { 2768 t.Fatalf("err: %v", err) 2769 } 2770 2771 // Ensure multiple plans 2772 if len(h.Plans) == 0 { 2773 t.Fatalf("bad: %#v", h.Plans) 2774 } 2775 2776 // Lookup the allocations by JobID 2777 ws := memdb.NewWatchSet() 2778 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2779 noErr(t, err) 2780 2781 // Verify that one new allocation got created with its restart tracker info 2782 assert := assert.New(t) 2783 assert.Equal(3, len(out)) 2784 var newAlloc *structs.Allocation 2785 for _, alloc := range out { 2786 if alloc.ID != successAllocID && alloc.ID != failedAllocID { 2787 newAlloc = alloc 2788 } 2789 } 2790 assert.Equal(failedAllocID, newAlloc.PreviousAllocation) 2791 assert.Equal(1, len(newAlloc.RescheduleTracker.Events)) 2792 assert.Equal(failedAllocID, 
newAlloc.RescheduleTracker.Events[0].PrevAllocID) 2793 2794 // Mark this alloc as failed again, should not get rescheduled 2795 newAlloc.ClientStatus = structs.AllocClientStatusFailed 2796 2797 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{newAlloc})) 2798 2799 // Create another mock evaluation 2800 eval = &structs.Evaluation{ 2801 Namespace: structs.DefaultNamespace, 2802 ID: uuid.Generate(), 2803 Priority: 50, 2804 TriggeredBy: structs.EvalTriggerNodeUpdate, 2805 JobID: job.ID, 2806 Status: structs.EvalStatusPending, 2807 } 2808 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2809 2810 // Process the evaluation 2811 err = h.Process(NewServiceScheduler, eval) 2812 assert.Nil(err) 2813 // Verify no new allocs were created this time 2814 out, err = h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2815 noErr(t, err) 2816 assert.Equal(3, len(out)) 2817 2818 } 2819 2820 func TestServiceSched_Reschedule_Multiple(t *testing.T) { 2821 h := NewHarness(t) 2822 2823 // Create some nodes 2824 var nodes []*structs.Node 2825 for i := 0; i < 10; i++ { 2826 node := mock.Node() 2827 nodes = append(nodes, node) 2828 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2829 } 2830 2831 maxRestartAttempts := 3 2832 // Generate a fake job with allocations and an update policy. 
2833 job := mock.Job() 2834 job.TaskGroups[0].Count = 2 2835 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ 2836 Attempts: maxRestartAttempts, 2837 Interval: 30 * time.Minute, 2838 } 2839 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2840 2841 var allocs []*structs.Allocation 2842 for i := 0; i < 2; i++ { 2843 alloc := mock.Alloc() 2844 alloc.ClientStatus = structs.AllocClientStatusRunning 2845 alloc.Job = job 2846 alloc.JobID = job.ID 2847 alloc.NodeID = nodes[i].ID 2848 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2849 allocs = append(allocs, alloc) 2850 } 2851 // Mark one of the allocations as failed 2852 allocs[1].ClientStatus = structs.AllocClientStatusFailed 2853 2854 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2855 2856 // Create a mock evaluation 2857 eval := &structs.Evaluation{ 2858 Namespace: structs.DefaultNamespace, 2859 ID: uuid.Generate(), 2860 Priority: 50, 2861 TriggeredBy: structs.EvalTriggerNodeUpdate, 2862 JobID: job.ID, 2863 Status: structs.EvalStatusPending, 2864 } 2865 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2866 2867 expectedNumAllocs := 3 2868 expectedNumReschedTrackers := 1 2869 2870 failedAllocId := allocs[1].ID 2871 failedNodeID := allocs[1].NodeID 2872 2873 assert := assert.New(t) 2874 for i := 0; i < maxRestartAttempts; i++ { 2875 // Process the evaluation 2876 err := h.Process(NewServiceScheduler, eval) 2877 noErr(t, err) 2878 2879 // Ensure multiple plans 2880 if len(h.Plans) == 0 { 2881 t.Fatalf("bad: %#v", h.Plans) 2882 } 2883 2884 // Lookup the allocations by JobID 2885 ws := memdb.NewWatchSet() 2886 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2887 noErr(t, err) 2888 2889 // Verify that a new allocation got created with its restart tracker info 2890 assert.Equal(expectedNumAllocs, len(out)) 2891 2892 // Find the new alloc with ClientStatusPending 2893 var pendingAllocs []*structs.Allocation 2894 var prevFailedAlloc *structs.Allocation 2895 2896 
for _, alloc := range out { 2897 if alloc.ClientStatus == structs.AllocClientStatusPending { 2898 pendingAllocs = append(pendingAllocs, alloc) 2899 } 2900 if alloc.ID == failedAllocId { 2901 prevFailedAlloc = alloc 2902 } 2903 } 2904 assert.Equal(1, len(pendingAllocs)) 2905 newAlloc := pendingAllocs[0] 2906 assert.Equal(expectedNumReschedTrackers, len(newAlloc.RescheduleTracker.Events)) 2907 2908 // Verify the previous NodeID in the most recent reschedule event 2909 reschedEvents := newAlloc.RescheduleTracker.Events 2910 assert.Equal(failedAllocId, reschedEvents[len(reschedEvents)-1].PrevAllocID) 2911 assert.Equal(failedNodeID, reschedEvents[len(reschedEvents)-1].PrevNodeID) 2912 2913 // Verify that the next alloc of the failed alloc is the newly rescheduled alloc 2914 assert.Equal(newAlloc.ID, prevFailedAlloc.NextAllocation) 2915 2916 // Mark this alloc as failed again 2917 newAlloc.ClientStatus = structs.AllocClientStatusFailed 2918 2919 failedAllocId = newAlloc.ID 2920 failedNodeID = newAlloc.NodeID 2921 2922 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{newAlloc})) 2923 2924 // Create another mock evaluation 2925 eval = &structs.Evaluation{ 2926 Namespace: structs.DefaultNamespace, 2927 ID: uuid.Generate(), 2928 Priority: 50, 2929 TriggeredBy: structs.EvalTriggerNodeUpdate, 2930 JobID: job.ID, 2931 Status: structs.EvalStatusPending, 2932 } 2933 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2934 expectedNumAllocs += 1 2935 expectedNumReschedTrackers += 1 2936 } 2937 2938 // Process last eval again, should not reschedule 2939 err := h.Process(NewServiceScheduler, eval) 2940 assert.Nil(err) 2941 2942 // Verify no new allocs were created because restart attempts were exhausted 2943 ws := memdb.NewWatchSet() 2944 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2945 noErr(t, err) 2946 assert.Equal(5, len(out)) // 2 original, plus 3 reschedule attempts 2947 } 2948 2949 // Tests that deployments with 
failed allocs don't result in placements 2950 func TestDeployment_FailedAllocs_NoReschedule(t *testing.T) { 2951 h := NewHarness(t) 2952 require := require.New(t) 2953 // Create some nodes 2954 var nodes []*structs.Node 2955 for i := 0; i < 10; i++ { 2956 node := mock.Node() 2957 nodes = append(nodes, node) 2958 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2959 } 2960 2961 // Generate a fake job with allocations and a reschedule policy. 2962 job := mock.Job() 2963 job.TaskGroups[0].Count = 2 2964 job.TaskGroups[0].ReschedulePolicy = &structs.ReschedulePolicy{ 2965 Attempts: 1, 2966 Interval: 15 * time.Minute, 2967 } 2968 jobIndex := h.NextIndex() 2969 require.Nil(h.State.UpsertJob(jobIndex, job)) 2970 2971 deployment := mock.Deployment() 2972 deployment.JobID = job.ID 2973 deployment.JobCreateIndex = jobIndex 2974 deployment.JobVersion = job.Version 2975 2976 require.Nil(h.State.UpsertDeployment(h.NextIndex(), deployment)) 2977 2978 var allocs []*structs.Allocation 2979 for i := 0; i < 2; i++ { 2980 alloc := mock.Alloc() 2981 alloc.Job = job 2982 alloc.JobID = job.ID 2983 alloc.NodeID = nodes[i].ID 2984 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2985 alloc.DeploymentID = deployment.ID 2986 allocs = append(allocs, alloc) 2987 } 2988 // Mark one of the allocations as failed 2989 allocs[1].ClientStatus = structs.AllocClientStatusFailed 2990 2991 require.Nil(h.State.UpsertAllocs(h.NextIndex(), allocs)) 2992 2993 // Create a mock evaluation 2994 eval := &structs.Evaluation{ 2995 Namespace: structs.DefaultNamespace, 2996 ID: uuid.Generate(), 2997 Priority: 50, 2998 TriggeredBy: structs.EvalTriggerNodeUpdate, 2999 JobID: job.ID, 3000 Status: structs.EvalStatusPending, 3001 } 3002 require.Nil(h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 3003 3004 // Process the evaluation 3005 require.Nil(h.Process(NewServiceScheduler, eval)) 3006 3007 // Verify no plan created 3008 require.Equal(0, len(h.Plans)) 3009 3010 } 3011 3012 func 
TestBatchSched_Run_CompleteAlloc(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.TaskGroups[0].Count = 1
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a complete alloc
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	alloc.ClientStatus = structs.AllocClientStatusComplete
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan as it should be a no-op
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure no allocations placed
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// TestBatchSched_Run_FailedAlloc verifies that a failed batch allocation is
// replaced by a new placement.
func TestBatchSched_Run_FailedAlloc(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.TaskGroups[0].Count = 1
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a failed alloc
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	alloc.ClientStatus = structs.AllocClientStatusFailed
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure a replacement alloc was placed.
3119 if len(out) != 2 { 3120 t.Fatalf("bad: %#v", out) 3121 } 3122 3123 // Ensure that the scheduler is recording the correct number of queued 3124 // allocations 3125 queued := h.Evals[0].QueuedAllocations["web"] 3126 if queued != 0 { 3127 t.Fatalf("expected: %v, actual: %v", 1, queued) 3128 } 3129 3130 h.AssertEvalStatus(t, structs.EvalStatusComplete) 3131 } 3132 3133 func TestBatchSched_Run_LostAlloc(t *testing.T) { 3134 h := NewHarness(t) 3135 3136 // Create a node 3137 node := mock.Node() 3138 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 3139 3140 // Create a job 3141 job := mock.Job() 3142 job.ID = "my-job" 3143 job.Type = structs.JobTypeBatch 3144 job.TaskGroups[0].Count = 3 3145 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 3146 3147 // Desired = 3 3148 // Mark one as lost and then schedule 3149 // [(0, run, running), (1, run, running), (1, stop, lost)] 3150 3151 // Create two running allocations 3152 var allocs []*structs.Allocation 3153 for i := 0; i <= 1; i++ { 3154 alloc := mock.Alloc() 3155 alloc.Job = job 3156 alloc.JobID = job.ID 3157 alloc.NodeID = node.ID 3158 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 3159 alloc.ClientStatus = structs.AllocClientStatusRunning 3160 allocs = append(allocs, alloc) 3161 } 3162 3163 // Create a failed alloc 3164 alloc := mock.Alloc() 3165 alloc.Job = job 3166 alloc.JobID = job.ID 3167 alloc.NodeID = node.ID 3168 alloc.Name = "my-job.web[1]" 3169 alloc.DesiredStatus = structs.AllocDesiredStatusStop 3170 alloc.ClientStatus = structs.AllocClientStatusComplete 3171 allocs = append(allocs, alloc) 3172 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 3173 3174 // Create a mock evaluation to register the job 3175 eval := &structs.Evaluation{ 3176 Namespace: structs.DefaultNamespace, 3177 ID: uuid.Generate(), 3178 Priority: job.Priority, 3179 TriggeredBy: structs.EvalTriggerJobRegister, 3180 JobID: job.ID, 3181 Status: structs.EvalStatusPending, 3182 } 3183 noErr(t, h.State.UpsertEvals(h.NextIndex(), 
[]*structs.Evaluation{eval})) 3184 3185 // Process the evaluation 3186 err := h.Process(NewBatchScheduler, eval) 3187 if err != nil { 3188 t.Fatalf("err: %v", err) 3189 } 3190 3191 // Ensure a plan 3192 if len(h.Plans) != 1 { 3193 t.Fatalf("bad: %#v", h.Plans) 3194 } 3195 3196 // Lookup the allocations by JobID 3197 ws := memdb.NewWatchSet() 3198 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 3199 noErr(t, err) 3200 3201 // Ensure a replacement alloc was placed. 3202 if len(out) != 4 { 3203 t.Fatalf("bad: %#v", out) 3204 } 3205 3206 // Assert that we have the correct number of each alloc name 3207 expected := map[string]int{ 3208 "my-job.web[0]": 1, 3209 "my-job.web[1]": 2, 3210 "my-job.web[2]": 1, 3211 } 3212 actual := make(map[string]int, 3) 3213 for _, alloc := range out { 3214 actual[alloc.Name] += 1 3215 } 3216 require.Equal(t, actual, expected) 3217 3218 h.AssertEvalStatus(t, structs.EvalStatusComplete) 3219 } 3220 3221 func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { 3222 h := NewHarness(t) 3223 3224 node := mock.Node() 3225 node.Drain = true 3226 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 3227 3228 // Create a job 3229 job := mock.Job() 3230 job.Type = structs.JobTypeBatch 3231 job.TaskGroups[0].Count = 1 3232 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 3233 3234 // Create a failed alloc 3235 alloc := mock.Alloc() 3236 alloc.Job = job 3237 alloc.JobID = job.ID 3238 alloc.NodeID = node.ID 3239 alloc.Name = "my-job.web[0]" 3240 alloc.ClientStatus = structs.AllocClientStatusFailed 3241 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 3242 3243 // Create a mock evaluation to register the job 3244 eval := &structs.Evaluation{ 3245 Namespace: structs.DefaultNamespace, 3246 ID: uuid.Generate(), 3247 Priority: job.Priority, 3248 TriggeredBy: structs.EvalTriggerJobRegister, 3249 JobID: job.ID, 3250 Status: structs.EvalStatusPending, 3251 } 3252 noErr(t, h.State.UpsertEvals(h.NextIndex(), 
		[]*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the scheduler is recording the correct number of queued
	// allocations
	queued := h.Evals[0].QueuedAllocations["web"]
	if queued != 1 {
		t.Fatalf("expected: %v, actual: %v", 1, queued)
	}
}

// This test asserts that a batch alloc that finished successfully on a node
// that is later drained is NOT rerun on the remaining healthy node.
func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) {
	h := NewHarness(t)

	// Create two nodes, one that is drained and has a successfully finished
	// alloc and a fresh undrained one
	node := mock.Node()
	node.Drain = true
	node2 := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))

	// Create a job
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.TaskGroups[0].Count = 1
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a successful alloc: complete client status plus a dead task
	// state with exit code 0 marks it as successfully finished.
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	alloc.ClientStatus = structs.AllocClientStatusComplete
	alloc.TaskStates = map[string]*structs.TaskState{
		"web": {
			State: structs.TaskStateDead,
			Events: []*structs.TaskEvent{
				{
					Type:     structs.TaskTerminated,
					ExitCode: 0,
				},
			},
		},
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to rerun the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Lookup the allocations by JobID
	ws := memdb.NewWatchSet()
	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
	noErr(t, err)

	// Ensure no replacement alloc was placed.
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// This test checks that terminal allocations that receive an in-place update
// are not added to the plan
func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// All allocs are client-terminal (complete), so an in-place update
	// should have nothing to touch.
	var allocs []*structs.Allocation
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		alloc.ClientStatus = structs.AllocClientStatusComplete
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Create a mock evaluation to trigger the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans[0])
	}
}

// This test ensures that terminal jobs from older versions are ignored.
func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Terminal allocs for the original version of the job
	var allocs []*structs.Allocation
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		alloc.ClientStatus = structs.AllocClientStatusComplete
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Update the job destructively (env change bumps the version)
	job2 := mock.Job()
	job2.ID = job.ID
	job2.Type = structs.JobTypeBatch
	job2.Version++
	job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"}
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Successfully finished allocs for the new version of the job
	allocs = nil
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job2
		alloc.JobID = job2.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		alloc.ClientStatus = structs.AllocClientStatusComplete
		alloc.TaskStates = map[string]*structs.TaskState{
			"web": {
				State: structs.TaskStateDead,
				Events: []*structs.TaskEvent{
					{
						Type:     structs.TaskTerminated,
						ExitCode: 0,
					},
				},
			},
		}
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Create a mock evaluation to trigger the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}
}

// This test asserts that an allocation from an old job that is running on a
// drained node is cleaned up.
func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) {
	h := NewHarness(t)

	// Create two nodes, one that is drained and has a successfully finished
	// alloc and a fresh undrained one
	node := mock.Node()
	node.Drain = true
	node2 := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))

	// Create a job
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.TaskGroups[0].Count = 1
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a running alloc on the drained node
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	alloc.ClientStatus = structs.AllocClientStatusRunning
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create an update job so the running alloc belongs to an old version
	job2 := job.Copy()
	job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"}
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	plan := h.Plans[0]

	// Ensure the plan evicted 1
	if len(plan.NodeUpdate[node.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan places 1
	if len(plan.NodeAllocation[node2.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// This test asserts that an allocation from a job that is complete on a
// drained node is ignored.
func TestBatchSched_NodeDrain_Complete(t *testing.T) {
	h := NewHarness(t)

	// Create two nodes, one that is drained and has a successfully finished
	// alloc and a fresh undrained one
	node := mock.Node()
	node.Drain = true
	node2 := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))

	// Create a job
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.TaskGroups[0].Count = 1
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a complete alloc
	alloc := mock.Alloc()
	alloc.Job = job
	alloc.JobID = job.ID
	alloc.NodeID = node.ID
	alloc.Name = "my-job.web[0]"
	alloc.ClientStatus = structs.AllocClientStatusComplete
	alloc.TaskStates = make(map[string]*structs.TaskState)
	alloc.TaskStates["web"] = &structs.TaskState{
		State: structs.TaskStateDead,
		Events: []*structs.TaskEvent{
			{
				Type:     structs.TaskTerminated,
				ExitCode: 0,
			},
		},
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure no plan
	if len(h.Plans) != 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// This is a slightly odd test but it ensures that we handle a scale down of a
// task group's count and that it works even if all the allocs have the same
// name.
func TestBatchSched_ScaleDown_SameName(t *testing.T) {
	h := NewHarness(t)

	// Create a node
	node := mock.Node()
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create a job with a count of 1
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.TaskGroups[0].Count = 1
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a few running allocs that all share the same name
	var allocs []*structs.Allocation
	for i := 0; i < 5; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = "my-job.web[0]"
		alloc.ClientStatus = structs.AllocClientStatusRunning
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewBatchScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	plan := h.Plans[0]

	// Ensure the plan evicted 4 of the 5
	if len(plan.NodeUpdate[node.ID]) != 4 {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// TestGenericSched_ChainedAlloc verifies that replacement allocations created
// by a destructive update are chained to the allocations they replace via
// PreviousAllocation.
func TestGenericSched_ChainedAlloc(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
	// Process the evaluation
	if err := h.Process(NewServiceScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Collect the IDs of the original placements
	var allocIDs []string
	for _, allocList := range h.Plans[0].NodeAllocation {
		for _, alloc := range allocList {
			allocIDs = append(allocIDs, alloc.ID)
		}
	}
	sort.Strings(allocIDs)

	// Create a new harness to invoke the scheduler again; destructive update
	// (env change) plus a count bump from 10 to 12
	h1 := NewHarnessWithState(t, h.State)
	job1 := mock.Job()
	job1.ID = job.ID
	job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
	job1.TaskGroups[0].Count = 12
	noErr(t, h1.State.UpsertJob(h1.NextIndex(), job1))

	// Create a mock evaluation to update the job
	eval1 := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job1.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job1.ID,
		Status:      structs.EvalStatusPending,
	}
	// NOTE(review): eval1 is upserted through h even though h1 processes it;
	// this works because h1 shares h's state store (NewHarnessWithState).
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))

	// Process the evaluation
	if err := h1.Process(NewServiceScheduler, eval1); err != nil {
		t.Fatalf("err: %v", err)
	}

	plan := h1.Plans[0]

	// Collect all the chained allocation ids and the new allocations which
	// don't have any chained allocations
	var prevAllocs []string
	var newAllocs []string
	for _, allocList := range plan.NodeAllocation {
		for _, alloc := range allocList {
			if alloc.PreviousAllocation == "" {
				newAllocs = append(newAllocs, alloc.ID)
				continue
			}
			prevAllocs = append(prevAllocs, alloc.PreviousAllocation)
		}
	}
	sort.Strings(prevAllocs)

	// Ensure that the new allocations has their corresponding original
	// allocation ids
	if !reflect.DeepEqual(prevAllocs, allocIDs) {
		t.Fatalf("expected: %v, actual: %v", len(allocIDs), len(prevAllocs))
	}

	// Ensuring two new allocations don't have any chained allocations
	if len(newAllocs) != 2 {
		t.Fatalf("expected: %v, actual: %v", 2, len(newAllocs))
	}
}

// TestServiceSched_NodeDrain_Sticky asserts that an alloc with a sticky
// ephemeral disk on a draining node is stopped without being replaced.
func TestServiceSched_NodeDrain_Sticky(t *testing.T) {
	h := NewHarness(t)

	// Register a draining node
	node := mock.Node()
	node.Drain = true
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create an alloc on the draining node
	alloc := mock.Alloc()
	alloc.Name = "my-job.web[0]"
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	alloc.NodeID = node.ID
	alloc.Job.TaskGroups[0].Count = 1
	alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true
	noErr(t, h.State.UpsertJob(h.NextIndex(), alloc.Job))
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       alloc.Job.ID,
		NodeID:      node.ID,
		Status:      structs.EvalStatusPending,
	}

	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs
	if len(plan.NodeUpdate[node.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan didn't create any new allocations
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// This test ensures that when a job is stopped, the scheduler properly cancels
// an outstanding deployment.
func TestServiceSched_CancelDeployment_Stopped(t *testing.T) {
	h := NewHarness(t)

	// Generate a fake job that is stopped and whose modify indexes are
	// ahead of its create index
	job := mock.Job()
	job.JobModifyIndex = job.CreateIndex + 1
	job.ModifyIndex = job.CreateIndex + 1
	job.Stop = true
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a deployment that references an older modify index of the job
	d := mock.Deployment()
	d.JobID = job.ID
	d.JobCreateIndex = job.CreateIndex
	d.JobModifyIndex = job.JobModifyIndex - 1
	noErr(t, h.State.UpsertDeployment(h.NextIndex(), d))

	// Create a mock evaluation to deregister the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobDeregister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan cancelled the existing deployment
	ws := memdb.NewWatchSet()
	out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
	noErr(t, err)

	if out == nil {
		t.Fatalf("No deployment for job")
	}
	if out.ID != d.ID {
		t.Fatalf("Latest deployment for job is different than original deployment")
	}
	if out.Status != structs.DeploymentStatusCancelled {
		t.Fatalf("Deployment status is %q, want %q", out.Status, structs.DeploymentStatusCancelled)
	}
	if out.StatusDescription != structs.DeploymentStatusDescriptionStoppedJob {
		t.Fatalf("Deployment status description is %q, want %q",
			out.StatusDescription, structs.DeploymentStatusDescriptionStoppedJob)
	}

	// Ensure the plan didn't allocate anything
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// This test ensures that when a job is updated and had an old deployment, the scheduler properly cancels
// the deployment.
func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) {
	h := NewHarness(t)

	// Generate a fake job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a deployment for an old version of the job
	d := mock.Deployment()
	d.JobID = job.ID
	noErr(t, h.State.UpsertDeployment(h.NextIndex(), d))

	// Upsert again to bump job version
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to kick the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan cancelled the existing deployment
	ws := memdb.NewWatchSet()
	out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
	noErr(t, err)

	if out == nil {
		t.Fatalf("No deployment for job")
	}
	if out.ID != d.ID {
		t.Fatalf("Latest deployment for job is different than original deployment")
	}
	if out.Status != structs.DeploymentStatusCancelled {
		t.Fatalf("Deployment status is %q, want %q", out.Status, structs.DeploymentStatusCancelled)
	}
	if out.StatusDescription != structs.DeploymentStatusDescriptionNewerJob {
		t.Fatalf("Deployment status description is %q, want %q",
			out.StatusDescription, structs.DeploymentStatusDescriptionNewerJob)
	}
	// Ensure the plan didn't allocate anything
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}