github.com/blixtra/nomad@v0.7.2-0.20171221000451-da9a1d7bb050/scheduler/generic_sched_test.go (about) 1 package scheduler 2 3 import ( 4 "fmt" 5 "reflect" 6 "sort" 7 "testing" 8 "time" 9 10 memdb "github.com/hashicorp/go-memdb" 11 "github.com/hashicorp/nomad/helper" 12 "github.com/hashicorp/nomad/helper/uuid" 13 "github.com/hashicorp/nomad/nomad/mock" 14 "github.com/hashicorp/nomad/nomad/structs" 15 "github.com/stretchr/testify/assert" 16 ) 17 18 func TestServiceSched_JobRegister(t *testing.T) { 19 h := NewHarness(t) 20 21 // Create some nodes 22 for i := 0; i < 10; i++ { 23 node := mock.Node() 24 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 25 } 26 27 // Create a job 28 job := mock.Job() 29 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 30 31 // Create a mock evaluation to register the job 32 eval := &structs.Evaluation{ 33 Namespace: structs.DefaultNamespace, 34 ID: uuid.Generate(), 35 Priority: job.Priority, 36 TriggeredBy: structs.EvalTriggerJobRegister, 37 JobID: job.ID, 38 Status: structs.EvalStatusPending, 39 } 40 41 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 42 43 // Process the evaluation 44 err := h.Process(NewServiceScheduler, eval) 45 if err != nil { 46 t.Fatalf("err: %v", err) 47 } 48 49 // Ensure a single plan 50 if len(h.Plans) != 1 { 51 t.Fatalf("bad: %#v", h.Plans) 52 } 53 plan := h.Plans[0] 54 55 // Ensure the plan doesn't have annotations. 56 if plan.Annotations != nil { 57 t.Fatalf("expected no annotations") 58 } 59 60 // Ensure the eval has no spawned blocked eval 61 if len(h.CreateEvals) != 0 { 62 t.Fatalf("bad: %#v", h.CreateEvals) 63 if h.Evals[0].BlockedEval != "" { 64 t.Fatalf("bad: %#v", h.Evals[0]) 65 } 66 } 67 68 // Ensure the plan allocated 69 var planned []*structs.Allocation 70 for _, allocList := range plan.NodeAllocation { 71 planned = append(planned, allocList...) 
72 } 73 if len(planned) != 10 { 74 t.Fatalf("bad: %#v", plan) 75 } 76 77 // Lookup the allocations by JobID 78 ws := memdb.NewWatchSet() 79 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 80 noErr(t, err) 81 82 // Ensure all allocations placed 83 if len(out) != 10 { 84 t.Fatalf("bad: %#v", out) 85 } 86 87 // Ensure different ports were used. 88 used := make(map[int]struct{}) 89 for _, alloc := range out { 90 for _, resource := range alloc.TaskResources { 91 for _, port := range resource.Networks[0].DynamicPorts { 92 if _, ok := used[port.Value]; ok { 93 t.Fatalf("Port collision %v", port.Value) 94 } 95 used[port.Value] = struct{}{} 96 } 97 } 98 } 99 100 h.AssertEvalStatus(t, structs.EvalStatusComplete) 101 } 102 103 func TestServiceSched_JobRegister_StickyAllocs(t *testing.T) { 104 h := NewHarness(t) 105 106 // Create some nodes 107 for i := 0; i < 10; i++ { 108 node := mock.Node() 109 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 110 } 111 112 // Create a job 113 job := mock.Job() 114 job.TaskGroups[0].EphemeralDisk.Sticky = true 115 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 116 117 // Create a mock evaluation to register the job 118 eval := &structs.Evaluation{ 119 Namespace: structs.DefaultNamespace, 120 ID: uuid.Generate(), 121 Priority: job.Priority, 122 TriggeredBy: structs.EvalTriggerJobRegister, 123 JobID: job.ID, 124 Status: structs.EvalStatusPending, 125 } 126 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 127 128 // Process the evaluation 129 if err := h.Process(NewServiceScheduler, eval); err != nil { 130 t.Fatalf("err: %v", err) 131 } 132 133 // Ensure the plan allocated 134 plan := h.Plans[0] 135 planned := make(map[string]*structs.Allocation) 136 for _, allocList := range plan.NodeAllocation { 137 for _, alloc := range allocList { 138 planned[alloc.ID] = alloc 139 } 140 } 141 if len(planned) != 10 { 142 t.Fatalf("bad: %#v", plan) 143 } 144 145 // Update the job to force a rolling upgrade 146 
updated := job.Copy() 147 updated.TaskGroups[0].Tasks[0].Resources.CPU += 10 148 noErr(t, h.State.UpsertJob(h.NextIndex(), updated)) 149 150 // Create a mock evaluation to handle the update 151 eval = &structs.Evaluation{ 152 Namespace: structs.DefaultNamespace, 153 ID: uuid.Generate(), 154 Priority: job.Priority, 155 TriggeredBy: structs.EvalTriggerNodeUpdate, 156 JobID: job.ID, 157 Status: structs.EvalStatusPending, 158 } 159 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 160 h1 := NewHarnessWithState(t, h.State) 161 if err := h1.Process(NewServiceScheduler, eval); err != nil { 162 t.Fatalf("err: %v", err) 163 } 164 165 // Ensure we have created only one new allocation 166 // Ensure a single plan 167 if len(h1.Plans) != 1 { 168 t.Fatalf("bad: %#v", h1.Plans) 169 } 170 plan = h1.Plans[0] 171 var newPlanned []*structs.Allocation 172 for _, allocList := range plan.NodeAllocation { 173 newPlanned = append(newPlanned, allocList...) 174 } 175 if len(newPlanned) != 10 { 176 t.Fatalf("bad plan: %#v", plan) 177 } 178 // Ensure that the new allocations were placed on the same node as the older 179 // ones 180 for _, new := range newPlanned { 181 if new.PreviousAllocation == "" { 182 t.Fatalf("new alloc %q doesn't have a previous allocation", new.ID) 183 } 184 185 old, ok := planned[new.PreviousAllocation] 186 if !ok { 187 t.Fatalf("new alloc %q previous allocation doesn't match any prior placed alloc (%q)", new.ID, new.PreviousAllocation) 188 } 189 if new.NodeID != old.NodeID { 190 t.Fatalf("new alloc and old alloc node doesn't match; got %q; want %q", new.NodeID, old.NodeID) 191 } 192 } 193 } 194 195 func TestServiceSched_JobRegister_DiskConstraints(t *testing.T) { 196 h := NewHarness(t) 197 198 // Create a node 199 node := mock.Node() 200 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 201 202 // Create a job with count 2 and disk as 60GB so that only one allocation 203 // can fit 204 job := mock.Job() 205 job.TaskGroups[0].Count = 2 206 
job.TaskGroups[0].EphemeralDisk.SizeMB = 88 * 1024 207 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 208 209 // Create a mock evaluation to register the job 210 eval := &structs.Evaluation{ 211 Namespace: structs.DefaultNamespace, 212 ID: uuid.Generate(), 213 Priority: job.Priority, 214 TriggeredBy: structs.EvalTriggerJobRegister, 215 JobID: job.ID, 216 Status: structs.EvalStatusPending, 217 } 218 219 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 220 221 // Process the evaluation 222 err := h.Process(NewServiceScheduler, eval) 223 if err != nil { 224 t.Fatalf("err: %v", err) 225 } 226 227 // Ensure a single plan 228 if len(h.Plans) != 1 { 229 t.Fatalf("bad: %#v", h.Plans) 230 } 231 plan := h.Plans[0] 232 233 // Ensure the plan doesn't have annotations. 234 if plan.Annotations != nil { 235 t.Fatalf("expected no annotations") 236 } 237 238 // Ensure the eval has a blocked eval 239 if len(h.CreateEvals) != 1 { 240 t.Fatalf("bad: %#v", h.CreateEvals) 241 } 242 243 // Ensure the plan allocated only one allocation 244 var planned []*structs.Allocation 245 for _, allocList := range plan.NodeAllocation { 246 planned = append(planned, allocList...) 247 } 248 if len(planned) != 1 { 249 t.Fatalf("bad: %#v", plan) 250 } 251 252 // Lookup the allocations by JobID 253 ws := memdb.NewWatchSet() 254 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 255 noErr(t, err) 256 257 // Ensure only one allocation was placed 258 if len(out) != 1 { 259 t.Fatalf("bad: %#v", out) 260 } 261 262 h.AssertEvalStatus(t, structs.EvalStatusComplete) 263 } 264 265 func TestServiceSched_JobRegister_DistinctHosts(t *testing.T) { 266 h := NewHarness(t) 267 268 // Create some nodes 269 for i := 0; i < 10; i++ { 270 node := mock.Node() 271 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 272 } 273 274 // Create a job that uses distinct host and has count 1 higher than what is 275 // possible. 
276 job := mock.Job() 277 job.TaskGroups[0].Count = 11 278 job.Constraints = append(job.Constraints, &structs.Constraint{Operand: structs.ConstraintDistinctHosts}) 279 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 280 281 // Create a mock evaluation to register the job 282 eval := &structs.Evaluation{ 283 Namespace: structs.DefaultNamespace, 284 ID: uuid.Generate(), 285 Priority: job.Priority, 286 TriggeredBy: structs.EvalTriggerJobRegister, 287 JobID: job.ID, 288 Status: structs.EvalStatusPending, 289 } 290 291 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 292 293 // Process the evaluation 294 err := h.Process(NewServiceScheduler, eval) 295 if err != nil { 296 t.Fatalf("err: %v", err) 297 } 298 299 // Ensure a single plan 300 if len(h.Plans) != 1 { 301 t.Fatalf("bad: %#v", h.Plans) 302 } 303 plan := h.Plans[0] 304 305 // Ensure the eval has spawned blocked eval 306 if len(h.CreateEvals) != 1 { 307 t.Fatalf("bad: %#v", h.CreateEvals) 308 } 309 310 // Ensure the plan failed to alloc 311 outEval := h.Evals[0] 312 if len(outEval.FailedTGAllocs) != 1 { 313 t.Fatalf("bad: %+v", outEval) 314 } 315 316 // Ensure the plan allocated 317 var planned []*structs.Allocation 318 for _, allocList := range plan.NodeAllocation { 319 planned = append(planned, allocList...) 320 } 321 if len(planned) != 10 { 322 t.Fatalf("bad: %#v", plan) 323 } 324 325 // Lookup the allocations by JobID 326 ws := memdb.NewWatchSet() 327 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 328 noErr(t, err) 329 330 // Ensure all allocations placed 331 if len(out) != 10 { 332 t.Fatalf("bad: %#v", out) 333 } 334 335 // Ensure different node was used per. 
336 used := make(map[string]struct{}) 337 for _, alloc := range out { 338 if _, ok := used[alloc.NodeID]; ok { 339 t.Fatalf("Node collision %v", alloc.NodeID) 340 } 341 used[alloc.NodeID] = struct{}{} 342 } 343 344 h.AssertEvalStatus(t, structs.EvalStatusComplete) 345 } 346 347 func TestServiceSched_JobRegister_DistinctProperty(t *testing.T) { 348 h := NewHarness(t) 349 350 // Create some nodes 351 for i := 0; i < 10; i++ { 352 node := mock.Node() 353 rack := "rack2" 354 if i < 5 { 355 rack = "rack1" 356 } 357 node.Meta["rack"] = rack 358 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 359 } 360 361 // Create a job that uses distinct property and has count higher than what is 362 // possible. 363 job := mock.Job() 364 job.TaskGroups[0].Count = 8 365 job.Constraints = append(job.Constraints, 366 &structs.Constraint{ 367 Operand: structs.ConstraintDistinctProperty, 368 LTarget: "${meta.rack}", 369 RTarget: "2", 370 }) 371 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 372 373 // Create a mock evaluation to register the job 374 eval := &structs.Evaluation{ 375 Namespace: structs.DefaultNamespace, 376 ID: uuid.Generate(), 377 Priority: job.Priority, 378 TriggeredBy: structs.EvalTriggerJobRegister, 379 JobID: job.ID, 380 Status: structs.EvalStatusPending, 381 } 382 383 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 384 385 // Process the evaluation 386 err := h.Process(NewServiceScheduler, eval) 387 if err != nil { 388 t.Fatalf("err: %v", err) 389 } 390 391 // Ensure a single plan 392 if len(h.Plans) != 1 { 393 t.Fatalf("bad: %#v", h.Plans) 394 } 395 plan := h.Plans[0] 396 397 // Ensure the plan doesn't have annotations. 
398 if plan.Annotations != nil { 399 t.Fatalf("expected no annotations") 400 } 401 402 // Ensure the eval has spawned blocked eval 403 if len(h.CreateEvals) != 1 { 404 t.Fatalf("bad: %#v", h.CreateEvals) 405 } 406 407 // Ensure the plan failed to alloc 408 outEval := h.Evals[0] 409 if len(outEval.FailedTGAllocs) != 1 { 410 t.Fatalf("bad: %+v", outEval) 411 } 412 413 // Ensure the plan allocated 414 var planned []*structs.Allocation 415 for _, allocList := range plan.NodeAllocation { 416 planned = append(planned, allocList...) 417 } 418 if len(planned) != 4 { 419 t.Fatalf("bad: %#v", plan) 420 } 421 422 // Lookup the allocations by JobID 423 ws := memdb.NewWatchSet() 424 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 425 noErr(t, err) 426 427 // Ensure all allocations placed 428 if len(out) != 4 { 429 t.Fatalf("bad: %#v", out) 430 } 431 432 // Ensure each node was only used twice 433 used := make(map[string]uint64) 434 for _, alloc := range out { 435 if count, _ := used[alloc.NodeID]; count > 2 { 436 t.Fatalf("Node %v used too much: %d", alloc.NodeID, count) 437 } 438 used[alloc.NodeID]++ 439 } 440 441 h.AssertEvalStatus(t, structs.EvalStatusComplete) 442 } 443 444 func TestServiceSched_JobRegister_DistinctProperty_TaskGroup(t *testing.T) { 445 h := NewHarness(t) 446 447 // Create some nodes 448 for i := 0; i < 2; i++ { 449 node := mock.Node() 450 node.Meta["ssd"] = "true" 451 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 452 } 453 454 // Create a job that uses distinct property only on one task group. 
455 job := mock.Job() 456 job.TaskGroups = append(job.TaskGroups, job.TaskGroups[0].Copy()) 457 job.TaskGroups[0].Count = 1 458 job.TaskGroups[0].Constraints = append(job.TaskGroups[0].Constraints, 459 &structs.Constraint{ 460 Operand: structs.ConstraintDistinctProperty, 461 LTarget: "${meta.ssd}", 462 }) 463 464 job.TaskGroups[1].Name = "tg2" 465 job.TaskGroups[1].Count = 2 466 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 467 468 // Create a mock evaluation to register the job 469 eval := &structs.Evaluation{ 470 Namespace: structs.DefaultNamespace, 471 ID: uuid.Generate(), 472 Priority: job.Priority, 473 TriggeredBy: structs.EvalTriggerJobRegister, 474 JobID: job.ID, 475 Status: structs.EvalStatusPending, 476 } 477 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 478 479 // Process the evaluation 480 err := h.Process(NewServiceScheduler, eval) 481 if err != nil { 482 t.Fatalf("err: %v", err) 483 } 484 485 // Ensure a single plan 486 if len(h.Plans) != 1 { 487 t.Fatalf("bad: %#v", h.Plans) 488 } 489 plan := h.Plans[0] 490 491 // Ensure the plan doesn't have annotations. 492 if plan.Annotations != nil { 493 t.Fatalf("expected no annotations") 494 } 495 496 // Ensure the eval hasn't spawned blocked eval 497 if len(h.CreateEvals) != 0 { 498 t.Fatalf("bad: %#v", h.CreateEvals[0]) 499 } 500 501 // Ensure the plan allocated 502 var planned []*structs.Allocation 503 for _, allocList := range plan.NodeAllocation { 504 planned = append(planned, allocList...) 
505 } 506 if len(planned) != 3 { 507 t.Fatalf("bad: %#v", plan) 508 } 509 510 // Lookup the allocations by JobID 511 ws := memdb.NewWatchSet() 512 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 513 noErr(t, err) 514 515 // Ensure all allocations placed 516 if len(out) != 3 { 517 t.Fatalf("bad: %#v", out) 518 } 519 520 h.AssertEvalStatus(t, structs.EvalStatusComplete) 521 } 522 523 func TestServiceSched_JobRegister_DistinctProperty_TaskGroup_Incr(t *testing.T) { 524 h := NewHarness(t) 525 assert := assert.New(t) 526 527 // Create a job that uses distinct property over the node-id 528 job := mock.Job() 529 job.TaskGroups[0].Count = 3 530 job.TaskGroups[0].Constraints = append(job.TaskGroups[0].Constraints, 531 &structs.Constraint{ 532 Operand: structs.ConstraintDistinctProperty, 533 LTarget: "${node.unique.id}", 534 }) 535 assert.Nil(h.State.UpsertJob(h.NextIndex(), job), "UpsertJob") 536 537 // Create some nodes 538 var nodes []*structs.Node 539 for i := 0; i < 6; i++ { 540 node := mock.Node() 541 nodes = append(nodes, node) 542 assert.Nil(h.State.UpsertNode(h.NextIndex(), node), "UpsertNode") 543 } 544 545 // Create some allocations 546 var allocs []*structs.Allocation 547 for i := 0; i < 3; i++ { 548 alloc := mock.Alloc() 549 alloc.Job = job 550 alloc.JobID = job.ID 551 alloc.NodeID = nodes[i].ID 552 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 553 allocs = append(allocs, alloc) 554 } 555 assert.Nil(h.State.UpsertAllocs(h.NextIndex(), allocs), "UpsertAllocs") 556 557 // Update the count 558 job2 := job.Copy() 559 job2.TaskGroups[0].Count = 6 560 assert.Nil(h.State.UpsertJob(h.NextIndex(), job2), "UpsertJob") 561 562 // Create a mock evaluation to register the job 563 eval := &structs.Evaluation{ 564 Namespace: structs.DefaultNamespace, 565 ID: uuid.Generate(), 566 Priority: job.Priority, 567 TriggeredBy: structs.EvalTriggerJobRegister, 568 JobID: job.ID, 569 Status: structs.EvalStatusPending, 570 } 571 noErr(t, 
h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 572 573 // Process the evaluation 574 assert.Nil(h.Process(NewServiceScheduler, eval), "Process") 575 576 // Ensure a single plan 577 assert.Len(h.Plans, 1, "Number of plans") 578 plan := h.Plans[0] 579 580 // Ensure the plan doesn't have annotations. 581 assert.Nil(plan.Annotations, "Plan.Annotations") 582 583 // Ensure the eval hasn't spawned blocked eval 584 assert.Len(h.CreateEvals, 0, "Created Evals") 585 586 // Ensure the plan allocated 587 var planned []*structs.Allocation 588 for _, allocList := range plan.NodeAllocation { 589 planned = append(planned, allocList...) 590 } 591 assert.Len(planned, 6, "Planned Allocations") 592 593 // Lookup the allocations by JobID 594 ws := memdb.NewWatchSet() 595 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 596 assert.Nil(err, "AllocsByJob") 597 598 // Ensure all allocations placed 599 assert.Len(out, 6, "Placed Allocations") 600 601 h.AssertEvalStatus(t, structs.EvalStatusComplete) 602 } 603 604 func TestServiceSched_JobRegister_Annotate(t *testing.T) { 605 h := NewHarness(t) 606 607 // Create some nodes 608 for i := 0; i < 10; i++ { 609 node := mock.Node() 610 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 611 } 612 613 // Create a job 614 job := mock.Job() 615 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 616 617 // Create a mock evaluation to register the job 618 eval := &structs.Evaluation{ 619 Namespace: structs.DefaultNamespace, 620 ID: uuid.Generate(), 621 Priority: job.Priority, 622 TriggeredBy: structs.EvalTriggerJobRegister, 623 JobID: job.ID, 624 AnnotatePlan: true, 625 Status: structs.EvalStatusPending, 626 } 627 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 628 629 // Process the evaluation 630 err := h.Process(NewServiceScheduler, eval) 631 if err != nil { 632 t.Fatalf("err: %v", err) 633 } 634 635 // Ensure a single plan 636 if len(h.Plans) != 1 { 637 t.Fatalf("bad: %#v", h.Plans) 638 } 639 
plan := h.Plans[0] 640 641 // Ensure the plan allocated 642 var planned []*structs.Allocation 643 for _, allocList := range plan.NodeAllocation { 644 planned = append(planned, allocList...) 645 } 646 if len(planned) != 10 { 647 t.Fatalf("bad: %#v", plan) 648 } 649 650 // Lookup the allocations by JobID 651 ws := memdb.NewWatchSet() 652 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 653 noErr(t, err) 654 655 // Ensure all allocations placed 656 if len(out) != 10 { 657 t.Fatalf("bad: %#v", out) 658 } 659 660 h.AssertEvalStatus(t, structs.EvalStatusComplete) 661 662 // Ensure the plan had annotations. 663 if plan.Annotations == nil { 664 t.Fatalf("expected annotations") 665 } 666 667 desiredTGs := plan.Annotations.DesiredTGUpdates 668 if l := len(desiredTGs); l != 1 { 669 t.Fatalf("incorrect number of task groups; got %v; want %v", l, 1) 670 } 671 672 desiredChanges, ok := desiredTGs["web"] 673 if !ok { 674 t.Fatalf("expected task group web to have desired changes") 675 } 676 677 expected := &structs.DesiredUpdates{Place: 10} 678 if !reflect.DeepEqual(desiredChanges, expected) { 679 t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected) 680 } 681 } 682 683 func TestServiceSched_JobRegister_CountZero(t *testing.T) { 684 h := NewHarness(t) 685 686 // Create some nodes 687 for i := 0; i < 10; i++ { 688 node := mock.Node() 689 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 690 } 691 692 // Create a job and set the task group count to zero. 
693 job := mock.Job() 694 job.TaskGroups[0].Count = 0 695 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 696 697 // Create a mock evaluation to register the job 698 eval := &structs.Evaluation{ 699 Namespace: structs.DefaultNamespace, 700 ID: uuid.Generate(), 701 Priority: job.Priority, 702 TriggeredBy: structs.EvalTriggerJobRegister, 703 JobID: job.ID, 704 Status: structs.EvalStatusPending, 705 } 706 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 707 708 // Process the evaluation 709 err := h.Process(NewServiceScheduler, eval) 710 if err != nil { 711 t.Fatalf("err: %v", err) 712 } 713 714 // Ensure there was no plan 715 if len(h.Plans) != 0 { 716 t.Fatalf("bad: %#v", h.Plans) 717 } 718 719 // Lookup the allocations by JobID 720 ws := memdb.NewWatchSet() 721 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 722 noErr(t, err) 723 724 // Ensure no allocations placed 725 if len(out) != 0 { 726 t.Fatalf("bad: %#v", out) 727 } 728 729 h.AssertEvalStatus(t, structs.EvalStatusComplete) 730 } 731 732 func TestServiceSched_JobRegister_AllocFail(t *testing.T) { 733 h := NewHarness(t) 734 735 // Create NO nodes 736 // Create a job 737 job := mock.Job() 738 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 739 740 // Create a mock evaluation to register the job 741 eval := &structs.Evaluation{ 742 Namespace: structs.DefaultNamespace, 743 ID: uuid.Generate(), 744 Priority: job.Priority, 745 TriggeredBy: structs.EvalTriggerJobRegister, 746 JobID: job.ID, 747 Status: structs.EvalStatusPending, 748 } 749 750 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 751 752 // Process the evaluation 753 err := h.Process(NewServiceScheduler, eval) 754 if err != nil { 755 t.Fatalf("err: %v", err) 756 } 757 758 // Ensure no plan 759 if len(h.Plans) != 0 { 760 t.Fatalf("bad: %#v", h.Plans) 761 } 762 763 // Ensure there is a follow up eval. 
764 if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked { 765 t.Fatalf("bad: %#v", h.CreateEvals) 766 } 767 768 if len(h.Evals) != 1 { 769 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 770 } 771 outEval := h.Evals[0] 772 773 // Ensure the eval has its spawned blocked eval 774 if outEval.BlockedEval != h.CreateEvals[0].ID { 775 t.Fatalf("bad: %#v", outEval) 776 } 777 778 // Ensure the plan failed to alloc 779 if outEval == nil || len(outEval.FailedTGAllocs) != 1 { 780 t.Fatalf("bad: %#v", outEval) 781 } 782 783 metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name] 784 if !ok { 785 t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs) 786 } 787 788 // Check the coalesced failures 789 if metrics.CoalescedFailures != 9 { 790 t.Fatalf("bad: %#v", metrics) 791 } 792 793 // Check the available nodes 794 if count, ok := metrics.NodesAvailable["dc1"]; !ok || count != 0 { 795 t.Fatalf("bad: %#v", metrics) 796 } 797 798 // Check queued allocations 799 queued := outEval.QueuedAllocations["web"] 800 if queued != 10 { 801 t.Fatalf("expected queued: %v, actual: %v", 10, queued) 802 } 803 h.AssertEvalStatus(t, structs.EvalStatusComplete) 804 } 805 806 func TestServiceSched_JobRegister_CreateBlockedEval(t *testing.T) { 807 h := NewHarness(t) 808 809 // Create a full node 810 node := mock.Node() 811 node.Reserved = node.Resources 812 node.ComputeClass() 813 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 814 815 // Create an ineligible node 816 node2 := mock.Node() 817 node2.Attributes["kernel.name"] = "windows" 818 node2.ComputeClass() 819 noErr(t, h.State.UpsertNode(h.NextIndex(), node2)) 820 821 // Create a jobs 822 job := mock.Job() 823 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 824 825 // Create a mock evaluation to register the job 826 eval := &structs.Evaluation{ 827 Namespace: structs.DefaultNamespace, 828 ID: uuid.Generate(), 829 Priority: job.Priority, 830 TriggeredBy: structs.EvalTriggerJobRegister, 831 
JobID: job.ID, 832 Status: structs.EvalStatusPending, 833 } 834 835 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 836 837 // Process the evaluation 838 err := h.Process(NewServiceScheduler, eval) 839 if err != nil { 840 t.Fatalf("err: %v", err) 841 } 842 843 // Ensure no plan 844 if len(h.Plans) != 0 { 845 t.Fatalf("bad: %#v", h.Plans) 846 } 847 848 // Ensure the plan has created a follow up eval. 849 if len(h.CreateEvals) != 1 { 850 t.Fatalf("bad: %#v", h.CreateEvals) 851 } 852 853 created := h.CreateEvals[0] 854 if created.Status != structs.EvalStatusBlocked { 855 t.Fatalf("bad: %#v", created) 856 } 857 858 classes := created.ClassEligibility 859 if len(classes) != 2 || !classes[node.ComputedClass] || classes[node2.ComputedClass] { 860 t.Fatalf("bad: %#v", classes) 861 } 862 863 if created.EscapedComputedClass { 864 t.Fatalf("bad: %#v", created) 865 } 866 867 // Ensure there is a follow up eval. 868 if len(h.CreateEvals) != 1 || h.CreateEvals[0].Status != structs.EvalStatusBlocked { 869 t.Fatalf("bad: %#v", h.CreateEvals) 870 } 871 872 if len(h.Evals) != 1 { 873 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 874 } 875 outEval := h.Evals[0] 876 877 // Ensure the plan failed to alloc 878 if outEval == nil || len(outEval.FailedTGAllocs) != 1 { 879 t.Fatalf("bad: %#v", outEval) 880 } 881 882 metrics, ok := outEval.FailedTGAllocs[job.TaskGroups[0].Name] 883 if !ok { 884 t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs) 885 } 886 887 // Check the coalesced failures 888 if metrics.CoalescedFailures != 9 { 889 t.Fatalf("bad: %#v", metrics) 890 } 891 892 // Check the available nodes 893 if count, ok := metrics.NodesAvailable["dc1"]; !ok || count != 2 { 894 t.Fatalf("bad: %#v", metrics) 895 } 896 897 h.AssertEvalStatus(t, structs.EvalStatusComplete) 898 } 899 900 func TestServiceSched_JobRegister_FeasibleAndInfeasibleTG(t *testing.T) { 901 h := NewHarness(t) 902 903 // Create one node 904 node := mock.Node() 905 
node.NodeClass = "class_0" 906 noErr(t, node.ComputeClass()) 907 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 908 909 // Create a job that constrains on a node class 910 job := mock.Job() 911 job.TaskGroups[0].Count = 2 912 job.TaskGroups[0].Constraints = append(job.Constraints, 913 &structs.Constraint{ 914 LTarget: "${node.class}", 915 RTarget: "class_0", 916 Operand: "=", 917 }, 918 ) 919 tg2 := job.TaskGroups[0].Copy() 920 tg2.Name = "web2" 921 tg2.Constraints[1].RTarget = "class_1" 922 job.TaskGroups = append(job.TaskGroups, tg2) 923 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 924 925 // Create a mock evaluation to register the job 926 eval := &structs.Evaluation{ 927 Namespace: structs.DefaultNamespace, 928 ID: uuid.Generate(), 929 Priority: job.Priority, 930 TriggeredBy: structs.EvalTriggerJobRegister, 931 JobID: job.ID, 932 Status: structs.EvalStatusPending, 933 } 934 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 935 // Process the evaluation 936 err := h.Process(NewServiceScheduler, eval) 937 if err != nil { 938 t.Fatalf("err: %v", err) 939 } 940 941 // Ensure a single plan 942 if len(h.Plans) != 1 { 943 t.Fatalf("bad: %#v", h.Plans) 944 } 945 plan := h.Plans[0] 946 947 // Ensure the plan allocated 948 var planned []*structs.Allocation 949 for _, allocList := range plan.NodeAllocation { 950 planned = append(planned, allocList...) 
951 } 952 if len(planned) != 2 { 953 t.Fatalf("bad: %#v", plan) 954 } 955 956 // Ensure two allocations placed 957 ws := memdb.NewWatchSet() 958 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 959 noErr(t, err) 960 if len(out) != 2 { 961 t.Fatalf("bad: %#v", out) 962 } 963 964 if len(h.Evals) != 1 { 965 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 966 } 967 outEval := h.Evals[0] 968 969 // Ensure the eval has its spawned blocked eval 970 if outEval.BlockedEval != h.CreateEvals[0].ID { 971 t.Fatalf("bad: %#v", outEval) 972 } 973 974 // Ensure the plan failed to alloc one tg 975 if outEval == nil || len(outEval.FailedTGAllocs) != 1 { 976 t.Fatalf("bad: %#v", outEval) 977 } 978 979 metrics, ok := outEval.FailedTGAllocs[tg2.Name] 980 if !ok { 981 t.Fatalf("no failed metrics: %#v", outEval.FailedTGAllocs) 982 } 983 984 // Check the coalesced failures 985 if metrics.CoalescedFailures != tg2.Count-1 { 986 t.Fatalf("bad: %#v", metrics) 987 } 988 989 h.AssertEvalStatus(t, structs.EvalStatusComplete) 990 } 991 992 // This test just ensures the scheduler handles the eval type to avoid 993 // regressions. 994 func TestServiceSched_EvaluateMaxPlanEval(t *testing.T) { 995 h := NewHarness(t) 996 997 // Create a job and set the task group count to zero. 
998 job := mock.Job() 999 job.TaskGroups[0].Count = 0 1000 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1001 1002 // Create a mock blocked evaluation 1003 eval := &structs.Evaluation{ 1004 Namespace: structs.DefaultNamespace, 1005 ID: uuid.Generate(), 1006 Status: structs.EvalStatusBlocked, 1007 Priority: job.Priority, 1008 TriggeredBy: structs.EvalTriggerMaxPlans, 1009 JobID: job.ID, 1010 } 1011 1012 // Insert it into the state store 1013 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1014 1015 // Process the evaluation 1016 err := h.Process(NewServiceScheduler, eval) 1017 if err != nil { 1018 t.Fatalf("err: %v", err) 1019 } 1020 1021 // Ensure there was no plan 1022 if len(h.Plans) != 0 { 1023 t.Fatalf("bad: %#v", h.Plans) 1024 } 1025 1026 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1027 } 1028 1029 func TestServiceSched_Plan_Partial_Progress(t *testing.T) { 1030 h := NewHarness(t) 1031 1032 // Create a node 1033 node := mock.Node() 1034 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1035 1036 // Create a job with a high resource ask so that all the allocations can't 1037 // be placed on a single node. 
1038 job := mock.Job() 1039 job.TaskGroups[0].Count = 3 1040 job.TaskGroups[0].Tasks[0].Resources.CPU = 3600 1041 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1042 1043 // Create a mock evaluation to register the job 1044 eval := &structs.Evaluation{ 1045 Namespace: structs.DefaultNamespace, 1046 ID: uuid.Generate(), 1047 Priority: job.Priority, 1048 TriggeredBy: structs.EvalTriggerJobRegister, 1049 JobID: job.ID, 1050 Status: structs.EvalStatusPending, 1051 } 1052 1053 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1054 1055 // Process the evaluation 1056 err := h.Process(NewServiceScheduler, eval) 1057 if err != nil { 1058 t.Fatalf("err: %v", err) 1059 } 1060 1061 // Ensure a single plan 1062 if len(h.Plans) != 1 { 1063 t.Fatalf("bad: %#v", h.Plans) 1064 } 1065 plan := h.Plans[0] 1066 1067 // Ensure the plan doesn't have annotations. 1068 if plan.Annotations != nil { 1069 t.Fatalf("expected no annotations") 1070 } 1071 1072 // Ensure the plan allocated 1073 var planned []*structs.Allocation 1074 for _, allocList := range plan.NodeAllocation { 1075 planned = append(planned, allocList...) 
1076 } 1077 if len(planned) != 1 { 1078 t.Fatalf("bad: %#v", plan) 1079 } 1080 1081 // Lookup the allocations by JobID 1082 ws := memdb.NewWatchSet() 1083 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1084 noErr(t, err) 1085 1086 // Ensure only one allocations placed 1087 if len(out) != 1 { 1088 t.Fatalf("bad: %#v", out) 1089 } 1090 1091 queued := h.Evals[0].QueuedAllocations["web"] 1092 if queued != 2 { 1093 t.Fatalf("expected: %v, actual: %v", 2, queued) 1094 } 1095 1096 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1097 } 1098 1099 func TestServiceSched_EvaluateBlockedEval(t *testing.T) { 1100 h := NewHarness(t) 1101 1102 // Create a job 1103 job := mock.Job() 1104 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1105 1106 // Create a mock blocked evaluation 1107 eval := &structs.Evaluation{ 1108 Namespace: structs.DefaultNamespace, 1109 ID: uuid.Generate(), 1110 Status: structs.EvalStatusBlocked, 1111 Priority: job.Priority, 1112 TriggeredBy: structs.EvalTriggerJobRegister, 1113 JobID: job.ID, 1114 } 1115 1116 // Insert it into the state store 1117 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1118 1119 // Process the evaluation 1120 err := h.Process(NewServiceScheduler, eval) 1121 if err != nil { 1122 t.Fatalf("err: %v", err) 1123 } 1124 1125 // Ensure there was no plan 1126 if len(h.Plans) != 0 { 1127 t.Fatalf("bad: %#v", h.Plans) 1128 } 1129 1130 // Ensure that the eval was reblocked 1131 if len(h.ReblockEvals) != 1 { 1132 t.Fatalf("bad: %#v", h.ReblockEvals) 1133 } 1134 if h.ReblockEvals[0].ID != eval.ID { 1135 t.Fatalf("expect same eval to be reblocked; got %q; want %q", h.ReblockEvals[0].ID, eval.ID) 1136 } 1137 1138 // Ensure the eval status was not updated 1139 if len(h.Evals) != 0 { 1140 t.Fatalf("Existing eval should not have status set") 1141 } 1142 } 1143 1144 func TestServiceSched_EvaluateBlockedEval_Finished(t *testing.T) { 1145 h := NewHarness(t) 1146 1147 // Create some nodes 1148 for i := 0; i 
< 10; i++ { 1149 node := mock.Node() 1150 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1151 } 1152 1153 // Create a job and set the task group count to zero. 1154 job := mock.Job() 1155 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1156 1157 // Create a mock blocked evaluation 1158 eval := &structs.Evaluation{ 1159 Namespace: structs.DefaultNamespace, 1160 ID: uuid.Generate(), 1161 Status: structs.EvalStatusBlocked, 1162 Priority: job.Priority, 1163 TriggeredBy: structs.EvalTriggerJobRegister, 1164 JobID: job.ID, 1165 } 1166 1167 // Insert it into the state store 1168 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1169 1170 // Process the evaluation 1171 err := h.Process(NewServiceScheduler, eval) 1172 if err != nil { 1173 t.Fatalf("err: %v", err) 1174 } 1175 1176 // Ensure a single plan 1177 if len(h.Plans) != 1 { 1178 t.Fatalf("bad: %#v", h.Plans) 1179 } 1180 plan := h.Plans[0] 1181 1182 // Ensure the plan doesn't have annotations. 1183 if plan.Annotations != nil { 1184 t.Fatalf("expected no annotations") 1185 } 1186 1187 // Ensure the eval has no spawned blocked eval 1188 if len(h.Evals) != 1 { 1189 t.Fatalf("bad: %#v", h.Evals) 1190 if h.Evals[0].BlockedEval != "" { 1191 t.Fatalf("bad: %#v", h.Evals[0]) 1192 } 1193 } 1194 1195 // Ensure the plan allocated 1196 var planned []*structs.Allocation 1197 for _, allocList := range plan.NodeAllocation { 1198 planned = append(planned, allocList...) 
1199 } 1200 if len(planned) != 10 { 1201 t.Fatalf("bad: %#v", plan) 1202 } 1203 1204 // Lookup the allocations by JobID 1205 ws := memdb.NewWatchSet() 1206 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1207 noErr(t, err) 1208 1209 // Ensure all allocations placed 1210 if len(out) != 10 { 1211 t.Fatalf("bad: %#v", out) 1212 } 1213 1214 // Ensure the eval was not reblocked 1215 if len(h.ReblockEvals) != 0 { 1216 t.Fatalf("Existing eval should not have been reblocked as it placed all allocations") 1217 } 1218 1219 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1220 1221 // Ensure queued allocations is zero 1222 queued := h.Evals[0].QueuedAllocations["web"] 1223 if queued != 0 { 1224 t.Fatalf("expected queued: %v, actual: %v", 0, queued) 1225 } 1226 } 1227 1228 func TestServiceSched_JobModify(t *testing.T) { 1229 h := NewHarness(t) 1230 1231 // Create some nodes 1232 var nodes []*structs.Node 1233 for i := 0; i < 10; i++ { 1234 node := mock.Node() 1235 nodes = append(nodes, node) 1236 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1237 } 1238 1239 // Generate a fake job with allocations 1240 job := mock.Job() 1241 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1242 1243 var allocs []*structs.Allocation 1244 for i := 0; i < 10; i++ { 1245 alloc := mock.Alloc() 1246 alloc.Job = job 1247 alloc.JobID = job.ID 1248 alloc.NodeID = nodes[i].ID 1249 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1250 allocs = append(allocs, alloc) 1251 } 1252 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1253 1254 // Add a few terminal status allocations, these should be ignored 1255 var terminal []*structs.Allocation 1256 for i := 0; i < 5; i++ { 1257 alloc := mock.Alloc() 1258 alloc.Job = job 1259 alloc.JobID = job.ID 1260 alloc.NodeID = nodes[i].ID 1261 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1262 alloc.DesiredStatus = structs.AllocDesiredStatusStop 1263 terminal = append(terminal, alloc) 1264 } 1265 noErr(t, h.State.UpsertAllocs(h.NextIndex(), 
terminal)) 1266 1267 // Update the job 1268 job2 := mock.Job() 1269 job2.ID = job.ID 1270 1271 // Update the task, such that it cannot be done in-place 1272 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 1273 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1274 1275 // Create a mock evaluation to deal with drain 1276 eval := &structs.Evaluation{ 1277 Namespace: structs.DefaultNamespace, 1278 ID: uuid.Generate(), 1279 Priority: 50, 1280 TriggeredBy: structs.EvalTriggerJobRegister, 1281 JobID: job.ID, 1282 Status: structs.EvalStatusPending, 1283 } 1284 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1285 1286 // Process the evaluation 1287 err := h.Process(NewServiceScheduler, eval) 1288 if err != nil { 1289 t.Fatalf("err: %v", err) 1290 } 1291 1292 // Ensure a single plan 1293 if len(h.Plans) != 1 { 1294 t.Fatalf("bad: %#v", h.Plans) 1295 } 1296 plan := h.Plans[0] 1297 1298 // Ensure the plan evicted all allocs 1299 var update []*structs.Allocation 1300 for _, updateList := range plan.NodeUpdate { 1301 update = append(update, updateList...) 1302 } 1303 if len(update) != len(allocs) { 1304 t.Fatalf("bad: %#v", plan) 1305 } 1306 1307 // Ensure the plan allocated 1308 var planned []*structs.Allocation 1309 for _, allocList := range plan.NodeAllocation { 1310 planned = append(planned, allocList...) 1311 } 1312 if len(planned) != 10 { 1313 t.Fatalf("bad: %#v", plan) 1314 } 1315 1316 // Lookup the allocations by JobID 1317 ws := memdb.NewWatchSet() 1318 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1319 noErr(t, err) 1320 1321 // Ensure all allocations placed 1322 out, _ = structs.FilterTerminalAllocs(out) 1323 if len(out) != 10 { 1324 t.Fatalf("bad: %#v", out) 1325 } 1326 1327 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1328 } 1329 1330 // Have a single node and submit a job. 
Increment the count such that all fit 1331 // on the node but the node doesn't have enough resources to fit the new count + 1332 // 1. This tests that we properly discount the resources of existing allocs. 1333 func TestServiceSched_JobModify_IncrCount_NodeLimit(t *testing.T) { 1334 h := NewHarness(t) 1335 1336 // Create one node 1337 node := mock.Node() 1338 node.Resources.CPU = 1000 1339 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1340 1341 // Generate a fake job with one allocation 1342 job := mock.Job() 1343 job.TaskGroups[0].Tasks[0].Resources.CPU = 256 1344 job2 := job.Copy() 1345 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1346 1347 var allocs []*structs.Allocation 1348 alloc := mock.Alloc() 1349 alloc.Job = job 1350 alloc.JobID = job.ID 1351 alloc.NodeID = node.ID 1352 alloc.Name = "my-job.web[0]" 1353 alloc.Resources.CPU = 256 1354 allocs = append(allocs, alloc) 1355 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1356 1357 // Update the job to count 3 1358 job2.TaskGroups[0].Count = 3 1359 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1360 1361 // Create a mock evaluation to deal with drain 1362 eval := &structs.Evaluation{ 1363 Namespace: structs.DefaultNamespace, 1364 ID: uuid.Generate(), 1365 Priority: 50, 1366 TriggeredBy: structs.EvalTriggerJobRegister, 1367 JobID: job.ID, 1368 Status: structs.EvalStatusPending, 1369 } 1370 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1371 1372 // Process the evaluation 1373 err := h.Process(NewServiceScheduler, eval) 1374 if err != nil { 1375 t.Fatalf("err: %v", err) 1376 } 1377 1378 // Ensure a single plan 1379 if len(h.Plans) != 1 { 1380 t.Fatalf("bad: %#v", h.Plans) 1381 } 1382 plan := h.Plans[0] 1383 1384 // Ensure the plan didn't evicted the alloc 1385 var update []*structs.Allocation 1386 for _, updateList := range plan.NodeUpdate { 1387 update = append(update, updateList...) 
1388 } 1389 if len(update) != 0 { 1390 t.Fatalf("bad: %#v", plan) 1391 } 1392 1393 // Ensure the plan allocated 1394 var planned []*structs.Allocation 1395 for _, allocList := range plan.NodeAllocation { 1396 planned = append(planned, allocList...) 1397 } 1398 if len(planned) != 3 { 1399 t.Fatalf("bad: %#v", plan) 1400 } 1401 1402 // Ensure the plan had no failures 1403 if len(h.Evals) != 1 { 1404 t.Fatalf("incorrect number of updated eval: %#v", h.Evals) 1405 } 1406 outEval := h.Evals[0] 1407 if outEval == nil || len(outEval.FailedTGAllocs) != 0 { 1408 t.Fatalf("bad: %#v", outEval) 1409 } 1410 1411 // Lookup the allocations by JobID 1412 ws := memdb.NewWatchSet() 1413 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1414 noErr(t, err) 1415 1416 // Ensure all allocations placed 1417 out, _ = structs.FilterTerminalAllocs(out) 1418 if len(out) != 3 { 1419 t.Fatalf("bad: %#v", out) 1420 } 1421 1422 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1423 } 1424 1425 func TestServiceSched_JobModify_CountZero(t *testing.T) { 1426 h := NewHarness(t) 1427 1428 // Create some nodes 1429 var nodes []*structs.Node 1430 for i := 0; i < 10; i++ { 1431 node := mock.Node() 1432 nodes = append(nodes, node) 1433 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1434 } 1435 1436 // Generate a fake job with allocations 1437 job := mock.Job() 1438 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1439 1440 var allocs []*structs.Allocation 1441 for i := 0; i < 10; i++ { 1442 alloc := mock.Alloc() 1443 alloc.Job = job 1444 alloc.JobID = job.ID 1445 alloc.NodeID = nodes[i].ID 1446 alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i)) 1447 allocs = append(allocs, alloc) 1448 } 1449 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1450 1451 // Add a few terminal status allocations, these should be ignored 1452 var terminal []*structs.Allocation 1453 for i := 0; i < 5; i++ { 1454 alloc := mock.Alloc() 1455 alloc.Job = job 1456 alloc.JobID = job.ID 1457 
alloc.NodeID = nodes[i].ID 1458 alloc.Name = structs.AllocName(alloc.JobID, alloc.TaskGroup, uint(i)) 1459 alloc.DesiredStatus = structs.AllocDesiredStatusStop 1460 terminal = append(terminal, alloc) 1461 } 1462 noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal)) 1463 1464 // Update the job to be count zero 1465 job2 := mock.Job() 1466 job2.ID = job.ID 1467 job2.TaskGroups[0].Count = 0 1468 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1469 1470 // Create a mock evaluation to deal with drain 1471 eval := &structs.Evaluation{ 1472 Namespace: structs.DefaultNamespace, 1473 ID: uuid.Generate(), 1474 Priority: 50, 1475 TriggeredBy: structs.EvalTriggerJobRegister, 1476 JobID: job.ID, 1477 Status: structs.EvalStatusPending, 1478 } 1479 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1480 1481 // Process the evaluation 1482 err := h.Process(NewServiceScheduler, eval) 1483 if err != nil { 1484 t.Fatalf("err: %v", err) 1485 } 1486 1487 // Ensure a single plan 1488 if len(h.Plans) != 1 { 1489 t.Fatalf("bad: %#v", h.Plans) 1490 } 1491 plan := h.Plans[0] 1492 1493 // Ensure the plan evicted all allocs 1494 var update []*structs.Allocation 1495 for _, updateList := range plan.NodeUpdate { 1496 update = append(update, updateList...) 1497 } 1498 if len(update) != len(allocs) { 1499 t.Fatalf("bad: %#v", plan) 1500 } 1501 1502 // Ensure the plan didn't allocated 1503 var planned []*structs.Allocation 1504 for _, allocList := range plan.NodeAllocation { 1505 planned = append(planned, allocList...) 
1506 } 1507 if len(planned) != 0 { 1508 t.Fatalf("bad: %#v", plan) 1509 } 1510 1511 // Lookup the allocations by JobID 1512 ws := memdb.NewWatchSet() 1513 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1514 noErr(t, err) 1515 1516 // Ensure all allocations placed 1517 out, _ = structs.FilterTerminalAllocs(out) 1518 if len(out) != 0 { 1519 t.Fatalf("bad: %#v", out) 1520 } 1521 1522 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1523 } 1524 1525 func TestServiceSched_JobModify_Rolling(t *testing.T) { 1526 h := NewHarness(t) 1527 1528 // Create some nodes 1529 var nodes []*structs.Node 1530 for i := 0; i < 10; i++ { 1531 node := mock.Node() 1532 nodes = append(nodes, node) 1533 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1534 } 1535 1536 // Generate a fake job with allocations 1537 job := mock.Job() 1538 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1539 1540 var allocs []*structs.Allocation 1541 for i := 0; i < 10; i++ { 1542 alloc := mock.Alloc() 1543 alloc.Job = job 1544 alloc.JobID = job.ID 1545 alloc.NodeID = nodes[i].ID 1546 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1547 allocs = append(allocs, alloc) 1548 } 1549 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1550 1551 // Update the job 1552 job2 := mock.Job() 1553 job2.ID = job.ID 1554 desiredUpdates := 4 1555 job2.TaskGroups[0].Update = &structs.UpdateStrategy{ 1556 MaxParallel: desiredUpdates, 1557 HealthCheck: structs.UpdateStrategyHealthCheck_Checks, 1558 MinHealthyTime: 10 * time.Second, 1559 HealthyDeadline: 10 * time.Minute, 1560 } 1561 1562 // Update the task, such that it cannot be done in-place 1563 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 1564 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1565 1566 // Create a mock evaluation to deal with drain 1567 eval := &structs.Evaluation{ 1568 Namespace: structs.DefaultNamespace, 1569 ID: uuid.Generate(), 1570 Priority: 50, 1571 TriggeredBy: structs.EvalTriggerJobRegister, 1572 JobID: job.ID, 1573 
Status: structs.EvalStatusPending, 1574 } 1575 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1576 1577 // Process the evaluation 1578 err := h.Process(NewServiceScheduler, eval) 1579 if err != nil { 1580 t.Fatalf("err: %v", err) 1581 } 1582 1583 // Ensure a single plan 1584 if len(h.Plans) != 1 { 1585 t.Fatalf("bad: %#v", h.Plans) 1586 } 1587 plan := h.Plans[0] 1588 1589 // Ensure the plan evicted only MaxParallel 1590 var update []*structs.Allocation 1591 for _, updateList := range plan.NodeUpdate { 1592 update = append(update, updateList...) 1593 } 1594 if len(update) != desiredUpdates { 1595 t.Fatalf("bad: got %d; want %d: %#v", len(update), desiredUpdates, plan) 1596 } 1597 1598 // Ensure the plan allocated 1599 var planned []*structs.Allocation 1600 for _, allocList := range plan.NodeAllocation { 1601 planned = append(planned, allocList...) 1602 } 1603 if len(planned) != desiredUpdates { 1604 t.Fatalf("bad: %#v", plan) 1605 } 1606 1607 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1608 1609 // Check that the deployment id is attached to the eval 1610 if h.Evals[0].DeploymentID == "" { 1611 t.Fatalf("Eval not annotated with deployment id") 1612 } 1613 1614 // Ensure a deployment was created 1615 if plan.Deployment == nil { 1616 t.Fatalf("bad: %#v", plan) 1617 } 1618 state, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name] 1619 if !ok { 1620 t.Fatalf("bad: %#v", plan) 1621 } 1622 if state.DesiredTotal != 10 && state.DesiredCanaries != 0 { 1623 t.Fatalf("bad: %#v", state) 1624 } 1625 } 1626 1627 // This tests that the old allocation is stopped before placing. 1628 // It is critical to test that the updated job attempts to place more 1629 // allocations as this allows us to assert that destructive changes are done 1630 // first. 
1631 func TestServiceSched_JobModify_Rolling_FullNode(t *testing.T) { 1632 h := NewHarness(t) 1633 1634 // Create a node 1635 node := mock.Node() 1636 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1637 1638 resourceAsk := node.Resources.Copy() 1639 resourceAsk.CPU -= node.Reserved.CPU 1640 resourceAsk.MemoryMB -= node.Reserved.MemoryMB 1641 resourceAsk.DiskMB -= node.Reserved.DiskMB 1642 resourceAsk.Networks = nil 1643 1644 // Generate a fake job with one alloc that consumes the whole node 1645 job := mock.Job() 1646 job.TaskGroups[0].Count = 1 1647 job.TaskGroups[0].Tasks[0].Resources = resourceAsk 1648 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1649 1650 alloc := mock.Alloc() 1651 alloc.Resources = resourceAsk 1652 alloc.Job = job 1653 alloc.JobID = job.ID 1654 alloc.NodeID = node.ID 1655 alloc.Name = "my-job.web[0]" 1656 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 1657 1658 // Update the job to place more versions of the task group, drop the count 1659 // and force destructive updates 1660 job2 := job.Copy() 1661 job2.TaskGroups[0].Count = 5 1662 job2.TaskGroups[0].Update = &structs.UpdateStrategy{ 1663 MaxParallel: 5, 1664 HealthCheck: structs.UpdateStrategyHealthCheck_Checks, 1665 MinHealthyTime: 10 * time.Second, 1666 HealthyDeadline: 10 * time.Minute, 1667 } 1668 job2.TaskGroups[0].Tasks[0].Resources = mock.Alloc().Resources 1669 1670 // Update the task, such that it cannot be done in-place 1671 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 1672 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1673 1674 eval := &structs.Evaluation{ 1675 Namespace: structs.DefaultNamespace, 1676 ID: uuid.Generate(), 1677 Priority: 50, 1678 TriggeredBy: structs.EvalTriggerJobRegister, 1679 JobID: job.ID, 1680 Status: structs.EvalStatusPending, 1681 } 1682 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1683 1684 // Process the evaluation 1685 err := h.Process(NewServiceScheduler, eval) 1686 if 
err != nil { 1687 t.Fatalf("err: %v", err) 1688 } 1689 1690 // Ensure a single plan 1691 if len(h.Plans) != 1 { 1692 t.Fatalf("bad: %#v", h.Plans) 1693 } 1694 plan := h.Plans[0] 1695 1696 // Ensure the plan evicted only MaxParallel 1697 var update []*structs.Allocation 1698 for _, updateList := range plan.NodeUpdate { 1699 update = append(update, updateList...) 1700 } 1701 if len(update) != 1 { 1702 t.Fatalf("bad: got %d; want %d: %#v", len(update), 1, plan) 1703 } 1704 1705 // Ensure the plan allocated 1706 var planned []*structs.Allocation 1707 for _, allocList := range plan.NodeAllocation { 1708 planned = append(planned, allocList...) 1709 } 1710 if len(planned) != 1 { 1711 t.Fatalf("bad: %#v", plan) 1712 } 1713 1714 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1715 1716 // Check that the deployment id is attached to the eval 1717 if h.Evals[0].DeploymentID == "" { 1718 t.Fatalf("Eval not annotated with deployment id") 1719 } 1720 1721 // Ensure a deployment was created 1722 if plan.Deployment == nil { 1723 t.Fatalf("bad: %#v", plan) 1724 } 1725 state, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name] 1726 if !ok { 1727 t.Fatalf("bad: %#v", plan) 1728 } 1729 if state.DesiredTotal != 1 && state.DesiredCanaries != 0 { 1730 t.Fatalf("bad: %#v", state) 1731 } 1732 } 1733 1734 func TestServiceSched_JobModify_Canaries(t *testing.T) { 1735 h := NewHarness(t) 1736 1737 // Create some nodes 1738 var nodes []*structs.Node 1739 for i := 0; i < 10; i++ { 1740 node := mock.Node() 1741 nodes = append(nodes, node) 1742 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1743 } 1744 1745 // Generate a fake job with allocations 1746 job := mock.Job() 1747 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1748 1749 var allocs []*structs.Allocation 1750 for i := 0; i < 10; i++ { 1751 alloc := mock.Alloc() 1752 alloc.Job = job 1753 alloc.JobID = job.ID 1754 alloc.NodeID = nodes[i].ID 1755 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1756 allocs = append(allocs, alloc) 1757 
} 1758 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1759 1760 // Update the job 1761 job2 := mock.Job() 1762 job2.ID = job.ID 1763 desiredUpdates := 2 1764 job2.TaskGroups[0].Update = &structs.UpdateStrategy{ 1765 MaxParallel: desiredUpdates, 1766 Canary: desiredUpdates, 1767 HealthCheck: structs.UpdateStrategyHealthCheck_Checks, 1768 MinHealthyTime: 10 * time.Second, 1769 HealthyDeadline: 10 * time.Minute, 1770 } 1771 1772 // Update the task, such that it cannot be done in-place 1773 job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other" 1774 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1775 1776 // Create a mock evaluation to deal with drain 1777 eval := &structs.Evaluation{ 1778 Namespace: structs.DefaultNamespace, 1779 ID: uuid.Generate(), 1780 Priority: 50, 1781 TriggeredBy: structs.EvalTriggerJobRegister, 1782 JobID: job.ID, 1783 Status: structs.EvalStatusPending, 1784 } 1785 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1786 1787 // Process the evaluation 1788 err := h.Process(NewServiceScheduler, eval) 1789 if err != nil { 1790 t.Fatalf("err: %v", err) 1791 } 1792 1793 // Ensure a single plan 1794 if len(h.Plans) != 1 { 1795 t.Fatalf("bad: %#v", h.Plans) 1796 } 1797 plan := h.Plans[0] 1798 1799 // Ensure the plan evicted nothing 1800 var update []*structs.Allocation 1801 for _, updateList := range plan.NodeUpdate { 1802 update = append(update, updateList...) 1803 } 1804 if len(update) != 0 { 1805 t.Fatalf("bad: got %d; want %d: %#v", len(update), 0, plan) 1806 } 1807 1808 // Ensure the plan allocated 1809 var planned []*structs.Allocation 1810 for _, allocList := range plan.NodeAllocation { 1811 planned = append(planned, allocList...) 
1812 } 1813 if len(planned) != desiredUpdates { 1814 t.Fatalf("bad: %#v", plan) 1815 } 1816 1817 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1818 1819 // Check that the deployment id is attached to the eval 1820 if h.Evals[0].DeploymentID == "" { 1821 t.Fatalf("Eval not annotated with deployment id") 1822 } 1823 1824 // Ensure a deployment was created 1825 if plan.Deployment == nil { 1826 t.Fatalf("bad: %#v", plan) 1827 } 1828 state, ok := plan.Deployment.TaskGroups[job.TaskGroups[0].Name] 1829 if !ok { 1830 t.Fatalf("bad: %#v", plan) 1831 } 1832 if state.DesiredTotal != 10 && state.DesiredCanaries != desiredUpdates { 1833 t.Fatalf("bad: %#v", state) 1834 } 1835 1836 // Assert the canaries were added to the placed list 1837 if len(state.PlacedCanaries) != desiredUpdates { 1838 t.Fatalf("bad: %#v", state) 1839 } 1840 } 1841 1842 func TestServiceSched_JobModify_InPlace(t *testing.T) { 1843 h := NewHarness(t) 1844 1845 // Create some nodes 1846 var nodes []*structs.Node 1847 for i := 0; i < 10; i++ { 1848 node := mock.Node() 1849 nodes = append(nodes, node) 1850 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1851 } 1852 1853 // Generate a fake job with allocations and create an older deployment 1854 job := mock.Job() 1855 d := mock.Deployment() 1856 d.JobID = job.ID 1857 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1858 noErr(t, h.State.UpsertDeployment(h.NextIndex(), d)) 1859 1860 // Create allocs that are part of the old deployment 1861 var allocs []*structs.Allocation 1862 for i := 0; i < 10; i++ { 1863 alloc := mock.Alloc() 1864 alloc.Job = job 1865 alloc.JobID = job.ID 1866 alloc.NodeID = nodes[i].ID 1867 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1868 alloc.DeploymentID = d.ID 1869 alloc.DeploymentStatus = &structs.AllocDeploymentStatus{Healthy: helper.BoolToPtr(true)} 1870 allocs = append(allocs, alloc) 1871 } 1872 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 1873 1874 // Update the job 1875 job2 := mock.Job() 1876 job2.ID = job.ID 1877 
desiredUpdates := 4 1878 job2.TaskGroups[0].Update = &structs.UpdateStrategy{ 1879 MaxParallel: desiredUpdates, 1880 HealthCheck: structs.UpdateStrategyHealthCheck_Checks, 1881 MinHealthyTime: 10 * time.Second, 1882 HealthyDeadline: 10 * time.Minute, 1883 } 1884 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 1885 1886 // Create a mock evaluation to deal with drain 1887 eval := &structs.Evaluation{ 1888 Namespace: structs.DefaultNamespace, 1889 ID: uuid.Generate(), 1890 Priority: 50, 1891 TriggeredBy: structs.EvalTriggerJobRegister, 1892 JobID: job.ID, 1893 Status: structs.EvalStatusPending, 1894 } 1895 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 1896 1897 // Process the evaluation 1898 err := h.Process(NewServiceScheduler, eval) 1899 if err != nil { 1900 t.Fatalf("err: %v", err) 1901 } 1902 1903 // Ensure a single plan 1904 if len(h.Plans) != 1 { 1905 t.Fatalf("bad: %#v", h.Plans) 1906 } 1907 plan := h.Plans[0] 1908 1909 // Ensure the plan did not evict any allocs 1910 var update []*structs.Allocation 1911 for _, updateList := range plan.NodeUpdate { 1912 update = append(update, updateList...) 1913 } 1914 if len(update) != 0 { 1915 t.Fatalf("bad: %#v", plan) 1916 } 1917 1918 // Ensure the plan updated the existing allocs 1919 var planned []*structs.Allocation 1920 for _, allocList := range plan.NodeAllocation { 1921 planned = append(planned, allocList...) 
1922 } 1923 if len(planned) != 10 { 1924 t.Fatalf("bad: %#v", plan) 1925 } 1926 for _, p := range planned { 1927 if p.Job != job2 { 1928 t.Fatalf("should update job") 1929 } 1930 } 1931 1932 // Lookup the allocations by JobID 1933 ws := memdb.NewWatchSet() 1934 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 1935 noErr(t, err) 1936 1937 // Ensure all allocations placed 1938 if len(out) != 10 { 1939 t.Fatalf("bad: %#v", out) 1940 } 1941 h.AssertEvalStatus(t, structs.EvalStatusComplete) 1942 1943 // Verify the network did not change 1944 rp := structs.Port{Label: "admin", Value: 5000} 1945 for _, alloc := range out { 1946 for _, resources := range alloc.TaskResources { 1947 if resources.Networks[0].ReservedPorts[0] != rp { 1948 t.Fatalf("bad: %#v", alloc) 1949 } 1950 } 1951 } 1952 1953 // Verify the deployment id was changed and health cleared 1954 for _, alloc := range out { 1955 if alloc.DeploymentID == d.ID { 1956 t.Fatalf("bad: deployment id not cleared") 1957 } else if alloc.DeploymentStatus != nil { 1958 t.Fatalf("bad: deployment status not cleared") 1959 } 1960 } 1961 } 1962 1963 func TestServiceSched_JobModify_DistinctProperty(t *testing.T) { 1964 h := NewHarness(t) 1965 1966 // Create some nodes 1967 var nodes []*structs.Node 1968 for i := 0; i < 10; i++ { 1969 node := mock.Node() 1970 node.Meta["rack"] = fmt.Sprintf("rack%d", i) 1971 nodes = append(nodes, node) 1972 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 1973 } 1974 1975 // Create a job that uses distinct property and has count higher than what is 1976 // possible. 
1977 job := mock.Job() 1978 job.TaskGroups[0].Count = 11 1979 job.Constraints = append(job.Constraints, 1980 &structs.Constraint{ 1981 Operand: structs.ConstraintDistinctProperty, 1982 LTarget: "${meta.rack}", 1983 }) 1984 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 1985 1986 oldJob := job.Copy() 1987 oldJob.JobModifyIndex -= 1 1988 oldJob.TaskGroups[0].Count = 4 1989 1990 // Place 4 of 10 1991 var allocs []*structs.Allocation 1992 for i := 0; i < 4; i++ { 1993 alloc := mock.Alloc() 1994 alloc.Job = oldJob 1995 alloc.JobID = job.ID 1996 alloc.NodeID = nodes[i].ID 1997 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 1998 allocs = append(allocs, alloc) 1999 } 2000 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2001 2002 // Create a mock evaluation to register the job 2003 eval := &structs.Evaluation{ 2004 Namespace: structs.DefaultNamespace, 2005 ID: uuid.Generate(), 2006 Priority: job.Priority, 2007 TriggeredBy: structs.EvalTriggerJobRegister, 2008 JobID: job.ID, 2009 Status: structs.EvalStatusPending, 2010 } 2011 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2012 2013 // Process the evaluation 2014 err := h.Process(NewServiceScheduler, eval) 2015 if err != nil { 2016 t.Fatalf("err: %v", err) 2017 } 2018 2019 // Ensure a single plan 2020 if len(h.Plans) != 1 { 2021 t.Fatalf("bad: %#v", h.Plans) 2022 } 2023 plan := h.Plans[0] 2024 2025 // Ensure the plan doesn't have annotations. 2026 if plan.Annotations != nil { 2027 t.Fatalf("expected no annotations") 2028 } 2029 2030 // Ensure the eval hasn't spawned blocked eval 2031 if len(h.CreateEvals) != 1 { 2032 t.Fatalf("bad: %#v", h.CreateEvals) 2033 } 2034 2035 // Ensure the plan failed to alloc 2036 outEval := h.Evals[0] 2037 if len(outEval.FailedTGAllocs) != 1 { 2038 t.Fatalf("bad: %+v", outEval) 2039 } 2040 2041 // Ensure the plan allocated 2042 var planned []*structs.Allocation 2043 for _, allocList := range plan.NodeAllocation { 2044 planned = append(planned, allocList...) 
2045 } 2046 if len(planned) != 10 { 2047 t.Fatalf("bad: %#v", planned) 2048 } 2049 2050 // Lookup the allocations by JobID 2051 ws := memdb.NewWatchSet() 2052 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2053 noErr(t, err) 2054 2055 // Ensure all allocations placed 2056 if len(out) != 10 { 2057 t.Fatalf("bad: %#v", out) 2058 } 2059 2060 // Ensure different node was used per. 2061 used := make(map[string]struct{}) 2062 for _, alloc := range out { 2063 if _, ok := used[alloc.NodeID]; ok { 2064 t.Fatalf("Node collision %v", alloc.NodeID) 2065 } 2066 used[alloc.NodeID] = struct{}{} 2067 } 2068 2069 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2070 } 2071 2072 func TestServiceSched_JobDeregister_Purged(t *testing.T) { 2073 h := NewHarness(t) 2074 2075 // Generate a fake job with allocations 2076 job := mock.Job() 2077 2078 var allocs []*structs.Allocation 2079 for i := 0; i < 10; i++ { 2080 alloc := mock.Alloc() 2081 alloc.Job = job 2082 alloc.JobID = job.ID 2083 allocs = append(allocs, alloc) 2084 } 2085 for _, alloc := range allocs { 2086 h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)) 2087 } 2088 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2089 2090 // Create a mock evaluation to deregister the job 2091 eval := &structs.Evaluation{ 2092 Namespace: structs.DefaultNamespace, 2093 ID: uuid.Generate(), 2094 Priority: 50, 2095 TriggeredBy: structs.EvalTriggerJobDeregister, 2096 JobID: job.ID, 2097 Status: structs.EvalStatusPending, 2098 } 2099 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2100 2101 // Process the evaluation 2102 err := h.Process(NewServiceScheduler, eval) 2103 if err != nil { 2104 t.Fatalf("err: %v", err) 2105 } 2106 2107 // Ensure a single plan 2108 if len(h.Plans) != 1 { 2109 t.Fatalf("bad: %#v", h.Plans) 2110 } 2111 plan := h.Plans[0] 2112 2113 // Ensure the plan evicted all nodes 2114 if len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) != len(allocs) { 
2115 t.Fatalf("bad: %#v", plan) 2116 } 2117 2118 // Lookup the allocations by JobID 2119 ws := memdb.NewWatchSet() 2120 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2121 noErr(t, err) 2122 2123 // Ensure that the job field on the allocation is still populated 2124 for _, alloc := range out { 2125 if alloc.Job == nil { 2126 t.Fatalf("bad: %#v", alloc) 2127 } 2128 } 2129 2130 // Ensure no remaining allocations 2131 out, _ = structs.FilterTerminalAllocs(out) 2132 if len(out) != 0 { 2133 t.Fatalf("bad: %#v", out) 2134 } 2135 2136 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2137 } 2138 2139 func TestServiceSched_JobDeregister_Stopped(t *testing.T) { 2140 h := NewHarness(t) 2141 2142 // Generate a fake job with allocations 2143 job := mock.Job() 2144 job.Stop = true 2145 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2146 2147 var allocs []*structs.Allocation 2148 for i := 0; i < 10; i++ { 2149 alloc := mock.Alloc() 2150 alloc.Job = job 2151 alloc.JobID = job.ID 2152 allocs = append(allocs, alloc) 2153 } 2154 for _, alloc := range allocs { 2155 h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)) 2156 } 2157 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2158 2159 // Create a mock evaluation to deregister the job 2160 eval := &structs.Evaluation{ 2161 Namespace: structs.DefaultNamespace, 2162 ID: uuid.Generate(), 2163 Priority: 50, 2164 TriggeredBy: structs.EvalTriggerJobDeregister, 2165 JobID: job.ID, 2166 Status: structs.EvalStatusPending, 2167 } 2168 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2169 2170 // Process the evaluation 2171 err := h.Process(NewServiceScheduler, eval) 2172 if err != nil { 2173 t.Fatalf("err: %v", err) 2174 } 2175 2176 // Ensure a single plan 2177 if len(h.Plans) != 1 { 2178 t.Fatalf("bad: %#v", h.Plans) 2179 } 2180 plan := h.Plans[0] 2181 2182 // Ensure the plan evicted all nodes 2183 if len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) != len(allocs) 
{ 2184 t.Fatalf("bad: %#v", plan) 2185 } 2186 2187 // Lookup the allocations by JobID 2188 ws := memdb.NewWatchSet() 2189 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2190 noErr(t, err) 2191 2192 // Ensure that the job field on the allocation is still populated 2193 for _, alloc := range out { 2194 if alloc.Job == nil { 2195 t.Fatalf("bad: %#v", alloc) 2196 } 2197 } 2198 2199 // Ensure no remaining allocations 2200 out, _ = structs.FilterTerminalAllocs(out) 2201 if len(out) != 0 { 2202 t.Fatalf("bad: %#v", out) 2203 } 2204 2205 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2206 } 2207 2208 func TestServiceSched_NodeDown(t *testing.T) { 2209 h := NewHarness(t) 2210 2211 // Register a node 2212 node := mock.Node() 2213 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2214 2215 // Generate a fake job with allocations and an update policy. 2216 job := mock.Job() 2217 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2218 2219 var allocs []*structs.Allocation 2220 for i := 0; i < 10; i++ { 2221 alloc := mock.Alloc() 2222 alloc.Job = job 2223 alloc.JobID = job.ID 2224 alloc.NodeID = node.ID 2225 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2226 allocs = append(allocs, alloc) 2227 } 2228 2229 // Cover each terminal case and ensure it doesn't change to lost 2230 allocs[7].DesiredStatus = structs.AllocDesiredStatusRun 2231 allocs[7].ClientStatus = structs.AllocClientStatusLost 2232 allocs[8].DesiredStatus = structs.AllocDesiredStatusRun 2233 allocs[8].ClientStatus = structs.AllocClientStatusFailed 2234 allocs[9].DesiredStatus = structs.AllocDesiredStatusRun 2235 allocs[9].ClientStatus = structs.AllocClientStatusComplete 2236 2237 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2238 2239 // Mark some allocs as running 2240 ws := memdb.NewWatchSet() 2241 for i := 0; i < 4; i++ { 2242 out, _ := h.State.AllocByID(ws, allocs[i].ID) 2243 out.ClientStatus = structs.AllocClientStatusRunning 2244 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), 
[]*structs.Allocation{out})) 2245 } 2246 2247 // Mark the node as down 2248 noErr(t, h.State.UpdateNodeStatus(h.NextIndex(), node.ID, structs.NodeStatusDown)) 2249 2250 // Create a mock evaluation to deal with drain 2251 eval := &structs.Evaluation{ 2252 Namespace: structs.DefaultNamespace, 2253 ID: uuid.Generate(), 2254 Priority: 50, 2255 TriggeredBy: structs.EvalTriggerNodeUpdate, 2256 JobID: job.ID, 2257 NodeID: node.ID, 2258 Status: structs.EvalStatusPending, 2259 } 2260 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2261 2262 // Process the evaluation 2263 err := h.Process(NewServiceScheduler, eval) 2264 if err != nil { 2265 t.Fatalf("err: %v", err) 2266 } 2267 2268 // Ensure a single plan 2269 if len(h.Plans) != 1 { 2270 t.Fatalf("bad: %#v", h.Plans) 2271 } 2272 plan := h.Plans[0] 2273 2274 // Test the scheduler marked all non-terminal allocations as lost 2275 if len(plan.NodeUpdate[node.ID]) != 7 { 2276 t.Fatalf("bad: %#v", plan) 2277 } 2278 2279 for _, out := range plan.NodeUpdate[node.ID] { 2280 if out.ClientStatus != structs.AllocClientStatusLost && out.DesiredStatus != structs.AllocDesiredStatusStop { 2281 t.Fatalf("bad alloc: %#v", out) 2282 } 2283 } 2284 2285 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2286 } 2287 2288 func TestServiceSched_NodeUpdate(t *testing.T) { 2289 h := NewHarness(t) 2290 2291 // Register a node 2292 node := mock.Node() 2293 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2294 2295 // Generate a fake job with allocations and an update policy. 
2296 job := mock.Job() 2297 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2298 2299 var allocs []*structs.Allocation 2300 for i := 0; i < 10; i++ { 2301 alloc := mock.Alloc() 2302 alloc.Job = job 2303 alloc.JobID = job.ID 2304 alloc.NodeID = node.ID 2305 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2306 allocs = append(allocs, alloc) 2307 } 2308 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2309 2310 // Mark some allocs as running 2311 ws := memdb.NewWatchSet() 2312 for i := 0; i < 4; i++ { 2313 out, _ := h.State.AllocByID(ws, allocs[i].ID) 2314 out.ClientStatus = structs.AllocClientStatusRunning 2315 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{out})) 2316 } 2317 2318 // Create a mock evaluation which won't trigger any new placements 2319 eval := &structs.Evaluation{ 2320 Namespace: structs.DefaultNamespace, 2321 ID: uuid.Generate(), 2322 Priority: 50, 2323 TriggeredBy: structs.EvalTriggerNodeUpdate, 2324 JobID: job.ID, 2325 NodeID: node.ID, 2326 Status: structs.EvalStatusPending, 2327 } 2328 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2329 2330 // Process the evaluation 2331 err := h.Process(NewServiceScheduler, eval) 2332 if err != nil { 2333 t.Fatalf("err: %v", err) 2334 } 2335 if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 { 2336 t.Fatalf("bad queued allocations: %v", h.Evals[0].QueuedAllocations) 2337 } 2338 2339 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2340 } 2341 2342 func TestServiceSched_NodeDrain(t *testing.T) { 2343 h := NewHarness(t) 2344 2345 // Register a draining node 2346 node := mock.Node() 2347 node.Drain = true 2348 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2349 2350 // Create some nodes 2351 for i := 0; i < 10; i++ { 2352 node := mock.Node() 2353 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2354 } 2355 2356 // Generate a fake job with allocations and an update policy. 
2357 job := mock.Job() 2358 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2359 2360 var allocs []*structs.Allocation 2361 for i := 0; i < 10; i++ { 2362 alloc := mock.Alloc() 2363 alloc.Job = job 2364 alloc.JobID = job.ID 2365 alloc.NodeID = node.ID 2366 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2367 allocs = append(allocs, alloc) 2368 } 2369 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2370 2371 // Create a mock evaluation to deal with drain 2372 eval := &structs.Evaluation{ 2373 Namespace: structs.DefaultNamespace, 2374 ID: uuid.Generate(), 2375 Priority: 50, 2376 TriggeredBy: structs.EvalTriggerNodeUpdate, 2377 JobID: job.ID, 2378 NodeID: node.ID, 2379 Status: structs.EvalStatusPending, 2380 } 2381 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2382 2383 // Process the evaluation 2384 err := h.Process(NewServiceScheduler, eval) 2385 if err != nil { 2386 t.Fatalf("err: %v", err) 2387 } 2388 2389 // Ensure a single plan 2390 if len(h.Plans) != 1 { 2391 t.Fatalf("bad: %#v", h.Plans) 2392 } 2393 plan := h.Plans[0] 2394 2395 // Ensure the plan evicted all allocs 2396 if len(plan.NodeUpdate[node.ID]) != len(allocs) { 2397 t.Fatalf("bad: %#v", plan) 2398 } 2399 2400 // Ensure the plan allocated 2401 var planned []*structs.Allocation 2402 for _, allocList := range plan.NodeAllocation { 2403 planned = append(planned, allocList...) 
2404 } 2405 if len(planned) != 10 { 2406 t.Fatalf("bad: %#v", plan) 2407 } 2408 2409 // Lookup the allocations by JobID 2410 ws := memdb.NewWatchSet() 2411 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2412 noErr(t, err) 2413 2414 // Ensure all allocations placed 2415 out, _ = structs.FilterTerminalAllocs(out) 2416 if len(out) != 10 { 2417 t.Fatalf("bad: %#v", out) 2418 } 2419 2420 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2421 } 2422 2423 func TestServiceSched_NodeDrain_Down(t *testing.T) { 2424 h := NewHarness(t) 2425 2426 // Register a draining node 2427 node := mock.Node() 2428 node.Drain = true 2429 node.Status = structs.NodeStatusDown 2430 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2431 2432 // Generate a fake job with allocations 2433 job := mock.Job() 2434 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2435 2436 var allocs []*structs.Allocation 2437 for i := 0; i < 10; i++ { 2438 alloc := mock.Alloc() 2439 alloc.Job = job 2440 alloc.JobID = job.ID 2441 alloc.NodeID = node.ID 2442 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2443 allocs = append(allocs, alloc) 2444 } 2445 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2446 2447 // Set the desired state of the allocs to stop 2448 var stop []*structs.Allocation 2449 for i := 0; i < 10; i++ { 2450 newAlloc := allocs[i].Copy() 2451 newAlloc.ClientStatus = structs.AllocDesiredStatusStop 2452 stop = append(stop, newAlloc) 2453 } 2454 noErr(t, h.State.UpsertAllocs(h.NextIndex(), stop)) 2455 2456 // Mark some of the allocations as running 2457 var running []*structs.Allocation 2458 for i := 4; i < 6; i++ { 2459 newAlloc := stop[i].Copy() 2460 newAlloc.ClientStatus = structs.AllocClientStatusRunning 2461 running = append(running, newAlloc) 2462 } 2463 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), running)) 2464 2465 // Mark some of the allocations as complete 2466 var complete []*structs.Allocation 2467 for i := 6; i < 10; i++ { 2468 newAlloc := stop[i].Copy() 2469 
newAlloc.ClientStatus = structs.AllocClientStatusComplete 2470 complete = append(complete, newAlloc) 2471 } 2472 noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), complete)) 2473 2474 // Create a mock evaluation to deal with the node update 2475 eval := &structs.Evaluation{ 2476 Namespace: structs.DefaultNamespace, 2477 ID: uuid.Generate(), 2478 Priority: 50, 2479 TriggeredBy: structs.EvalTriggerNodeUpdate, 2480 JobID: job.ID, 2481 NodeID: node.ID, 2482 Status: structs.EvalStatusPending, 2483 } 2484 2485 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2486 2487 // Process the evaluation 2488 err := h.Process(NewServiceScheduler, eval) 2489 if err != nil { 2490 t.Fatalf("err: %v", err) 2491 } 2492 2493 // Ensure a single plan 2494 if len(h.Plans) != 1 { 2495 t.Fatalf("bad: %#v", h.Plans) 2496 } 2497 plan := h.Plans[0] 2498 2499 // Ensure the plan evicted non terminal allocs 2500 if len(plan.NodeUpdate[node.ID]) != 6 { 2501 t.Fatalf("bad: %#v", plan) 2502 } 2503 2504 // Ensure that all the allocations which were in running or pending state 2505 // has been marked as lost 2506 var lostAllocs []string 2507 for _, alloc := range plan.NodeUpdate[node.ID] { 2508 lostAllocs = append(lostAllocs, alloc.ID) 2509 } 2510 sort.Strings(lostAllocs) 2511 2512 var expectedLostAllocs []string 2513 for i := 0; i < 6; i++ { 2514 expectedLostAllocs = append(expectedLostAllocs, allocs[i].ID) 2515 } 2516 sort.Strings(expectedLostAllocs) 2517 2518 if !reflect.DeepEqual(expectedLostAllocs, lostAllocs) { 2519 t.Fatalf("expected: %v, actual: %v", expectedLostAllocs, lostAllocs) 2520 } 2521 2522 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2523 } 2524 2525 func TestServiceSched_NodeDrain_Queued_Allocations(t *testing.T) { 2526 h := NewHarness(t) 2527 2528 // Register a draining node 2529 node := mock.Node() 2530 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2531 2532 // Generate a fake job with allocations and an update policy. 
2533 job := mock.Job() 2534 job.TaskGroups[0].Count = 2 2535 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2536 2537 var allocs []*structs.Allocation 2538 for i := 0; i < 2; i++ { 2539 alloc := mock.Alloc() 2540 alloc.Job = job 2541 alloc.JobID = job.ID 2542 alloc.NodeID = node.ID 2543 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2544 allocs = append(allocs, alloc) 2545 } 2546 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2547 2548 node.Drain = true 2549 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2550 2551 // Create a mock evaluation to deal with drain 2552 eval := &structs.Evaluation{ 2553 Namespace: structs.DefaultNamespace, 2554 ID: uuid.Generate(), 2555 Priority: 50, 2556 TriggeredBy: structs.EvalTriggerNodeUpdate, 2557 JobID: job.ID, 2558 NodeID: node.ID, 2559 Status: structs.EvalStatusPending, 2560 } 2561 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2562 2563 // Process the evaluation 2564 err := h.Process(NewServiceScheduler, eval) 2565 if err != nil { 2566 t.Fatalf("err: %v", err) 2567 } 2568 2569 queued := h.Evals[0].QueuedAllocations["web"] 2570 if queued != 2 { 2571 t.Fatalf("expected: %v, actual: %v", 2, queued) 2572 } 2573 } 2574 2575 func TestServiceSched_NodeDrain_UpdateStrategy(t *testing.T) { 2576 h := NewHarness(t) 2577 2578 // Register a draining node 2579 node := mock.Node() 2580 node.Drain = true 2581 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2582 2583 // Create some nodes 2584 for i := 0; i < 10; i++ { 2585 node := mock.Node() 2586 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2587 } 2588 2589 // Generate a fake job with allocations and an update policy. 
2590 job := mock.Job() 2591 mp := 5 2592 u := structs.DefaultUpdateStrategy.Copy() 2593 u.MaxParallel = mp 2594 u.Stagger = time.Second 2595 job.TaskGroups[0].Update = u 2596 2597 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2598 2599 var allocs []*structs.Allocation 2600 for i := 0; i < 10; i++ { 2601 alloc := mock.Alloc() 2602 alloc.Job = job 2603 alloc.JobID = job.ID 2604 alloc.NodeID = node.ID 2605 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2606 allocs = append(allocs, alloc) 2607 } 2608 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2609 2610 // Create a mock evaluation to deal with drain 2611 eval := &structs.Evaluation{ 2612 Namespace: structs.DefaultNamespace, 2613 ID: uuid.Generate(), 2614 Priority: 50, 2615 TriggeredBy: structs.EvalTriggerNodeUpdate, 2616 JobID: job.ID, 2617 NodeID: node.ID, 2618 Status: structs.EvalStatusPending, 2619 } 2620 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2621 2622 // Process the evaluation 2623 err := h.Process(NewServiceScheduler, eval) 2624 if err != nil { 2625 t.Fatalf("err: %v", err) 2626 } 2627 2628 // Ensure a single plan 2629 if len(h.Plans) != 1 { 2630 t.Fatalf("bad: %#v", h.Plans) 2631 } 2632 plan := h.Plans[0] 2633 2634 // Ensure the plan evicted all allocs 2635 if len(plan.NodeUpdate[node.ID]) != mp { 2636 t.Fatalf("bad: %#v", plan) 2637 } 2638 2639 // Ensure the plan allocated 2640 var planned []*structs.Allocation 2641 for _, allocList := range plan.NodeAllocation { 2642 planned = append(planned, allocList...) 2643 } 2644 if len(planned) != mp { 2645 t.Fatalf("bad: %#v", plan) 2646 } 2647 2648 // Ensure there is a followup eval. 
2649 if len(h.CreateEvals) != 1 || 2650 h.CreateEvals[0].TriggeredBy != structs.EvalTriggerRollingUpdate { 2651 t.Fatalf("bad: %#v", h.CreateEvals) 2652 } 2653 2654 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2655 } 2656 2657 func TestServiceSched_RetryLimit(t *testing.T) { 2658 h := NewHarness(t) 2659 h.Planner = &RejectPlan{h} 2660 2661 // Create some nodes 2662 for i := 0; i < 10; i++ { 2663 node := mock.Node() 2664 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2665 } 2666 2667 // Create a job 2668 job := mock.Job() 2669 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2670 2671 // Create a mock evaluation to register the job 2672 eval := &structs.Evaluation{ 2673 Namespace: structs.DefaultNamespace, 2674 ID: uuid.Generate(), 2675 Priority: job.Priority, 2676 TriggeredBy: structs.EvalTriggerJobRegister, 2677 JobID: job.ID, 2678 Status: structs.EvalStatusPending, 2679 } 2680 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2681 2682 // Process the evaluation 2683 err := h.Process(NewServiceScheduler, eval) 2684 if err != nil { 2685 t.Fatalf("err: %v", err) 2686 } 2687 2688 // Ensure multiple plans 2689 if len(h.Plans) == 0 { 2690 t.Fatalf("bad: %#v", h.Plans) 2691 } 2692 2693 // Lookup the allocations by JobID 2694 ws := memdb.NewWatchSet() 2695 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2696 noErr(t, err) 2697 2698 // Ensure no allocations placed 2699 if len(out) != 0 { 2700 t.Fatalf("bad: %#v", out) 2701 } 2702 2703 // Should hit the retry limit 2704 h.AssertEvalStatus(t, structs.EvalStatusFailed) 2705 } 2706 2707 func TestBatchSched_Run_CompleteAlloc(t *testing.T) { 2708 h := NewHarness(t) 2709 2710 // Create a node 2711 node := mock.Node() 2712 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2713 2714 // Create a job 2715 job := mock.Job() 2716 job.Type = structs.JobTypeBatch 2717 job.TaskGroups[0].Count = 1 2718 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2719 2720 // Create a complete alloc 2721 
alloc := mock.Alloc() 2722 alloc.Job = job 2723 alloc.JobID = job.ID 2724 alloc.NodeID = node.ID 2725 alloc.Name = "my-job.web[0]" 2726 alloc.ClientStatus = structs.AllocClientStatusComplete 2727 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 2728 2729 // Create a mock evaluation to register the job 2730 eval := &structs.Evaluation{ 2731 Namespace: structs.DefaultNamespace, 2732 ID: uuid.Generate(), 2733 Priority: job.Priority, 2734 TriggeredBy: structs.EvalTriggerJobRegister, 2735 JobID: job.ID, 2736 Status: structs.EvalStatusPending, 2737 } 2738 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2739 2740 // Process the evaluation 2741 err := h.Process(NewBatchScheduler, eval) 2742 if err != nil { 2743 t.Fatalf("err: %v", err) 2744 } 2745 2746 // Ensure no plan as it should be a no-op 2747 if len(h.Plans) != 0 { 2748 t.Fatalf("bad: %#v", h.Plans) 2749 } 2750 2751 // Lookup the allocations by JobID 2752 ws := memdb.NewWatchSet() 2753 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2754 noErr(t, err) 2755 2756 // Ensure no allocations placed 2757 if len(out) != 1 { 2758 t.Fatalf("bad: %#v", out) 2759 } 2760 2761 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2762 } 2763 2764 func TestBatchSched_Run_FailedAlloc(t *testing.T) { 2765 h := NewHarness(t) 2766 2767 // Create a node 2768 node := mock.Node() 2769 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2770 2771 // Create a job 2772 job := mock.Job() 2773 job.Type = structs.JobTypeBatch 2774 job.TaskGroups[0].Count = 1 2775 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2776 2777 // Create a failed alloc 2778 alloc := mock.Alloc() 2779 alloc.Job = job 2780 alloc.JobID = job.ID 2781 alloc.NodeID = node.ID 2782 alloc.Name = "my-job.web[0]" 2783 alloc.ClientStatus = structs.AllocClientStatusFailed 2784 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 2785 2786 // Create a mock evaluation to register the job 2787 eval 
:= &structs.Evaluation{ 2788 Namespace: structs.DefaultNamespace, 2789 ID: uuid.Generate(), 2790 Priority: job.Priority, 2791 TriggeredBy: structs.EvalTriggerJobRegister, 2792 JobID: job.ID, 2793 Status: structs.EvalStatusPending, 2794 } 2795 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2796 2797 // Process the evaluation 2798 err := h.Process(NewBatchScheduler, eval) 2799 if err != nil { 2800 t.Fatalf("err: %v", err) 2801 } 2802 2803 // Ensure a plan 2804 if len(h.Plans) != 1 { 2805 t.Fatalf("bad: %#v", h.Plans) 2806 } 2807 2808 // Lookup the allocations by JobID 2809 ws := memdb.NewWatchSet() 2810 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2811 noErr(t, err) 2812 2813 // Ensure a replacement alloc was placed. 2814 if len(out) != 2 { 2815 t.Fatalf("bad: %#v", out) 2816 } 2817 2818 // Ensure that the scheduler is recording the correct number of queued 2819 // allocations 2820 queued := h.Evals[0].QueuedAllocations["web"] 2821 if queued != 0 { 2822 t.Fatalf("expected: %v, actual: %v", 1, queued) 2823 } 2824 2825 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2826 } 2827 2828 func TestBatchSched_Run_FailedAllocQueuedAllocations(t *testing.T) { 2829 h := NewHarness(t) 2830 2831 node := mock.Node() 2832 node.Drain = true 2833 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2834 2835 // Create a job 2836 job := mock.Job() 2837 job.Type = structs.JobTypeBatch 2838 job.TaskGroups[0].Count = 1 2839 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2840 2841 // Create a failed alloc 2842 alloc := mock.Alloc() 2843 alloc.Job = job 2844 alloc.JobID = job.ID 2845 alloc.NodeID = node.ID 2846 alloc.Name = "my-job.web[0]" 2847 alloc.ClientStatus = structs.AllocClientStatusFailed 2848 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 2849 2850 // Create a mock evaluation to register the job 2851 eval := &structs.Evaluation{ 2852 Namespace: structs.DefaultNamespace, 2853 ID: uuid.Generate(), 2854 
Priority: job.Priority, 2855 TriggeredBy: structs.EvalTriggerJobRegister, 2856 JobID: job.ID, 2857 Status: structs.EvalStatusPending, 2858 } 2859 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2860 2861 // Process the evaluation 2862 err := h.Process(NewBatchScheduler, eval) 2863 if err != nil { 2864 t.Fatalf("err: %v", err) 2865 } 2866 2867 // Ensure that the scheduler is recording the correct number of queued 2868 // allocations 2869 queued := h.Evals[0].QueuedAllocations["web"] 2870 if queued != 1 { 2871 t.Fatalf("expected: %v, actual: %v", 1, queued) 2872 } 2873 } 2874 2875 func TestBatchSched_ReRun_SuccessfullyFinishedAlloc(t *testing.T) { 2876 h := NewHarness(t) 2877 2878 // Create two nodes, one that is drained and has a successfully finished 2879 // alloc and a fresh undrained one 2880 node := mock.Node() 2881 node.Drain = true 2882 node2 := mock.Node() 2883 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2884 noErr(t, h.State.UpsertNode(h.NextIndex(), node2)) 2885 2886 // Create a job 2887 job := mock.Job() 2888 job.Type = structs.JobTypeBatch 2889 job.TaskGroups[0].Count = 1 2890 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2891 2892 // Create a successful alloc 2893 alloc := mock.Alloc() 2894 alloc.Job = job 2895 alloc.JobID = job.ID 2896 alloc.NodeID = node.ID 2897 alloc.Name = "my-job.web[0]" 2898 alloc.ClientStatus = structs.AllocClientStatusComplete 2899 alloc.TaskStates = map[string]*structs.TaskState{ 2900 "web": { 2901 State: structs.TaskStateDead, 2902 Events: []*structs.TaskEvent{ 2903 { 2904 Type: structs.TaskTerminated, 2905 ExitCode: 0, 2906 }, 2907 }, 2908 }, 2909 } 2910 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 2911 2912 // Create a mock evaluation to rerun the job 2913 eval := &structs.Evaluation{ 2914 Namespace: structs.DefaultNamespace, 2915 ID: uuid.Generate(), 2916 Priority: job.Priority, 2917 TriggeredBy: structs.EvalTriggerJobRegister, 2918 JobID: job.ID, 2919 Status: 
structs.EvalStatusPending, 2920 } 2921 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2922 2923 // Process the evaluation 2924 err := h.Process(NewBatchScheduler, eval) 2925 if err != nil { 2926 t.Fatalf("err: %v", err) 2927 } 2928 2929 // Ensure no plan 2930 if len(h.Plans) != 0 { 2931 t.Fatalf("bad: %#v", h.Plans) 2932 } 2933 2934 // Lookup the allocations by JobID 2935 ws := memdb.NewWatchSet() 2936 out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false) 2937 noErr(t, err) 2938 2939 // Ensure no replacement alloc was placed. 2940 if len(out) != 1 { 2941 t.Fatalf("bad: %#v", out) 2942 } 2943 2944 h.AssertEvalStatus(t, structs.EvalStatusComplete) 2945 } 2946 2947 // This test checks that terminal allocations that receive an in-place updated 2948 // are not added to the plan 2949 func TestBatchSched_JobModify_InPlace_Terminal(t *testing.T) { 2950 h := NewHarness(t) 2951 2952 // Create some nodes 2953 var nodes []*structs.Node 2954 for i := 0; i < 10; i++ { 2955 node := mock.Node() 2956 nodes = append(nodes, node) 2957 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 2958 } 2959 2960 // Generate a fake job with allocations 2961 job := mock.Job() 2962 job.Type = structs.JobTypeBatch 2963 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 2964 2965 var allocs []*structs.Allocation 2966 for i := 0; i < 10; i++ { 2967 alloc := mock.Alloc() 2968 alloc.Job = job 2969 alloc.JobID = job.ID 2970 alloc.NodeID = nodes[i].ID 2971 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 2972 alloc.ClientStatus = structs.AllocClientStatusComplete 2973 allocs = append(allocs, alloc) 2974 } 2975 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 2976 2977 // Create a mock evaluation to trigger the job 2978 eval := &structs.Evaluation{ 2979 Namespace: structs.DefaultNamespace, 2980 ID: uuid.Generate(), 2981 Priority: 50, 2982 TriggeredBy: structs.EvalTriggerJobRegister, 2983 JobID: job.ID, 2984 Status: structs.EvalStatusPending, 2985 } 2986 noErr(t, 
h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 2987 2988 // Process the evaluation 2989 err := h.Process(NewBatchScheduler, eval) 2990 if err != nil { 2991 t.Fatalf("err: %v", err) 2992 } 2993 2994 // Ensure no plan 2995 if len(h.Plans) != 0 { 2996 t.Fatalf("bad: %#v", h.Plans[0]) 2997 } 2998 } 2999 3000 // This test ensures that terminal jobs from older versions are ignored. 3001 func TestBatchSched_JobModify_Destructive_Terminal(t *testing.T) { 3002 h := NewHarness(t) 3003 3004 // Create some nodes 3005 var nodes []*structs.Node 3006 for i := 0; i < 10; i++ { 3007 node := mock.Node() 3008 nodes = append(nodes, node) 3009 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 3010 } 3011 3012 // Generate a fake job with allocations 3013 job := mock.Job() 3014 job.Type = structs.JobTypeBatch 3015 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 3016 3017 var allocs []*structs.Allocation 3018 for i := 0; i < 10; i++ { 3019 alloc := mock.Alloc() 3020 alloc.Job = job 3021 alloc.JobID = job.ID 3022 alloc.NodeID = nodes[i].ID 3023 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 3024 alloc.ClientStatus = structs.AllocClientStatusComplete 3025 allocs = append(allocs, alloc) 3026 } 3027 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 3028 3029 // Update the job 3030 job2 := mock.Job() 3031 job2.ID = job.ID 3032 job2.Type = structs.JobTypeBatch 3033 job2.Version++ 3034 job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"} 3035 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 3036 3037 allocs = nil 3038 for i := 0; i < 10; i++ { 3039 alloc := mock.Alloc() 3040 alloc.Job = job2 3041 alloc.JobID = job2.ID 3042 alloc.NodeID = nodes[i].ID 3043 alloc.Name = fmt.Sprintf("my-job.web[%d]", i) 3044 alloc.ClientStatus = structs.AllocClientStatusComplete 3045 alloc.TaskStates = map[string]*structs.TaskState{ 3046 "web": { 3047 State: structs.TaskStateDead, 3048 Events: []*structs.TaskEvent{ 3049 { 3050 Type: structs.TaskTerminated, 3051 ExitCode: 0, 3052 }, 
3053 }, 3054 }, 3055 } 3056 allocs = append(allocs, alloc) 3057 } 3058 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 3059 3060 // Create a mock evaluation to deal with drain 3061 eval := &structs.Evaluation{ 3062 Namespace: structs.DefaultNamespace, 3063 ID: uuid.Generate(), 3064 Priority: 50, 3065 TriggeredBy: structs.EvalTriggerJobRegister, 3066 JobID: job.ID, 3067 Status: structs.EvalStatusPending, 3068 } 3069 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 3070 3071 // Process the evaluation 3072 err := h.Process(NewBatchScheduler, eval) 3073 if err != nil { 3074 t.Fatalf("err: %v", err) 3075 } 3076 3077 // Ensure a plan 3078 if len(h.Plans) != 0 { 3079 t.Fatalf("bad: %#v", h.Plans) 3080 } 3081 } 3082 3083 // This test asserts that an allocation from an old job that is running on a 3084 // drained node is cleaned up. 3085 func TestBatchSched_NodeDrain_Running_OldJob(t *testing.T) { 3086 h := NewHarness(t) 3087 3088 // Create two nodes, one that is drained and has a successfully finished 3089 // alloc and a fresh undrained one 3090 node := mock.Node() 3091 node.Drain = true 3092 node2 := mock.Node() 3093 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 3094 noErr(t, h.State.UpsertNode(h.NextIndex(), node2)) 3095 3096 // Create a job 3097 job := mock.Job() 3098 job.Type = structs.JobTypeBatch 3099 job.TaskGroups[0].Count = 1 3100 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 3101 3102 // Create a running alloc 3103 alloc := mock.Alloc() 3104 alloc.Job = job 3105 alloc.JobID = job.ID 3106 alloc.NodeID = node.ID 3107 alloc.Name = "my-job.web[0]" 3108 alloc.ClientStatus = structs.AllocClientStatusRunning 3109 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 3110 3111 // Create an update job 3112 job2 := job.Copy() 3113 job2.TaskGroups[0].Tasks[0].Env = map[string]string{"foo": "bar"} 3114 noErr(t, h.State.UpsertJob(h.NextIndex(), job2)) 3115 3116 // Create a mock evaluation to register the job 3117 eval 
:= &structs.Evaluation{ 3118 Namespace: structs.DefaultNamespace, 3119 ID: uuid.Generate(), 3120 Priority: job.Priority, 3121 TriggeredBy: structs.EvalTriggerJobRegister, 3122 JobID: job.ID, 3123 Status: structs.EvalStatusPending, 3124 } 3125 3126 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 3127 3128 // Process the evaluation 3129 err := h.Process(NewBatchScheduler, eval) 3130 if err != nil { 3131 t.Fatalf("err: %v", err) 3132 } 3133 3134 // Ensure a plan 3135 if len(h.Plans) != 1 { 3136 t.Fatalf("bad: %#v", h.Plans) 3137 } 3138 3139 plan := h.Plans[0] 3140 3141 // Ensure the plan evicted 1 3142 if len(plan.NodeUpdate[node.ID]) != 1 { 3143 t.Fatalf("bad: %#v", plan) 3144 } 3145 3146 // Ensure the plan places 1 3147 if len(plan.NodeAllocation[node2.ID]) != 1 { 3148 t.Fatalf("bad: %#v", plan) 3149 } 3150 3151 h.AssertEvalStatus(t, structs.EvalStatusComplete) 3152 } 3153 3154 // This test asserts that an allocation from a job that is complete on a 3155 // drained node is ignored up. 
3156 func TestBatchSched_NodeDrain_Complete(t *testing.T) { 3157 h := NewHarness(t) 3158 3159 // Create two nodes, one that is drained and has a successfully finished 3160 // alloc and a fresh undrained one 3161 node := mock.Node() 3162 node.Drain = true 3163 node2 := mock.Node() 3164 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 3165 noErr(t, h.State.UpsertNode(h.NextIndex(), node2)) 3166 3167 // Create a job 3168 job := mock.Job() 3169 job.Type = structs.JobTypeBatch 3170 job.TaskGroups[0].Count = 1 3171 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 3172 3173 // Create a complete alloc 3174 alloc := mock.Alloc() 3175 alloc.Job = job 3176 alloc.JobID = job.ID 3177 alloc.NodeID = node.ID 3178 alloc.Name = "my-job.web[0]" 3179 alloc.ClientStatus = structs.AllocClientStatusComplete 3180 noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc})) 3181 3182 // Create a mock evaluation to register the job 3183 eval := &structs.Evaluation{ 3184 Namespace: structs.DefaultNamespace, 3185 ID: uuid.Generate(), 3186 Priority: job.Priority, 3187 TriggeredBy: structs.EvalTriggerJobRegister, 3188 JobID: job.ID, 3189 Status: structs.EvalStatusPending, 3190 } 3191 3192 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 3193 3194 // Process the evaluation 3195 err := h.Process(NewBatchScheduler, eval) 3196 if err != nil { 3197 t.Fatalf("err: %v", err) 3198 } 3199 3200 // Ensure no plan 3201 if len(h.Plans) != 0 { 3202 t.Fatalf("bad: %#v", h.Plans) 3203 } 3204 3205 h.AssertEvalStatus(t, structs.EvalStatusComplete) 3206 } 3207 3208 // This is a slightly odd test but it ensures that we handle a scale down of a 3209 // task group's count and that it works even if all the allocs have the same 3210 // name. 
3211 func TestBatchSched_ScaleDown_SameName(t *testing.T) { 3212 h := NewHarness(t) 3213 3214 // Create a node 3215 node := mock.Node() 3216 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 3217 3218 // Create a job 3219 job := mock.Job() 3220 job.Type = structs.JobTypeBatch 3221 job.TaskGroups[0].Count = 1 3222 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 3223 3224 // Create a few running alloc 3225 var allocs []*structs.Allocation 3226 for i := 0; i < 5; i++ { 3227 alloc := mock.Alloc() 3228 alloc.Job = job 3229 alloc.JobID = job.ID 3230 alloc.NodeID = node.ID 3231 alloc.Name = "my-job.web[0]" 3232 alloc.ClientStatus = structs.AllocClientStatusRunning 3233 allocs = append(allocs, alloc) 3234 } 3235 noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs)) 3236 3237 // Create a mock evaluation to register the job 3238 eval := &structs.Evaluation{ 3239 Namespace: structs.DefaultNamespace, 3240 ID: uuid.Generate(), 3241 Priority: job.Priority, 3242 TriggeredBy: structs.EvalTriggerJobRegister, 3243 JobID: job.ID, 3244 Status: structs.EvalStatusPending, 3245 } 3246 3247 noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval})) 3248 3249 // Process the evaluation 3250 err := h.Process(NewBatchScheduler, eval) 3251 if err != nil { 3252 t.Fatalf("err: %v", err) 3253 } 3254 3255 // Ensure a plan 3256 if len(h.Plans) != 1 { 3257 t.Fatalf("bad: %#v", h.Plans) 3258 } 3259 3260 plan := h.Plans[0] 3261 3262 // Ensure the plan evicted 4 of the 5 3263 if len(plan.NodeUpdate[node.ID]) != 4 { 3264 t.Fatalf("bad: %#v", plan) 3265 } 3266 3267 h.AssertEvalStatus(t, structs.EvalStatusComplete) 3268 } 3269 3270 func TestGenericSched_ChainedAlloc(t *testing.T) { 3271 h := NewHarness(t) 3272 3273 // Create some nodes 3274 for i := 0; i < 10; i++ { 3275 node := mock.Node() 3276 noErr(t, h.State.UpsertNode(h.NextIndex(), node)) 3277 } 3278 3279 // Create a job 3280 job := mock.Job() 3281 noErr(t, h.State.UpsertJob(h.NextIndex(), job)) 3282 3283 // Create a mock evaluation 
	// to register the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
	// Process the evaluation
	if err := h.Process(NewServiceScheduler, eval); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Remember the IDs of the initial placements so the follow-up run can be
	// checked against them.
	var allocIDs []string
	for _, allocList := range h.Plans[0].NodeAllocation {
		for _, alloc := range allocList {
			allocIDs = append(allocIDs, alloc.ID)
		}
	}
	sort.Strings(allocIDs)

	// Create a new harness to invoke the scheduler again. h1 shares h's state
	// store, so everything written above is visible to it.
	h1 := NewHarnessWithState(t, h.State)
	job1 := mock.Job()
	job1.ID = job.ID
	// Change the task env and raise the count; NOTE(review): presumably the
	// env change makes the update destructive so existing allocs are
	// replaced — confirm against the job diff logic.
	job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
	job1.TaskGroups[0].Count = 12
	noErr(t, h1.State.UpsertJob(h1.NextIndex(), job1))

	// Create a mock evaluation to update the job
	eval1 := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    job1.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job1.ID,
		Status:      structs.EvalStatusPending,
	}
	// Upserting through h is equivalent here since h1 was built on h.State.
	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))

	// Process the evaluation
	if err := h1.Process(NewServiceScheduler, eval1); err != nil {
		t.Fatalf("err: %v", err)
	}

	plan := h1.Plans[0]

	// Collect all the chained allocation ids and the new allocations which
	// don't have any chained allocations
	var prevAllocs []string
	var newAllocs []string
	for _, allocList := range plan.NodeAllocation {
		for _, alloc := range allocList {
			if alloc.PreviousAllocation == "" {
				newAllocs = append(newAllocs, alloc.ID)
				continue
			}
			prevAllocs = append(prevAllocs, alloc.PreviousAllocation)
		}
	}
	sort.Strings(prevAllocs)

	// Ensure that the new allocations have their corresponding original
	// allocation ids
	if !reflect.DeepEqual(prevAllocs, allocIDs) {
		t.Fatalf("expected: %v, actual: %v", len(allocIDs), len(prevAllocs))
	}

	// Ensure exactly two new allocations don't have any chained allocations
	// (the count was raised to 12, and only the extras lack a predecessor).
	if len(newAllocs) != 2 {
		t.Fatalf("expected: %v, actual: %v", 2, len(newAllocs))
	}
}

// TestServiceSched_NodeDrain_Sticky ensures that draining a node holding a
// sticky allocation evicts the allocation without creating a replacement.
func TestServiceSched_NodeDrain_Sticky(t *testing.T) {
	h := NewHarness(t)

	// Register a draining node
	node := mock.Node()
	node.Drain = true
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create an alloc on the draining node, already marked for stopping and
	// belonging to a single-count sticky task group.
	alloc := mock.Alloc()
	alloc.Name = "my-job.web[0]"
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	alloc.NodeID = node.ID
	alloc.Job.TaskGroups[0].Count = 1
	alloc.Job.TaskGroups[0].EphemeralDisk.Sticky = true
	noErr(t, h.State.UpsertJob(h.NextIndex(), alloc.Job))
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       alloc.Job.ID,
		NodeID:      node.ID,
		Status:      structs.EvalStatusPending,
	}

	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted the one alloc on the draining node
	if len(plan.NodeUpdate[node.ID]) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan didn't create any new allocations
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// This test ensures that when a job is stopped, the scheduler properly cancels
// an outstanding deployment.
func TestServiceSched_CancelDeployment_Stopped(t *testing.T) {
	h := NewHarness(t)

	// Generate a fake job that is marked stopped, with modify indexes bumped
	// past create so the deployment below reads as belonging to an older
	// version of the job.
	job := mock.Job()
	job.JobModifyIndex = job.CreateIndex + 1
	job.ModifyIndex = job.CreateIndex + 1
	job.Stop = true
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a deployment pinned to the job's pre-stop modify index
	d := mock.Deployment()
	d.JobID = job.ID
	d.JobCreateIndex = job.CreateIndex
	d.JobModifyIndex = job.JobModifyIndex - 1
	noErr(t, h.State.UpsertDeployment(h.NextIndex(), d))

	// Create a mock evaluation to deregister the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobDeregister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan cancelled the existing deployment
	ws := memdb.NewWatchSet()
	out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
	noErr(t, err)

	if out == nil {
		t.Fatalf("No deployment for job")
	}
	if out.ID != d.ID {
		t.Fatalf("Latest deployment for job is different than original deployment")
	}
	if out.Status != structs.DeploymentStatusCancelled {
		t.Fatalf("Deployment status is %q, want %q", out.Status, structs.DeploymentStatusCancelled)
	}
	if out.StatusDescription != structs.DeploymentStatusDescriptionStoppedJob {
		t.Fatalf("Deployment status description is %q, want %q",
			out.StatusDescription, structs.DeploymentStatusDescriptionStoppedJob)
	}

	// Ensure the plan didn't allocate anything
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

// This test ensures that when a job is updated and has an old deployment, the
// scheduler properly cancels the deployment.
func TestServiceSched_CancelDeployment_NewerJob(t *testing.T) {
	h := NewHarness(t)

	// Generate a fake job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a deployment for an old version of the job
	d := mock.Deployment()
	d.JobID = job.ID
	noErr(t, h.State.UpsertDeployment(h.NextIndex(), d))

	// Upsert again to bump job version
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to kick the job
	eval := &structs.Evaluation{
		Namespace:   structs.DefaultNamespace,
		ID:          uuid.Generate(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
		Status:      structs.EvalStatusPending,
	}

	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan cancelled the existing deployment
	ws := memdb.NewWatchSet()
	out, err := h.State.LatestDeploymentByJobID(ws, job.Namespace, job.ID)
	noErr(t, err)

	if out == nil {
		t.Fatalf("No deployment for job")
	}
	if out.ID != d.ID {
		t.Fatalf("Latest deployment for job is different than original deployment")
	}
	if out.Status != structs.DeploymentStatusCancelled {
		t.Fatalf("Deployment status is %q, want %q", out.Status, structs.DeploymentStatusCancelled)
	}
	if out.StatusDescription != structs.DeploymentStatusDescriptionNewerJob {
		t.Fatalf("Deployment status description is %q, want %q",
			out.StatusDescription, structs.DeploymentStatusDescriptionNewerJob)
	}
	// Ensure the plan didn't allocate anything
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}