github.com/hhrutter/nomad@v0.6.0-rc2.0.20170723054333-80c4b03f0705/nomad/core_sched_test.go (about) 1 package nomad 2 3 import ( 4 "testing" 5 "time" 6 7 memdb "github.com/hashicorp/go-memdb" 8 "github.com/hashicorp/nomad/nomad/mock" 9 "github.com/hashicorp/nomad/nomad/structs" 10 "github.com/hashicorp/nomad/testutil" 11 "github.com/stretchr/testify/assert" 12 ) 13 14 func TestCoreScheduler_EvalGC(t *testing.T) { 15 s1 := testServer(t, nil) 16 defer s1.Shutdown() 17 testutil.WaitForLeader(t, s1.RPC) 18 19 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 20 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 21 22 // Insert "dead" eval 23 state := s1.fsm.State() 24 eval := mock.Eval() 25 eval.Status = structs.EvalStatusFailed 26 state.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) 27 err := state.UpsertEvals(1000, []*structs.Evaluation{eval}) 28 if err != nil { 29 t.Fatalf("err: %v", err) 30 } 31 32 // Insert "dead" alloc 33 alloc := mock.Alloc() 34 alloc.EvalID = eval.ID 35 alloc.DesiredStatus = structs.AllocDesiredStatusStop 36 alloc.JobID = eval.JobID 37 38 // Insert "lost" alloc 39 alloc2 := mock.Alloc() 40 alloc2.EvalID = eval.ID 41 alloc2.DesiredStatus = structs.AllocDesiredStatusRun 42 alloc2.ClientStatus = structs.AllocClientStatusLost 43 alloc2.JobID = eval.JobID 44 err = state.UpsertAllocs(1001, []*structs.Allocation{alloc, alloc2}) 45 if err != nil { 46 t.Fatalf("err: %v", err) 47 } 48 49 // Update the time tables to make this work 50 tt := s1.fsm.TimeTable() 51 tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold)) 52 53 // Create a core scheduler 54 snap, err := state.Snapshot() 55 if err != nil { 56 t.Fatalf("err: %v", err) 57 } 58 core := NewCoreScheduler(s1, snap) 59 60 // Attempt the GC 61 gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000) 62 err = core.Process(gc) 63 if err != nil { 64 t.Fatalf("err: %v", err) 65 } 66 67 // Should be gone 68 ws := memdb.NewWatchSet() 69 out, err := 
state.EvalByID(ws, eval.ID) 70 if err != nil { 71 t.Fatalf("err: %v", err) 72 } 73 if out != nil { 74 t.Fatalf("bad: %v", out) 75 } 76 77 outA, err := state.AllocByID(ws, alloc.ID) 78 if err != nil { 79 t.Fatalf("err: %v", err) 80 } 81 if outA != nil { 82 t.Fatalf("bad: %v", outA) 83 } 84 85 outA2, err := state.AllocByID(ws, alloc2.ID) 86 if err != nil { 87 t.Fatalf("err: %v", err) 88 } 89 if outA2 != nil { 90 t.Fatalf("bad: %v", outA2) 91 } 92 } 93 94 // An EvalGC should never reap a batch job that has not been stopped 95 func TestCoreScheduler_EvalGC_Batch(t *testing.T) { 96 s1 := testServer(t, nil) 97 defer s1.Shutdown() 98 testutil.WaitForLeader(t, s1.RPC) 99 100 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 101 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 102 103 // Insert a "dead" job 104 state := s1.fsm.State() 105 job := mock.Job() 106 job.Type = structs.JobTypeBatch 107 job.Status = structs.JobStatusDead 108 err := state.UpsertJob(1000, job) 109 if err != nil { 110 t.Fatalf("err: %v", err) 111 } 112 113 // Insert "complete" eval 114 eval := mock.Eval() 115 eval.Status = structs.EvalStatusComplete 116 eval.Type = structs.JobTypeBatch 117 eval.JobID = job.ID 118 err = state.UpsertEvals(1001, []*structs.Evaluation{eval}) 119 if err != nil { 120 t.Fatalf("err: %v", err) 121 } 122 123 // Insert "failed" alloc 124 alloc := mock.Alloc() 125 alloc.JobID = job.ID 126 alloc.EvalID = eval.ID 127 alloc.DesiredStatus = structs.AllocDesiredStatusStop 128 129 // Insert "lost" alloc 130 alloc2 := mock.Alloc() 131 alloc2.JobID = job.ID 132 alloc2.EvalID = eval.ID 133 alloc2.DesiredStatus = structs.AllocDesiredStatusRun 134 alloc2.ClientStatus = structs.AllocClientStatusLost 135 136 err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2}) 137 if err != nil { 138 t.Fatalf("err: %v", err) 139 } 140 141 // Update the time tables to make this work 142 tt := s1.fsm.TimeTable() 143 tt.Witness(2000, 
time.Now().UTC().Add(-1*s1.config.EvalGCThreshold)) 144 145 // Create a core scheduler 146 snap, err := state.Snapshot() 147 if err != nil { 148 t.Fatalf("err: %v", err) 149 } 150 core := NewCoreScheduler(s1, snap) 151 152 // Attempt the GC 153 gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000) 154 err = core.Process(gc) 155 if err != nil { 156 t.Fatalf("err: %v", err) 157 } 158 159 // Nothing should be gone 160 ws := memdb.NewWatchSet() 161 out, err := state.EvalByID(ws, eval.ID) 162 if err != nil { 163 t.Fatalf("err: %v", err) 164 } 165 if out == nil { 166 t.Fatalf("bad: %v", out) 167 } 168 169 outA, err := state.AllocByID(ws, alloc.ID) 170 if err != nil { 171 t.Fatalf("err: %v", err) 172 } 173 if outA == nil { 174 t.Fatalf("bad: %v", outA) 175 } 176 177 outA2, err := state.AllocByID(ws, alloc2.ID) 178 if err != nil { 179 t.Fatalf("err: %v", err) 180 } 181 if outA2 == nil { 182 t.Fatalf("bad: %v", outA2) 183 } 184 185 outB, err := state.JobByID(ws, job.ID) 186 if err != nil { 187 t.Fatalf("err: %v", err) 188 } 189 if outB == nil { 190 t.Fatalf("bad: %v", outB) 191 } 192 } 193 194 // An EvalGC should reap a batch job that has been stopped 195 func TestCoreScheduler_EvalGC_BatchStopped(t *testing.T) { 196 s1 := testServer(t, nil) 197 defer s1.Shutdown() 198 testutil.WaitForLeader(t, s1.RPC) 199 200 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 201 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 202 203 // Create a "dead" job 204 state := s1.fsm.State() 205 job := mock.Job() 206 job.Type = structs.JobTypeBatch 207 job.Status = structs.JobStatusDead 208 209 // Insert "complete" eval 210 eval := mock.Eval() 211 eval.Status = structs.EvalStatusComplete 212 eval.Type = structs.JobTypeBatch 213 eval.JobID = job.ID 214 err := state.UpsertEvals(1001, []*structs.Evaluation{eval}) 215 if err != nil { 216 t.Fatalf("err: %v", err) 217 } 218 219 // Insert "failed" alloc 220 alloc := mock.Alloc() 221 alloc.JobID = job.ID 222 
alloc.EvalID = eval.ID 223 alloc.DesiredStatus = structs.AllocDesiredStatusStop 224 225 // Insert "lost" alloc 226 alloc2 := mock.Alloc() 227 alloc2.JobID = job.ID 228 alloc2.EvalID = eval.ID 229 alloc2.DesiredStatus = structs.AllocDesiredStatusRun 230 alloc2.ClientStatus = structs.AllocClientStatusLost 231 232 err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2}) 233 if err != nil { 234 t.Fatalf("err: %v", err) 235 } 236 237 // Update the time tables to make this work 238 tt := s1.fsm.TimeTable() 239 tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold)) 240 241 // Create a core scheduler 242 snap, err := state.Snapshot() 243 if err != nil { 244 t.Fatalf("err: %v", err) 245 } 246 core := NewCoreScheduler(s1, snap) 247 248 // Attempt the GC 249 gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000) 250 err = core.Process(gc) 251 if err != nil { 252 t.Fatalf("err: %v", err) 253 } 254 255 // Everything should be gone 256 ws := memdb.NewWatchSet() 257 out, err := state.EvalByID(ws, eval.ID) 258 if err != nil { 259 t.Fatalf("err: %v", err) 260 } 261 if out != nil { 262 t.Fatalf("bad: %v", out) 263 } 264 265 outA, err := state.AllocByID(ws, alloc.ID) 266 if err != nil { 267 t.Fatalf("err: %v", err) 268 } 269 if outA != nil { 270 t.Fatalf("bad: %v", outA) 271 } 272 273 outA2, err := state.AllocByID(ws, alloc2.ID) 274 if err != nil { 275 t.Fatalf("err: %v", err) 276 } 277 if outA2 != nil { 278 t.Fatalf("bad: %v", outA2) 279 } 280 } 281 282 func TestCoreScheduler_EvalGC_Partial(t *testing.T) { 283 s1 := testServer(t, nil) 284 defer s1.Shutdown() 285 testutil.WaitForLeader(t, s1.RPC) 286 287 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 288 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 289 290 // Insert "dead" eval 291 state := s1.fsm.State() 292 eval := mock.Eval() 293 eval.Status = structs.EvalStatusComplete 294 state.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) 295 err := 
state.UpsertEvals(1000, []*structs.Evaluation{eval}) 296 if err != nil { 297 t.Fatalf("err: %v", err) 298 } 299 300 // Insert "dead" alloc 301 alloc := mock.Alloc() 302 alloc.EvalID = eval.ID 303 alloc.DesiredStatus = structs.AllocDesiredStatusStop 304 state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) 305 306 // Insert "lost" alloc 307 alloc2 := mock.Alloc() 308 alloc2.JobID = alloc.JobID 309 alloc2.EvalID = eval.ID 310 alloc2.DesiredStatus = structs.AllocDesiredStatusRun 311 alloc2.ClientStatus = structs.AllocClientStatusLost 312 313 err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2}) 314 if err != nil { 315 t.Fatalf("err: %v", err) 316 } 317 318 // Insert "running" alloc 319 alloc3 := mock.Alloc() 320 alloc3.EvalID = eval.ID 321 state.UpsertJobSummary(1003, mock.JobSummary(alloc3.JobID)) 322 err = state.UpsertAllocs(1004, []*structs.Allocation{alloc3}) 323 if err != nil { 324 t.Fatalf("err: %v", err) 325 } 326 327 // Update the time tables to make this work 328 tt := s1.fsm.TimeTable() 329 tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.EvalGCThreshold)) 330 331 // Create a core scheduler 332 snap, err := state.Snapshot() 333 if err != nil { 334 t.Fatalf("err: %v", err) 335 } 336 core := NewCoreScheduler(s1, snap) 337 338 // Attempt the GC 339 gc := s1.coreJobEval(structs.CoreJobEvalGC, 2000) 340 err = core.Process(gc) 341 if err != nil { 342 t.Fatalf("err: %v", err) 343 } 344 345 // Should not be gone 346 ws := memdb.NewWatchSet() 347 out, err := state.EvalByID(ws, eval.ID) 348 if err != nil { 349 t.Fatalf("err: %v", err) 350 } 351 if out == nil { 352 t.Fatalf("bad: %v", out) 353 } 354 355 outA, err := state.AllocByID(ws, alloc3.ID) 356 if err != nil { 357 t.Fatalf("err: %v", err) 358 } 359 if outA == nil { 360 t.Fatalf("bad: %v", outA) 361 } 362 363 // Should be gone 364 outB, err := state.AllocByID(ws, alloc.ID) 365 if err != nil { 366 t.Fatalf("err: %v", err) 367 } 368 if outB != nil { 369 t.Fatalf("bad: %v", outB) 370 } 371 372 
outC, err := state.AllocByID(ws, alloc2.ID) 373 if err != nil { 374 t.Fatalf("err: %v", err) 375 } 376 if outC != nil { 377 t.Fatalf("bad: %v", outC) 378 } 379 } 380 381 func TestCoreScheduler_EvalGC_Force(t *testing.T) { 382 s1 := testServer(t, nil) 383 defer s1.Shutdown() 384 testutil.WaitForLeader(t, s1.RPC) 385 386 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 387 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 388 389 // Insert "dead" eval 390 state := s1.fsm.State() 391 eval := mock.Eval() 392 eval.Status = structs.EvalStatusFailed 393 state.UpsertJobSummary(999, mock.JobSummary(eval.JobID)) 394 err := state.UpsertEvals(1000, []*structs.Evaluation{eval}) 395 if err != nil { 396 t.Fatalf("err: %v", err) 397 } 398 399 // Insert "dead" alloc 400 alloc := mock.Alloc() 401 alloc.EvalID = eval.ID 402 alloc.DesiredStatus = structs.AllocDesiredStatusStop 403 state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) 404 err = state.UpsertAllocs(1002, []*structs.Allocation{alloc}) 405 if err != nil { 406 t.Fatalf("err: %v", err) 407 } 408 409 // Create a core scheduler 410 snap, err := state.Snapshot() 411 if err != nil { 412 t.Fatalf("err: %v", err) 413 } 414 core := NewCoreScheduler(s1, snap) 415 416 // Attempt the GC 417 gc := s1.coreJobEval(structs.CoreJobForceGC, 1002) 418 err = core.Process(gc) 419 if err != nil { 420 t.Fatalf("err: %v", err) 421 } 422 423 // Should be gone 424 ws := memdb.NewWatchSet() 425 out, err := state.EvalByID(ws, eval.ID) 426 if err != nil { 427 t.Fatalf("err: %v", err) 428 } 429 if out != nil { 430 t.Fatalf("bad: %v", out) 431 } 432 433 outA, err := state.AllocByID(ws, alloc.ID) 434 if err != nil { 435 t.Fatalf("err: %v", err) 436 } 437 if outA != nil { 438 t.Fatalf("bad: %v", outA) 439 } 440 } 441 442 func TestCoreScheduler_NodeGC(t *testing.T) { 443 s1 := testServer(t, nil) 444 defer s1.Shutdown() 445 testutil.WaitForLeader(t, s1.RPC) 446 447 // COMPAT Remove in 0.6: Reset the FSM time 
table since we reconcile which sets index 0 448 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 449 450 // Insert "dead" node 451 state := s1.fsm.State() 452 node := mock.Node() 453 node.Status = structs.NodeStatusDown 454 err := state.UpsertNode(1000, node) 455 if err != nil { 456 t.Fatalf("err: %v", err) 457 } 458 459 // Update the time tables to make this work 460 tt := s1.fsm.TimeTable() 461 tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.NodeGCThreshold)) 462 463 // Create a core scheduler 464 snap, err := state.Snapshot() 465 if err != nil { 466 t.Fatalf("err: %v", err) 467 } 468 core := NewCoreScheduler(s1, snap) 469 470 // Attempt the GC 471 gc := s1.coreJobEval(structs.CoreJobNodeGC, 2000) 472 err = core.Process(gc) 473 if err != nil { 474 t.Fatalf("err: %v", err) 475 } 476 477 // Should be gone 478 ws := memdb.NewWatchSet() 479 out, err := state.NodeByID(ws, node.ID) 480 if err != nil { 481 t.Fatalf("err: %v", err) 482 } 483 if out != nil { 484 t.Fatalf("bad: %v", out) 485 } 486 } 487 488 func TestCoreScheduler_NodeGC_TerminalAllocs(t *testing.T) { 489 s1 := testServer(t, nil) 490 defer s1.Shutdown() 491 testutil.WaitForLeader(t, s1.RPC) 492 493 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 494 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 495 496 // Insert "dead" node 497 state := s1.fsm.State() 498 node := mock.Node() 499 node.Status = structs.NodeStatusDown 500 err := state.UpsertNode(1000, node) 501 if err != nil { 502 t.Fatalf("err: %v", err) 503 } 504 505 // Insert a terminal alloc on that node 506 alloc := mock.Alloc() 507 alloc.DesiredStatus = structs.AllocDesiredStatusStop 508 state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) 509 if err := state.UpsertAllocs(1002, []*structs.Allocation{alloc}); err != nil { 510 t.Fatalf("err: %v", err) 511 } 512 513 // Update the time tables to make this work 514 tt := s1.fsm.TimeTable() 515 tt.Witness(2000, 
time.Now().UTC().Add(-1*s1.config.NodeGCThreshold)) 516 517 // Create a core scheduler 518 snap, err := state.Snapshot() 519 if err != nil { 520 t.Fatalf("err: %v", err) 521 } 522 core := NewCoreScheduler(s1, snap) 523 524 // Attempt the GC 525 gc := s1.coreJobEval(structs.CoreJobNodeGC, 2000) 526 err = core.Process(gc) 527 if err != nil { 528 t.Fatalf("err: %v", err) 529 } 530 531 // Should be gone 532 ws := memdb.NewWatchSet() 533 out, err := state.NodeByID(ws, node.ID) 534 if err != nil { 535 t.Fatalf("err: %v", err) 536 } 537 if out != nil { 538 t.Fatalf("bad: %v", out) 539 } 540 } 541 542 func TestCoreScheduler_NodeGC_RunningAllocs(t *testing.T) { 543 s1 := testServer(t, nil) 544 defer s1.Shutdown() 545 testutil.WaitForLeader(t, s1.RPC) 546 547 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 548 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 549 550 // Insert "dead" node 551 state := s1.fsm.State() 552 node := mock.Node() 553 node.Status = structs.NodeStatusDown 554 err := state.UpsertNode(1000, node) 555 if err != nil { 556 t.Fatalf("err: %v", err) 557 } 558 559 // Insert a running alloc on that node 560 alloc := mock.Alloc() 561 alloc.NodeID = node.ID 562 alloc.DesiredStatus = structs.AllocDesiredStatusRun 563 alloc.ClientStatus = structs.AllocClientStatusRunning 564 state.UpsertJobSummary(1001, mock.JobSummary(alloc.JobID)) 565 if err := state.UpsertAllocs(1002, []*structs.Allocation{alloc}); err != nil { 566 t.Fatalf("err: %v", err) 567 } 568 569 // Update the time tables to make this work 570 tt := s1.fsm.TimeTable() 571 tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.NodeGCThreshold)) 572 573 // Create a core scheduler 574 snap, err := state.Snapshot() 575 if err != nil { 576 t.Fatalf("err: %v", err) 577 } 578 core := NewCoreScheduler(s1, snap) 579 580 // Attempt the GC 581 gc := s1.coreJobEval(structs.CoreJobNodeGC, 2000) 582 err = core.Process(gc) 583 if err != nil { 584 t.Fatalf("err: %v", err) 585 } 
586 587 // Should still be here 588 ws := memdb.NewWatchSet() 589 out, err := state.NodeByID(ws, node.ID) 590 if err != nil { 591 t.Fatalf("err: %v", err) 592 } 593 if out == nil { 594 t.Fatalf("bad: %v", out) 595 } 596 } 597 598 func TestCoreScheduler_NodeGC_Force(t *testing.T) { 599 s1 := testServer(t, nil) 600 defer s1.Shutdown() 601 testutil.WaitForLeader(t, s1.RPC) 602 603 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 604 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 605 606 // Insert "dead" node 607 state := s1.fsm.State() 608 node := mock.Node() 609 node.Status = structs.NodeStatusDown 610 err := state.UpsertNode(1000, node) 611 if err != nil { 612 t.Fatalf("err: %v", err) 613 } 614 615 // Create a core scheduler 616 snap, err := state.Snapshot() 617 if err != nil { 618 t.Fatalf("err: %v", err) 619 } 620 core := NewCoreScheduler(s1, snap) 621 622 // Attempt the GC 623 gc := s1.coreJobEval(structs.CoreJobForceGC, 1000) 624 err = core.Process(gc) 625 if err != nil { 626 t.Fatalf("err: %v", err) 627 } 628 629 // Should be gone 630 ws := memdb.NewWatchSet() 631 out, err := state.NodeByID(ws, node.ID) 632 if err != nil { 633 t.Fatalf("err: %v", err) 634 } 635 if out != nil { 636 t.Fatalf("bad: %v", out) 637 } 638 } 639 640 func TestCoreScheduler_JobGC_OutstandingEvals(t *testing.T) { 641 s1 := testServer(t, nil) 642 defer s1.Shutdown() 643 testutil.WaitForLeader(t, s1.RPC) 644 645 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 646 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 647 648 // Insert job. 
649 state := s1.fsm.State() 650 job := mock.Job() 651 job.Type = structs.JobTypeBatch 652 job.Status = structs.JobStatusDead 653 err := state.UpsertJob(1000, job) 654 if err != nil { 655 t.Fatalf("err: %v", err) 656 } 657 658 // Insert two evals, one terminal and one not 659 eval := mock.Eval() 660 eval.JobID = job.ID 661 eval.Status = structs.EvalStatusComplete 662 663 eval2 := mock.Eval() 664 eval2.JobID = job.ID 665 eval2.Status = structs.EvalStatusPending 666 err = state.UpsertEvals(1001, []*structs.Evaluation{eval, eval2}) 667 if err != nil { 668 t.Fatalf("err: %v", err) 669 } 670 671 // Update the time tables to make this work 672 tt := s1.fsm.TimeTable() 673 tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold)) 674 675 // Create a core scheduler 676 snap, err := state.Snapshot() 677 if err != nil { 678 t.Fatalf("err: %v", err) 679 } 680 core := NewCoreScheduler(s1, snap) 681 682 // Attempt the GC 683 gc := s1.coreJobEval(structs.CoreJobJobGC, 2000) 684 err = core.Process(gc) 685 if err != nil { 686 t.Fatalf("err: %v", err) 687 } 688 689 // Should still exist 690 ws := memdb.NewWatchSet() 691 out, err := state.JobByID(ws, job.ID) 692 if err != nil { 693 t.Fatalf("err: %v", err) 694 } 695 if out == nil { 696 t.Fatalf("bad: %v", out) 697 } 698 699 outE, err := state.EvalByID(ws, eval.ID) 700 if err != nil { 701 t.Fatalf("err: %v", err) 702 } 703 if outE == nil { 704 t.Fatalf("bad: %v", outE) 705 } 706 707 outE2, err := state.EvalByID(ws, eval2.ID) 708 if err != nil { 709 t.Fatalf("err: %v", err) 710 } 711 if outE2 == nil { 712 t.Fatalf("bad: %v", outE2) 713 } 714 715 // Update the second eval to be terminal 716 eval2.Status = structs.EvalStatusComplete 717 err = state.UpsertEvals(1003, []*structs.Evaluation{eval2}) 718 if err != nil { 719 t.Fatalf("err: %v", err) 720 } 721 722 // Create a core scheduler 723 snap, err = state.Snapshot() 724 if err != nil { 725 t.Fatalf("err: %v", err) 726 } 727 core = NewCoreScheduler(s1, snap) 728 729 // Attempt 
the GC 730 gc = s1.coreJobEval(structs.CoreJobJobGC, 2000) 731 err = core.Process(gc) 732 if err != nil { 733 t.Fatalf("err: %v", err) 734 } 735 736 // Should not still exist 737 out, err = state.JobByID(ws, job.ID) 738 if err != nil { 739 t.Fatalf("err: %v", err) 740 } 741 if out != nil { 742 t.Fatalf("bad: %v", out) 743 } 744 745 outE, err = state.EvalByID(ws, eval.ID) 746 if err != nil { 747 t.Fatalf("err: %v", err) 748 } 749 if outE != nil { 750 t.Fatalf("bad: %v", outE) 751 } 752 753 outE2, err = state.EvalByID(ws, eval2.ID) 754 if err != nil { 755 t.Fatalf("err: %v", err) 756 } 757 if outE2 != nil { 758 t.Fatalf("bad: %v", outE2) 759 } 760 } 761 762 func TestCoreScheduler_JobGC_OutstandingAllocs(t *testing.T) { 763 s1 := testServer(t, nil) 764 defer s1.Shutdown() 765 testutil.WaitForLeader(t, s1.RPC) 766 767 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 768 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 769 770 // Insert job. 771 state := s1.fsm.State() 772 job := mock.Job() 773 job.Type = structs.JobTypeBatch 774 job.Status = structs.JobStatusDead 775 err := state.UpsertJob(1000, job) 776 if err != nil { 777 t.Fatalf("err: %v", err) 778 } 779 780 // Insert an eval 781 eval := mock.Eval() 782 eval.JobID = job.ID 783 eval.Status = structs.EvalStatusComplete 784 err = state.UpsertEvals(1001, []*structs.Evaluation{eval}) 785 if err != nil { 786 t.Fatalf("err: %v", err) 787 } 788 789 // Insert two allocs, one terminal and one not 790 alloc := mock.Alloc() 791 alloc.JobID = job.ID 792 alloc.EvalID = eval.ID 793 alloc.DesiredStatus = structs.AllocDesiredStatusRun 794 alloc.ClientStatus = structs.AllocClientStatusComplete 795 796 alloc2 := mock.Alloc() 797 alloc2.JobID = job.ID 798 alloc2.EvalID = eval.ID 799 alloc2.DesiredStatus = structs.AllocDesiredStatusRun 800 alloc2.ClientStatus = structs.AllocClientStatusRunning 801 802 err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2}) 803 if err != nil { 
804 t.Fatalf("err: %v", err) 805 } 806 807 // Update the time tables to make this work 808 tt := s1.fsm.TimeTable() 809 tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold)) 810 811 // Create a core scheduler 812 snap, err := state.Snapshot() 813 if err != nil { 814 t.Fatalf("err: %v", err) 815 } 816 core := NewCoreScheduler(s1, snap) 817 818 // Attempt the GC 819 gc := s1.coreJobEval(structs.CoreJobJobGC, 2000) 820 err = core.Process(gc) 821 if err != nil { 822 t.Fatalf("err: %v", err) 823 } 824 825 // Should still exist 826 ws := memdb.NewWatchSet() 827 out, err := state.JobByID(ws, job.ID) 828 if err != nil { 829 t.Fatalf("err: %v", err) 830 } 831 if out == nil { 832 t.Fatalf("bad: %v", out) 833 } 834 835 outA, err := state.AllocByID(ws, alloc.ID) 836 if err != nil { 837 t.Fatalf("err: %v", err) 838 } 839 if outA == nil { 840 t.Fatalf("bad: %v", outA) 841 } 842 843 outA2, err := state.AllocByID(ws, alloc2.ID) 844 if err != nil { 845 t.Fatalf("err: %v", err) 846 } 847 if outA2 == nil { 848 t.Fatalf("bad: %v", outA2) 849 } 850 851 // Update the second alloc to be terminal 852 alloc2.ClientStatus = structs.AllocClientStatusComplete 853 err = state.UpsertAllocs(1003, []*structs.Allocation{alloc2}) 854 if err != nil { 855 t.Fatalf("err: %v", err) 856 } 857 858 // Create a core scheduler 859 snap, err = state.Snapshot() 860 if err != nil { 861 t.Fatalf("err: %v", err) 862 } 863 core = NewCoreScheduler(s1, snap) 864 865 // Attempt the GC 866 gc = s1.coreJobEval(structs.CoreJobJobGC, 2000) 867 err = core.Process(gc) 868 if err != nil { 869 t.Fatalf("err: %v", err) 870 } 871 872 // Should not still exist 873 out, err = state.JobByID(ws, job.ID) 874 if err != nil { 875 t.Fatalf("err: %v", err) 876 } 877 if out != nil { 878 t.Fatalf("bad: %v", out) 879 } 880 881 outA, err = state.AllocByID(ws, alloc.ID) 882 if err != nil { 883 t.Fatalf("err: %v", err) 884 } 885 if outA != nil { 886 t.Fatalf("bad: %v", outA) 887 } 888 889 outA2, err = state.AllocByID(ws, 
alloc2.ID) 890 if err != nil { 891 t.Fatalf("err: %v", err) 892 } 893 if outA2 != nil { 894 t.Fatalf("bad: %v", outA2) 895 } 896 } 897 898 // This test ensures that batch jobs are GC'd in one shot, meaning it all 899 // allocs/evals and job or nothing 900 func TestCoreScheduler_JobGC_OneShot(t *testing.T) { 901 s1 := testServer(t, nil) 902 defer s1.Shutdown() 903 testutil.WaitForLeader(t, s1.RPC) 904 905 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 906 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 907 908 // Insert job. 909 state := s1.fsm.State() 910 job := mock.Job() 911 job.Type = structs.JobTypeBatch 912 err := state.UpsertJob(1000, job) 913 if err != nil { 914 t.Fatalf("err: %v", err) 915 } 916 917 // Insert two complete evals 918 eval := mock.Eval() 919 eval.JobID = job.ID 920 eval.Status = structs.EvalStatusComplete 921 922 eval2 := mock.Eval() 923 eval2.JobID = job.ID 924 eval2.Status = structs.EvalStatusComplete 925 926 err = state.UpsertEvals(1001, []*structs.Evaluation{eval, eval2}) 927 if err != nil { 928 t.Fatalf("err: %v", err) 929 } 930 931 // Insert one complete alloc and one running on distinct evals 932 alloc := mock.Alloc() 933 alloc.JobID = job.ID 934 alloc.EvalID = eval.ID 935 alloc.DesiredStatus = structs.AllocDesiredStatusStop 936 937 alloc2 := mock.Alloc() 938 alloc2.JobID = job.ID 939 alloc2.EvalID = eval2.ID 940 alloc2.DesiredStatus = structs.AllocDesiredStatusRun 941 942 err = state.UpsertAllocs(1002, []*structs.Allocation{alloc, alloc2}) 943 if err != nil { 944 t.Fatalf("err: %v", err) 945 } 946 947 // Force the jobs state to dead 948 job.Status = structs.JobStatusDead 949 950 // Update the time tables to make this work 951 tt := s1.fsm.TimeTable() 952 tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold)) 953 954 // Create a core scheduler 955 snap, err := state.Snapshot() 956 if err != nil { 957 t.Fatalf("err: %v", err) 958 } 959 core := NewCoreScheduler(s1, snap) 960 961 
// Attempt the GC 962 gc := s1.coreJobEval(structs.CoreJobJobGC, 2000) 963 err = core.Process(gc) 964 if err != nil { 965 t.Fatalf("err: %v", err) 966 } 967 968 // Should still exist 969 ws := memdb.NewWatchSet() 970 out, err := state.JobByID(ws, job.ID) 971 if err != nil { 972 t.Fatalf("err: %v", err) 973 } 974 if out == nil { 975 t.Fatalf("bad: %v", out) 976 } 977 978 outE, err := state.EvalByID(ws, eval.ID) 979 if err != nil { 980 t.Fatalf("err: %v", err) 981 } 982 if outE == nil { 983 t.Fatalf("bad: %v", outE) 984 } 985 986 outE2, err := state.EvalByID(ws, eval2.ID) 987 if err != nil { 988 t.Fatalf("err: %v", err) 989 } 990 if outE2 == nil { 991 t.Fatalf("bad: %v", outE2) 992 } 993 994 outA, err := state.AllocByID(ws, alloc.ID) 995 if err != nil { 996 t.Fatalf("err: %v", err) 997 } 998 if outA == nil { 999 t.Fatalf("bad: %v", outA) 1000 } 1001 outA2, err := state.AllocByID(ws, alloc2.ID) 1002 if err != nil { 1003 t.Fatalf("err: %v", err) 1004 } 1005 if outA2 == nil { 1006 t.Fatalf("bad: %v", outA2) 1007 } 1008 } 1009 1010 // This test ensures that stopped jobs are GCd 1011 func TestCoreScheduler_JobGC_Stopped(t *testing.T) { 1012 s1 := testServer(t, nil) 1013 defer s1.Shutdown() 1014 testutil.WaitForLeader(t, s1.RPC) 1015 1016 // COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0 1017 s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10) 1018 1019 // Insert job. 
1020 state := s1.fsm.State() 1021 job := mock.Job() 1022 //job.Status = structs.JobStatusDead 1023 job.Stop = true 1024 err := state.UpsertJob(1000, job) 1025 if err != nil { 1026 t.Fatalf("err: %v", err) 1027 } 1028 1029 // Insert two complete evals 1030 eval := mock.Eval() 1031 eval.JobID = job.ID 1032 eval.Status = structs.EvalStatusComplete 1033 1034 eval2 := mock.Eval() 1035 eval2.JobID = job.ID 1036 eval2.Status = structs.EvalStatusComplete 1037 1038 err = state.UpsertEvals(1001, []*structs.Evaluation{eval, eval2}) 1039 if err != nil { 1040 t.Fatalf("err: %v", err) 1041 } 1042 1043 // Insert one complete alloc 1044 alloc := mock.Alloc() 1045 alloc.JobID = job.ID 1046 alloc.EvalID = eval.ID 1047 alloc.DesiredStatus = structs.AllocDesiredStatusStop 1048 1049 err = state.UpsertAllocs(1002, []*structs.Allocation{alloc}) 1050 if err != nil { 1051 t.Fatalf("err: %v", err) 1052 } 1053 1054 // Update the time tables to make this work 1055 tt := s1.fsm.TimeTable() 1056 tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.JobGCThreshold)) 1057 1058 // Create a core scheduler 1059 snap, err := state.Snapshot() 1060 if err != nil { 1061 t.Fatalf("err: %v", err) 1062 } 1063 core := NewCoreScheduler(s1, snap) 1064 1065 // Attempt the GC 1066 gc := s1.coreJobEval(structs.CoreJobJobGC, 2000) 1067 err = core.Process(gc) 1068 if err != nil { 1069 t.Fatalf("err: %v", err) 1070 } 1071 1072 // Shouldn't still exist 1073 ws := memdb.NewWatchSet() 1074 out, err := state.JobByID(ws, job.ID) 1075 if err != nil { 1076 t.Fatalf("err: %v", err) 1077 } 1078 if out != nil { 1079 t.Fatalf("bad: %v", out) 1080 } 1081 1082 outE, err := state.EvalByID(ws, eval.ID) 1083 if err != nil { 1084 t.Fatalf("err: %v", err) 1085 } 1086 if outE != nil { 1087 t.Fatalf("bad: %v", outE) 1088 } 1089 1090 outE2, err := state.EvalByID(ws, eval2.ID) 1091 if err != nil { 1092 t.Fatalf("err: %v", err) 1093 } 1094 if outE2 != nil { 1095 t.Fatalf("bad: %v", outE2) 1096 } 1097 1098 outA, err := state.AllocByID(ws, 
alloc.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outA != nil {
		t.Fatalf("bad: %v", outA)
	}
}

// A forced GC reaps a dead batch job and its terminal eval regardless of
// the GC thresholds (no time-table manipulation is needed).
func TestCoreScheduler_JobGC_Force(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert job.
	state := s1.fsm.State()
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.Status = structs.JobStatusDead
	err := state.UpsertJob(1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Insert a terminal eval
	eval := mock.Eval()
	eval.JobID = job.ID
	eval.Status = structs.EvalStatusComplete
	err = state.UpsertEvals(1001, []*structs.Evaluation{eval})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a core scheduler
	snap, err := state.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobForceGC, 1002)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Shouldn't still exist
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %v", out)
	}

	outE, err := state.EvalByID(ws, eval.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if outE != nil {
		t.Fatalf("bad: %v", outE)
	}
}

// This test ensures parameterized jobs only get gc'd when stopped
func TestCoreScheduler_JobGC_Parameterized(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert a parameterized job.
	state := s1.fsm.State()
	job := mock.Job()
	job.Type = structs.JobTypeBatch
	job.Status = structs.JobStatusRunning
	job.ParameterizedJob = &structs.ParameterizedJobConfig{
		Payload: structs.DispatchPayloadRequired,
	}
	err := state.UpsertJob(1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a core scheduler
	snap, err := state.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobForceGC, 1002)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should still exist: a running parameterized job must survive even a
	// forced GC.
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("bad: %v", out)
	}

	// Mark the job as stopped and try again
	job2 := job.Copy()
	job2.Stop = true
	err = state.UpsertJob(2000, job2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a core scheduler
	snap, err = state.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core = NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc = s1.coreJobEval(structs.CoreJobForceGC, 2002)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should not exist
	out, err = state.JobByID(ws, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %+v", out)
	}
}

// This test ensures periodic jobs don't get GCd til they are stopped
func TestCoreScheduler_JobGC_Periodic(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert a periodic job.
	state := s1.fsm.State()
	job := mock.PeriodicJob()
	err := state.UpsertJob(1000, job)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a core scheduler
	snap, err := state.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobForceGC, 1002)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should still exist: an unstopped periodic job must survive even a
	// forced GC.
	ws := memdb.NewWatchSet()
	out, err := state.JobByID(ws, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("bad: %v", out)
	}

	// Mark the job as stopped and try again
	job2 := job.Copy()
	job2.Stop = true
	err = state.UpsertJob(2000, job2)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a core scheduler
	snap, err = state.Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core = NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc = s1.coreJobEval(structs.CoreJobForceGC, 2002)
	err = core.Process(gc)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Should not exist
	out, err = state.JobByID(ws, job.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("bad: %+v", out)
	}
}

// TestCoreScheduler_DeploymentGC verifies the deployment GC reaps only
// terminal deployments that have no remaining allocations.
func TestCoreScheduler_DeploymentGC(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)
	assert := assert.New(t)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert an active deployment, a terminal deployment, and a terminal
	// deployment with allocations.
	state := s1.fsm.State()
	d1, d2, d3 := mock.Deployment(), mock.Deployment(), mock.Deployment()
	d1.Status = structs.DeploymentStatusFailed
	d3.Status = structs.DeploymentStatusSuccessful
	assert.Nil(state.UpsertDeployment(1000, d1), "UpsertDeployment")
	assert.Nil(state.UpsertDeployment(1001, d2), "UpsertDeployment")
	assert.Nil(state.UpsertDeployment(1002, d3), "UpsertDeployment")

	// Attach an alloc to d3 so it is not eligible for GC.
	a := mock.Alloc()
	a.JobID = d3.JobID
	a.DeploymentID = d3.ID
	assert.Nil(state.UpsertAllocs(1003, []*structs.Allocation{a}), "UpsertAllocs")

	// Update the time tables to make this work
	tt := s1.fsm.TimeTable()
	tt.Witness(2000, time.Now().UTC().Add(-1*s1.config.DeploymentGCThreshold))

	// Create a core scheduler
	snap, err := state.Snapshot()
	assert.Nil(err, "Snapshot")
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobDeploymentGC, 2000)
	assert.Nil(core.Process(gc), "Process GC")

	// Only the terminal, alloc-free deployment should be gone
	ws := memdb.NewWatchSet()
	out, err := state.DeploymentByID(ws, d1.ID)
	assert.Nil(err, "DeploymentByID")
	assert.Nil(out, "Terminal Deployment")
	out2, err := state.DeploymentByID(ws, d2.ID)
	assert.Nil(err, "DeploymentByID")
	assert.NotNil(out2, "Active Deployment")
	out3, err := state.DeploymentByID(ws, d3.ID)
	assert.Nil(err, "DeploymentByID")
	assert.NotNil(out3, "Terminal Deployment With Allocs")
}

// TestCoreScheduler_DeploymentGC_Force verifies a forced GC reaps terminal
// deployments but leaves active ones untouched.
func TestCoreScheduler_DeploymentGC_Force(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)
	assert := assert.New(t)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Insert terminal and active deployment
	state := s1.fsm.State()
	d1, d2 := mock.Deployment(), mock.Deployment()
	d1.Status = structs.DeploymentStatusFailed
	assert.Nil(state.UpsertDeployment(1000, d1), "UpsertDeployment")
	assert.Nil(state.UpsertDeployment(1001, d2), "UpsertDeployment")

	// Create a core scheduler
	snap, err := state.Snapshot()
	assert.Nil(err, "Snapshot")
	core := NewCoreScheduler(s1, snap)

	// Attempt the GC
	gc := s1.coreJobEval(structs.CoreJobForceGC, 1000)
	assert.Nil(core.Process(gc), "Process Force GC")

	// Should be gone
	ws := memdb.NewWatchSet()
	out, err := state.DeploymentByID(ws, d1.ID)
	assert.Nil(err, "DeploymentByID")
	assert.Nil(out, "Terminal Deployment")
	out2, err := state.DeploymentByID(ws, d2.ID)
	assert.Nil(err, "DeploymentByID")
	assert.NotNil(out2, "Active Deployment")
}

// TestCoreScheduler_PartitionEvalReap verifies eval/alloc reap requests are
// split into batches of at most maxIdsPerReap IDs, filling allocs first.
func TestCoreScheduler_PartitionEvalReap(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Create a core scheduler
	snap, err := s1.fsm.State().Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Set the max ids per reap to something lower; restore the package-level
	// value afterwards so other tests are not affected.
	old := maxIdsPerReap
	maxIdsPerReap = 2
	defer func() { maxIdsPerReap = old }()

	evals := []string{"a", "b", "c"}
	allocs := []string{"1", "2", "3"}
	requests := core.(*CoreScheduler).partitionEvalReap(evals, allocs)
	if len(requests) != 3 {
		t.Fatalf("Expected 3 requests got: %v", requests)
	}

	// NOTE: these checks previously used &&, which only failed when BOTH
	// counts were wrong; || makes each batch size assertion effective.
	first := requests[0]
	if len(first.Allocs) != 2 || len(first.Evals) != 0 {
		t.Fatalf("Unexpected first request: %v", first)
	}

	second := requests[1]
	if len(second.Allocs) != 1 || len(second.Evals) != 1 {
		t.Fatalf("Unexpected second request: %v", second)
	}

	third := requests[2]
	if len(third.Allocs) != 0 || len(third.Evals) != 2 {
		t.Fatalf("Unexpected third request: %v", third)
	}
}

// TestCoreScheduler_PartitionDeploymentReap verifies deployment reap requests
// are split into batches of at most maxIdsPerReap IDs.
func TestCoreScheduler_PartitionDeploymentReap(t *testing.T) {
	s1 := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	// COMPAT Remove in 0.6: Reset the FSM time table since we reconcile which sets index 0
	s1.fsm.timetable.table = make([]TimeTableEntry, 1, 10)

	// Create a core scheduler
	snap, err := s1.fsm.State().Snapshot()
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	core := NewCoreScheduler(s1, snap)

	// Set the max ids per reap to something lower; restore the package-level
	// value afterwards so other tests are not affected.
	old := maxIdsPerReap
	maxIdsPerReap = 2
	defer func() { maxIdsPerReap = old }()

	deployments := []string{"a", "b", "c"}
	requests := core.(*CoreScheduler).partitionDeploymentReap(deployments)
	if len(requests) != 2 {
		t.Fatalf("Expected 2 requests got: %v", requests)
	}

	first := requests[0]
	if len(first.Deployments) != 2 {
		t.Fatalf("Unexpected first request: %v", first)
	}

	second := requests[1]
	if len(second.Deployments) != 1 {
		t.Fatalf("Unexpected second request: %v", second)
	}
}