github.com/ryanslade/nomad@v0.2.4-0.20160128061903-fc95782f2089/scheduler/generic_sched_test.go

package scheduler

import (
	"fmt"
	"testing"
	"time"

	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestServiceSched_JobRegister(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure different ports were used.
	used := make(map[int]struct{})
	for _, alloc := range out {
		for _, resource := range alloc.TaskResources {
			for _, port := range resource.Networks[0].DynamicPorts {
				if _, ok := used[port.Value]; ok {
					t.Fatalf("Port collision %v", port.Value)
				}
				used[port.Value] = struct{}{}
			}
		}
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobRegister_AllocFail(t *testing.T) {
	h := NewHarness(t)

	// Create NO nodes
	// Create a job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan failed to alloc
	if len(plan.FailedAllocs) != 1 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure a single failed allocation was placed
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}

	// Check the coalesced failures
	if out[0].Metrics.CoalescedFailures != 9 {
		t.Fatalf("bad: %#v", out[0].Metrics)
	}

	// Check the available nodes
	if count, ok := out[0].Metrics.NodesAvailable["dc1"]; !ok || count != 0 {
		t.Fatalf("bad: %#v", out[0].Metrics)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobModify(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Add a few terminal status allocations, these should be ignored
	var terminal []*structs.Allocation
	for i := 0; i < 5; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		alloc.DesiredStatus = structs.AllocDesiredStatusFailed
		terminal = append(terminal, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal))

	// Update the job
	job2 := mock.Job()
	job2.ID = job.ID

	// Update the task, such that it cannot be done in-place
	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with the job update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != len(allocs) {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	out = structs.FilterTerminalAllocs(out)
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_JobModify_Rolling(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Update the job with a rolling update strategy
	job2 := mock.Job()
	job2.ID = job.ID
	job2.Update = structs.UpdateStrategy{
		Stagger:     30 * time.Second,
		MaxParallel: 5,
	}

	// Update the task, such that it cannot be done in-place
	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with the job update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted only MaxParallel
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != job2.Update.MaxParallel {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != job2.Update.MaxParallel {
		t.Fatalf("bad: %#v", plan)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Ensure a follow up eval was created
	eval = h.Evals[0]
	if eval.NextEval == "" {
		t.Fatalf("missing next eval")
	}

	// Check for create
	if len(h.CreateEvals) == 0 {
		t.Fatalf("missing created eval")
	}
	create := h.CreateEvals[0]
	if eval.NextEval != create.ID {
		t.Fatalf("ID mismatch")
	}
	if create.PreviousEval != eval.ID {
		t.Fatalf("missing previous eval")
	}

	if create.TriggeredBy != structs.EvalTriggerRollingUpdate {
		t.Fatalf("bad: %#v", create)
	}
}

func TestServiceSched_JobModify_InPlace(t *testing.T) {
	h := NewHarness(t)

	// Create some nodes
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		node := mock.Node()
		nodes = append(nodes, node)
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = nodes[i].ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Update the job
	job2 := mock.Job()
	job2.ID = job.ID
	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))

	// Create a mock evaluation to deal with the job update
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan did not evict any allocs
	var update []*structs.Allocation
	for _, updateList := range plan.NodeUpdate {
		update = append(update, updateList...)
	}
	if len(update) != 0 {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan updated the existing allocs
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}
	for _, p := range planned {
		if p.Job != job2 {
			t.Fatalf("should update job")
		}
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	if len(out) != 10 {
		for _, alloc := range out {
			t.Logf("%#v", alloc)
		}
		t.Fatalf("bad: %#v", out)
	}
	h.AssertEvalStatus(t, structs.EvalStatusComplete)

	// Verify the network did not change
	rp := structs.Port{Label: "main", Value: 5000}
	for _, alloc := range out {
		for _, resources := range alloc.TaskResources {
			if resources.Networks[0].ReservedPorts[0] != rp {
				t.Fatalf("bad: %#v", alloc)
			}
		}
	}
}

func TestServiceSched_JobDeregister(t *testing.T) {
	h := NewHarness(t)

	// Generate a fake job with allocations
	job := mock.Job()

	var allocs []*structs.Allocation
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Create a mock evaluation to deregister the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerJobDeregister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs on the mock node
	if len(plan.NodeUpdate["12345678-abcd-efab-cdef-123456789abc"]) != len(allocs) {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure no remaining allocations
	out = structs.FilterTerminalAllocs(out)
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_NodeDrain(t *testing.T) {
	h := NewHarness(t)

	// Register a draining node
	node := mock.Node()
	node.Drain = true
	noErr(t, h.State.UpsertNode(h.NextIndex(), node))

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Generate a fake job with allocations
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	var allocs []*structs.Allocation
	for i := 0; i < 10; i++ {
		alloc := mock.Alloc()
		alloc.Job = job
		alloc.JobID = job.ID
		alloc.NodeID = node.ID
		alloc.Name = fmt.Sprintf("my-job.web[%d]", i)
		allocs = append(allocs, alloc)
	}
	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))

	// Create a mock evaluation to deal with drain
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    50,
		TriggeredBy: structs.EvalTriggerNodeUpdate,
		JobID:       job.ID,
		NodeID:      node.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure a single plan
	if len(h.Plans) != 1 {
		t.Fatalf("bad: %#v", h.Plans)
	}
	plan := h.Plans[0]

	// Ensure the plan evicted all allocs
	if len(plan.NodeUpdate[node.ID]) != len(allocs) {
		t.Fatalf("bad: %#v", plan)
	}

	// Ensure the plan allocated
	var planned []*structs.Allocation
	for _, allocList := range plan.NodeAllocation {
		planned = append(planned, allocList...)
	}
	if len(planned) != 10 {
		t.Fatalf("bad: %#v", plan)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure all allocations placed
	out = structs.FilterTerminalAllocs(out)
	if len(out) != 10 {
		t.Fatalf("bad: %#v", out)
	}

	h.AssertEvalStatus(t, structs.EvalStatusComplete)
}

func TestServiceSched_RetryLimit(t *testing.T) {
	h := NewHarness(t)
	h.Planner = &RejectPlan{h}

	// Create some nodes
	for i := 0; i < 10; i++ {
		node := mock.Node()
		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
	}

	// Create a job
	job := mock.Job()
	noErr(t, h.State.UpsertJob(h.NextIndex(), job))

	// Create a mock evaluation to register the job
	eval := &structs.Evaluation{
		ID:          structs.GenerateUUID(),
		Priority:    job.Priority,
		TriggeredBy: structs.EvalTriggerJobRegister,
		JobID:       job.ID,
	}

	// Process the evaluation
	err := h.Process(NewServiceScheduler, eval)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure multiple plans
	if len(h.Plans) == 0 {
		t.Fatalf("bad: %#v", h.Plans)
	}

	// Lookup the allocations by JobID
	out, err := h.State.AllocsByJob(job.ID)
	noErr(t, err)

	// Ensure no allocations placed
	if len(out) != 0 {
		t.Fatalf("bad: %#v", out)
	}

	// Should hit the retry limit
	h.AssertEvalStatus(t, structs.EvalStatusFailed)
}