github.com/ranjib/nomad@v0.1.1-0.20160225204057-97751b02f70b/scheduler/util_test.go

package scheduler

import (
	"fmt"
	"log"
	"os"
	"reflect"
	"testing"

	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestMaterializeTaskGroups(t *testing.T) {
	job := mock.Job()
	index := materializeTaskGroups(job)
	if len(index) != 10 {
		t.Fatalf("Bad: %#v", index)
	}

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("my-job.web[%d]", i)
		tg, ok := index[name]
		if !ok {
			t.Fatalf("bad")
		}
		if tg != job.TaskGroups[0] {
			t.Fatalf("bad")
		}
	}
}

func TestDiffAllocs(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"zip":  false,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Stop the 11th, which is no longer required
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
		},

		// Migrate the 3rd, which is on a tainted node
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[2]",
		},
	}

	diff := diffAllocs(job, tainted, required, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the 3rd alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the 4th alloc
	if len(migrate) != 1 || migrate[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 7: of the 10 required instances, 3 already exist
	// above (updated, ignored, or migrated)
	if len(place) != 7 {
		t.Fatalf("bad: %#v", place)
	}
}

func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	// Create three alive nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"baz":  false,
	}

	allocs := []*structs.Allocation{
		// Update allocation on baz
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Stop allocation on dead.
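		// The node is in the tainted set, so this alloc should land in
		// the stop bucket rather than being updated or migrated.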
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[0]",
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// There should be no migrates.
	if len(migrate) != 0 {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 1: node "foo" is alive but has no allocation yet
	if len(place) != 1 {
		t.Fatalf("bad: %#v", place)
	}
}

func TestReadyNodesInDCs(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true

	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	nodes, dc, err := readyNodesInDCs(state, []string{"dc1", "dc2"})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Only node1 and node2 are ready: node3 is down and node4 is draining.
	if len(nodes) != 2 {
		t.Fatalf("bad: %v", nodes)
	}
	if nodes[0].ID == node3.ID || nodes[1].ID == node3.ID {
		t.Fatalf("Bad: %#v", nodes)
	}
	if count, ok := dc["dc1"]; !ok || count != 1 {
		t.Fatalf("Bad: dc1 count %v", count)
	}
	if count, ok := dc["dc2"]; !ok || count != 1 {
		t.Fatalf("Bad: dc2 count %v", count)
	}
}

func TestRetryMax(t *testing.T) {
	calls := 0
	bad := func() (bool, error) {
		calls += 1
		return false, nil
	}
	err := retryMax(3, bad, nil)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 3 {
		t.Fatalf("mismatch")
	}

	// A reset function that returns true once should allow one more
	// round of retries.
	calls = 0
	first := true
	reset := func() bool {
		if calls == 3 && first {
			first = false
			return true
		}
		return false
	}
	err = retryMax(3, bad, reset)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 6 {
		t.Fatalf("mismatch")
	}

	calls = 0
	good := func() (bool, error) {
		calls += 1
		return true, nil
	}
	err = retryMax(3, good, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if calls != 1 {
		t.Fatalf("mismatch")
	}
}

func TestTaintedNodes(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true
	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	allocs := []*structs.Allocation{
		&structs.Allocation{NodeID: node1.ID},
		&structs.Allocation{NodeID: node2.ID},
		&structs.Allocation{NodeID: node3.ID},
		&structs.Allocation{NodeID: node4.ID},
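		// An alloc on a node that is missing from the state store should
		// also be treated as tainted.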
		&structs.Allocation{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
	}
	tainted, err := taintedNodes(state, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(tainted) != 5 {
		t.Fatalf("bad: %v", tainted)
	}
	if tainted[node1.ID] || tainted[node2.ID] {
		t.Fatalf("Bad: %v", tainted)
	}
	if !tainted[node3.ID] || !tainted[node4.ID] || !tainted["12345678-abcd-efab-cdef-123456789abc"] {
		t.Fatalf("Bad: %v", tainted)
	}
}

func TestShuffleNodes(t *testing.T) {
	// Use a large number of nodes to make the probability of shuffling to the
	// original order very low.
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	orig := make([]*structs.Node, len(nodes))
	copy(orig, nodes)
	shuffleNodes(nodes)
	if reflect.DeepEqual(nodes, orig) {
		t.Fatalf("should not match")
	}
}

func TestTasksUpdated(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()

	// Identical task groups should not be reported as updated.
	if tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	// Changing the task config should.
	j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	if !tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	// Changing the task name should.
	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Name = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j3.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	// Changing the driver should.
	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Driver = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j4.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	// Adding a task should.
	j5 := mock.Job()
	j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks,
		j5.TaskGroups[0].Tasks[0])
	if !tasksUpdated(j1.TaskGroups[0], j5.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	// Changing the dynamic ports should.
	j6 := mock.Job()
	j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{{"http", 0}, {"https", 0}, {"admin", 0}}
	if !tasksUpdated(j1.TaskGroups[0], j6.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	// Changing the environment should.
	j7 := mock.Job()
	j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE"
	if !tasksUpdated(j1.TaskGroups[0], j7.TaskGroups[0]) {
		t.Fatalf("bad")
	}
}

func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 2
	if !evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned true")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 2 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

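	// With the limit equal to the number of allocs, everything should be
	// evicted and placed, consuming the limit exactly.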
	limit := 4
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestSetStatus(t *testing.T) {
	h := NewHarness(t)
	logger := log.New(os.Stderr, "", log.LstdFlags)
	eval := mock.Eval()
	status := "a"
	desc := "b"
	if err := setStatus(logger, h, eval, nil, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval := h.Evals[0]
	if newEval.ID != eval.ID || newEval.Status != status || newEval.StatusDescription != desc {
		t.Fatalf("setStatus() submitted invalid eval: %v", newEval)
	}

	// Setting a next eval should thread its ID through.
	h = NewHarness(t)
	next := mock.Eval()
	if err := setStatus(logger, h, eval, next, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.NextEval != next.ID {
		t.Fatalf("setStatus() didn't set nextEval correctly: %v", newEval)
	}
}

func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that prevents in-place updates.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	task := &structs.Task{Name: "FOO"}
	tg.Tasks = nil
	tg.Tasks = append(tg.Tasks, task)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

func TestInplaceUpdate_NoMatch(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that requires too many resources.
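	// The CPU ask below is deliberately larger than the mock node offers,
	// so the alloc should come back as unplaced.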
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 9999}
	tg.Tasks[0].Resources = resource

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

func TestInplaceUpdate_Success(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:        structs.GenerateUUID(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		TaskGroup: job.TaskGroups[0].Name,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	alloc.PopulateServiceIDs(job.TaskGroups[0])
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	webFeSrvID := alloc.Services["web-frontend"]
	adminSrvID := alloc.Services["web-admin"]

	if webFeSrvID == "" || adminSrvID == "" {
		t.Fatal("Service ID needs to be generated for service")
	}

	// Create a new task group that updates the resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 737}
	tg.Tasks[0].Resources = resource
	newServices := []*structs.Service{
		{
			Name:      "dummy-service",
			PortLabel: "http",
		},
		{
			Name:      "dummy-service2",
			PortLabel: "http",
		},
	}

	// Delete the second service
	tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

	// Add the new services
	tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)
	stack.SetJob(job)

	// Do the inplace update.
	unplaced := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 0 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	// Get the alloc we inserted.
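	// It should carry the retained web-frontend service plus the two new
	// dummy services.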
	a := ctx.plan.NodeAllocation[alloc.NodeID][0]
	if len(a.Services) != 3 {
		t.Fatalf("Expected number of services: %v, Actual: %v", 3, len(a.Services))
	}

	// Test that the service ID for the old service is still the same
	if a.Services["web-frontend"] != webFeSrvID {
		t.Fatalf("Expected service ID: %v, Actual: %v", webFeSrvID, a.Services["web-frontend"])
	}

	// Test that the map doesn't contain the service ID of the admin service
	// anymore
	if _, ok := a.Services["web-admin"]; ok {
		t.Fatal("Service shouldn't be present")
	}
}

func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 6
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 2 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 2", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestTaskGroupConstraints(t *testing.T) {
	constr := &structs.Constraint{RTarget: "bar"}
	constr2 := &structs.Constraint{LTarget: "foo"}
	constr3 := &structs.Constraint{Operand: "<"}

	tg := &structs.TaskGroup{
		Name:        "web",
		Count:       10,
		Constraints: []*structs.Constraint{constr},
		Tasks: []*structs.Task{
			&structs.Task{
				Driver: "exec",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr2},
			},
			&structs.Task{
				Driver: "docker",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr3},
			},
		},
	}

	// Build the expected values.
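	// Group and task constraints should be combined, the drivers collected,
	// and the per-task resources summed.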
	expConstr := []*structs.Constraint{constr, constr2, constr3}
	expDrivers := map[string]struct{}{"exec": struct{}{}, "docker": struct{}{}}
	expSize := &structs.Resources{
		CPU:      1000,
		MemoryMB: 512,
	}

	actConstraints := taskGroupConstraints(tg)
	if !reflect.DeepEqual(actConstraints.constraints, expConstr) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.constraints, expConstr)
	}
	if !reflect.DeepEqual(actConstraints.drivers, expDrivers) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.drivers, expDrivers)
	}
	if !reflect.DeepEqual(actConstraints.size, expSize) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.size, expSize)
	}
}

func TestInitTaskState(t *testing.T) {
	tg := &structs.TaskGroup{
		Tasks: []*structs.Task{
			&structs.Task{Name: "foo"},
			&structs.Task{Name: "bar"},
		},
	}
	expPending := map[string]*structs.TaskState{
		"foo": &structs.TaskState{State: structs.TaskStatePending},
		"bar": &structs.TaskState{State: structs.TaskStatePending},
	}
	expDead := map[string]*structs.TaskState{
		"foo": &structs.TaskState{State: structs.TaskStateDead},
		"bar": &structs.TaskState{State: structs.TaskStateDead},
	}
	actPending := initTaskState(tg, structs.TaskStatePending)
	actDead := initTaskState(tg, structs.TaskStateDead)

	if !(reflect.DeepEqual(expPending, actPending) && reflect.DeepEqual(expDead, actDead)) {
		t.Fatal("Expected and actual task states are not equal")
	}
}

func TestProgressMade(t *testing.T) {
	noopPlan := &structs.PlanResult{}
	if progressMade(nil) || progressMade(noopPlan) {
		t.Fatal("no-progress plan marked as making progress")
	}

	// A plan with node allocations, node updates, or both counts as progress.
	m := map[string][]*structs.Allocation{
		"foo": []*structs.Allocation{mock.Alloc()},
	}
	both := &structs.PlanResult{
		NodeAllocation: m,
		NodeUpdate:     m,
	}
	update := &structs.PlanResult{NodeUpdate: m}
	alloc := &structs.PlanResult{NodeAllocation: m}
	if !(progressMade(both) && progressMade(update) && progressMade(alloc)) {
		t.Fatal("bad")
	}
}