github.com/ferranbt/nomad@v0.9.3-0.20190607002617-85c449b7667c/nomad/structs/structs_test.go (about) 1 package structs 2 3 import ( 4 "fmt" 5 "os" 6 "reflect" 7 "strings" 8 "testing" 9 "time" 10 11 "github.com/hashicorp/consul/api" 12 "github.com/hashicorp/go-multierror" 13 "github.com/hashicorp/nomad/helper/uuid" 14 "github.com/kr/pretty" 15 "github.com/stretchr/testify/assert" 16 "github.com/stretchr/testify/require" 17 ) 18 19 func TestJob_Validate(t *testing.T) { 20 j := &Job{} 21 err := j.Validate() 22 mErr := err.(*multierror.Error) 23 if !strings.Contains(mErr.Errors[0].Error(), "job region") { 24 t.Fatalf("err: %s", err) 25 } 26 if !strings.Contains(mErr.Errors[1].Error(), "job ID") { 27 t.Fatalf("err: %s", err) 28 } 29 if !strings.Contains(mErr.Errors[2].Error(), "job name") { 30 t.Fatalf("err: %s", err) 31 } 32 if !strings.Contains(mErr.Errors[3].Error(), "namespace") { 33 t.Fatalf("err: %s", err) 34 } 35 if !strings.Contains(mErr.Errors[4].Error(), "job type") { 36 t.Fatalf("err: %s", err) 37 } 38 if !strings.Contains(mErr.Errors[5].Error(), "priority") { 39 t.Fatalf("err: %s", err) 40 } 41 if !strings.Contains(mErr.Errors[6].Error(), "datacenters") { 42 t.Fatalf("err: %s", err) 43 } 44 if !strings.Contains(mErr.Errors[7].Error(), "task groups") { 45 t.Fatalf("err: %s", err) 46 } 47 48 j = &Job{ 49 Type: "invalid-job-type", 50 } 51 err = j.Validate() 52 if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) { 53 t.Errorf("expected %s but found: %v", expected, err) 54 } 55 56 j = &Job{ 57 Type: JobTypeService, 58 Periodic: &PeriodicConfig{ 59 Enabled: true, 60 }, 61 } 62 err = j.Validate() 63 mErr = err.(*multierror.Error) 64 if !strings.Contains(mErr.Error(), "Periodic") { 65 t.Fatalf("err: %s", err) 66 } 67 68 j = &Job{ 69 Region: "global", 70 ID: uuid.Generate(), 71 Namespace: "test", 72 Name: "my-job", 73 Type: JobTypeService, 74 Priority: 50, 75 Datacenters: []string{"dc1"}, 76 TaskGroups: []*TaskGroup{ 77 { 78 Name: "web", 79 RestartPolicy: &RestartPolicy{ 80 Interval: 5 * time.Minute, 81 Delay: 10 * time.Second, 82 Attempts: 10, 83 }, 84 }, 85 { 86 Name: "web", 87 RestartPolicy: &RestartPolicy{ 88 Interval: 5 * time.Minute, 89 Delay: 10 * time.Second, 90 Attempts: 10, 91 }, 92 }, 93 { 94 RestartPolicy: &RestartPolicy{ 95 Interval: 5 * time.Minute, 96 Delay: 10 * time.Second, 97 Attempts: 10, 98 }, 99 }, 100 }, 101 } 102 err = j.Validate() 103 mErr = err.(*multierror.Error) 104 if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") { 105 t.Fatalf("err: %s", err) 106 } 107 if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") { 108 t.Fatalf("err: %s", err) 109 } 110 if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") { 111 t.Fatalf("err: %s", err) 112 } 113 114 // test for empty datacenters 115 j = &Job{ 116 Datacenters: []string{""}, 117 } 118 err = j.Validate() 119 mErr = err.(*multierror.Error) 120 if !strings.Contains(mErr.Error(), "datacenter must be non-empty string") { 121 t.Fatalf("err: %s", err) 122 } 123 } 124 125 func TestJob_Warnings(t *testing.T) { 126 cases := []struct { 127 Name string 128 Job *Job 129 Expected []string 130 }{ 131 { 132 Name: "Higher counts for update stanza", 133 Expected: []string{"max parallel count is greater"}, 134 Job: &Job{ 135 Type: JobTypeService, 136 TaskGroups: []*TaskGroup{ 137 { 138 Name: "foo", 139 Count: 2, 140 Update: &UpdateStrategy{ 141 MaxParallel: 10, 142 }, 143 }, 144 }, 145 }, 146 }, 147 { 148 Name: "AutoPromote mixed TaskGroups", 149 Expected: []string{"auto_promote must be true for all groups"}, 150 Job: &Job{ 151 Type: JobTypeService, 152 TaskGroups: []*TaskGroup{ 153 { 154 Update: &UpdateStrategy{ 155 AutoPromote: true, 156 }, 157 }, 158 { 159 Update: &UpdateStrategy{ 160 AutoPromote: false, 161 }, 162 }, 163 }, 164 }, 165 }, 166 } 167 168 for _, c := range cases { 169 t.Run(c.Name, func(t *testing.T) { 170 warnings := c.Job.Warnings() 171 if warnings == nil { 172 if len(c.Expected) == 0 { 173 return 174 } else { 175 t.Fatal("Got no warnings when they were expected") 176 } 177 } 178 179 a := warnings.Error() 180 for _, e := range c.Expected { 181 if !strings.Contains(a, e) { 182 t.Fatalf("Got warnings %q; didn't contain %q", a, e) 183 } 184 } 185 }) 186 } 187 } 188 189 func TestJob_SpecChanged(t *testing.T) { 190 // Get a base test job 191 base := testJob() 192 193 // Only modify the indexes/mutable state of the job 194 mutatedBase := base.Copy() 195 mutatedBase.Status = "foo" 196 mutatedBase.ModifyIndex = base.ModifyIndex + 100 197 198 // changed contains a spec change that should be detected 199 change := base.Copy() 200 change.Priority = 99 201 202 cases := []struct { 203 Name string 204 Original *Job 205 New *Job 206 Changed bool 207 }{ 208 { 209 Name: "Same job except mutable indexes", 210 Changed: false, 211 Original: base, 212 New: mutatedBase, 213 }, 214 { 215 Name: "Different", 216 Changed: true, 217 Original: base, 218 New: change, 219 }, 220 } 221 222 for _, c := range cases { 223 t.Run(c.Name, func(t *testing.T) { 224 if actual := c.Original.SpecChanged(c.New); actual != c.Changed { 225 t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed) 226 } 227 }) 228 } 229 } 230 231 func testJob() *Job { 232 return &Job{ 233 Region: "global", 234 ID: uuid.Generate(), 235 Namespace: "test", 236 Name: "my-job", 237 Type: JobTypeService, 238 Priority: 50, 239 AllAtOnce: false, 240 Datacenters: []string{"dc1"}, 241 Constraints: []*Constraint{ 242 { 243 LTarget: "$attr.kernel.name", 244 RTarget: "linux", 245 Operand: "=", 246 }, 247 }, 248 Periodic: &PeriodicConfig{ 249 Enabled: false, 250 }, 251 TaskGroups: []*TaskGroup{ 252 { 253 Name: "web", 254 Count: 10, 255 EphemeralDisk: DefaultEphemeralDisk(), 256 RestartPolicy: &RestartPolicy{ 257 Mode: RestartPolicyModeFail, 258 Attempts: 3, 259 Interval: 10 * time.Minute, 260 Delay: 1 * time.Minute, 261 }, 262 ReschedulePolicy: &ReschedulePolicy{ 263 Interval: 5 * time.Minute, 264 Attempts: 10, 265 Delay: 5 * time.Second, 266 DelayFunction: "constant", 267 }, 268 Tasks: []*Task{ 269 { 270 Name: "web", 271 Driver: "exec", 272 Config: map[string]interface{}{ 273 "command": "/bin/date", 274 }, 275 Env: map[string]string{ 276 "FOO": "bar", 277 }, 278 Artifacts: []*TaskArtifact{ 279 { 280 GetterSource: "http://foo.com", 281 }, 282 }, 283 Services: []*Service{ 284 { 285 Name: "${TASK}-frontend", 286 PortLabel: "http", 287 }, 288 }, 289 Resources: &Resources{ 290 CPU: 500, 291 MemoryMB: 256, 292 Networks: []*NetworkResource{ 293 { 294 MBits: 50, 295 DynamicPorts: []Port{{Label: "http"}}, 296 }, 297 }, 298 }, 299 LogConfig: &LogConfig{ 300 MaxFiles: 10, 301 MaxFileSizeMB: 1, 302 }, 303 }, 304 }, 305 Meta: map[string]string{ 306 "elb_check_type": "http", 307 "elb_check_interval": "30s", 308 "elb_check_min": "3", 309 }, 310 }, 311 }, 312 Meta: map[string]string{ 313 "owner": "armon", 314 }, 315 } 316 } 317 318 func TestJob_Copy(t *testing.T) { 319 j := testJob() 320 c := j.Copy() 321 if !reflect.DeepEqual(j, c) { 322 t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j) 323 } 324 } 325 326 func TestJob_IsPeriodic(t *testing.T) { 327 j := &Job{ 328 Type: JobTypeService, 329 Periodic: &PeriodicConfig{ 330 Enabled: true, 331 }, 332 } 333 if !j.IsPeriodic() { 334 t.Fatalf("IsPeriodic() returned false on periodic job") 335 } 336 337 j = &Job{ 338 Type: JobTypeService, 339 } 340 if j.IsPeriodic() { 341 t.Fatalf("IsPeriodic() returned true on non-periodic job") 342 } 343 } 344 345 func TestJob_IsPeriodicActive(t *testing.T) { 346 cases := []struct { 347 job *Job 348 active bool 349 }{ 350 { 351 job: &Job{ 352 Type: JobTypeService, 353 Periodic: &PeriodicConfig{ 354 Enabled: true, 355 }, 356 }, 357 active: true, 358 }, 359 { 360 job: &Job{ 361 Type: JobTypeService, 362 Periodic: &PeriodicConfig{ 363 Enabled: false, 364 }, 365 }, 366 active: false, 367 }, 368 { 369 job: &Job{ 370 Type: JobTypeService, 371 Periodic: &PeriodicConfig{ 372 Enabled: true, 373 }, 374 Stop: true, 375 }, 376 active: false, 377 }, 378 { 379 job: &Job{ 380 Type: JobTypeService, 381 Periodic: &PeriodicConfig{ 382 Enabled: false, 383 }, 384 ParameterizedJob: &ParameterizedJobConfig{}, 385 }, 386 active: false, 387 }, 388 } 389 390 for i, c := range cases { 391 if act := c.job.IsPeriodicActive(); act != c.active { 392 t.Fatalf("case %d failed: got %v; want %v", i, act, c.active) 393 } 394 } 395 } 396 397 func TestJob_SystemJob_Validate(t *testing.T) { 398 j := testJob() 399 j.Type = JobTypeSystem 400 j.TaskGroups[0].ReschedulePolicy = nil 401 j.Canonicalize() 402 403 err := j.Validate() 404 if err == nil || !strings.Contains(err.Error(), "exceed") { 405 t.Fatalf("expect error due to count") 406 } 407 408 j.TaskGroups[0].Count = 0 409 if err := j.Validate(); err != nil { 410 t.Fatalf("unexpected err: %v", err) 411 } 412 413 j.TaskGroups[0].Count = 1 414 if err := j.Validate(); err != nil { 415 t.Fatalf("unexpected err: %v", err) 416 } 417 418 // Add affinities at job, task group and task level, that should fail validation 419 420 j.Affinities = []*Affinity{{ 421 Operand: "=", 422 LTarget: "${node.datacenter}", 423 RTarget: "dc1", 424 }} 425 j.TaskGroups[0].Affinities = []*Affinity{{ 426 Operand: "=", 427 LTarget: "${meta.rack}", 428 RTarget: "r1", 429 }} 430 j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{ 431 Operand: "=", 432 LTarget: "${meta.rack}", 433 RTarget: "r1", 434 }} 435 err = j.Validate() 436 require.NotNil(t, err) 437 require.Contains(t, err.Error(), "System jobs may not have an affinity stanza") 438 439 // Add spread at job and task group level, that should fail validation 440 j.Spreads = []*Spread{{ 441 Attribute: "${node.datacenter}", 442 Weight: 100, 443 }} 444 j.TaskGroups[0].Spreads = []*Spread{{ 445 Attribute: "${node.datacenter}", 446 Weight: 100, 447 }} 448 449 err = j.Validate() 450 require.NotNil(t, err) 451 require.Contains(t, err.Error(), "System jobs may not have a spread stanza") 452 453 } 454 455 func TestJob_VaultPolicies(t *testing.T) { 456 j0 := &Job{} 457 e0 := make(map[string]map[string]*Vault, 0) 458 459 vj1 := &Vault{ 460 Policies: []string{ 461 "p1", 462 "p2", 463 }, 464 } 465 vj2 := &Vault{ 466 Policies: []string{ 467 "p3", 468 "p4", 469 }, 470 } 471 vj3 := &Vault{ 472 Policies: []string{ 473 "p5", 474 }, 475 } 476 j1 := &Job{ 477 TaskGroups: []*TaskGroup{ 478 { 479 Name: "foo", 480 Tasks: []*Task{ 481 { 482 Name: "t1", 483 }, 484 { 485 Name: "t2", 486 Vault: vj1, 487 }, 488 }, 489 }, 490 { 491 Name: "bar", 492 Tasks: []*Task{ 493 { 494 Name: "t3", 495 Vault: vj2, 496 }, 497 { 498 Name: "t4", 499 Vault: vj3, 500 }, 501 }, 502 }, 503 }, 504 } 505 506 e1 := map[string]map[string]*Vault{ 507 "foo": { 508 "t2": vj1, 509 }, 510 "bar": { 511 "t3": vj2, 512 "t4": vj3, 513 }, 514 } 515 516 cases := []struct { 517 Job *Job 518 Expected map[string]map[string]*Vault 519 }{ 520 { 521 Job: j0, 522 Expected: e0, 523 }, 524 { 525 Job: j1, 526 Expected: e1, 527 }, 528 } 529 530 for i, c := range cases { 531 got := c.Job.VaultPolicies() 532 if !reflect.DeepEqual(got, c.Expected) { 533 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 534 } 535 } 536 } 537 538 func TestJob_RequiredSignals(t *testing.T) { 539 j0 := &Job{} 540 e0 := make(map[string]map[string][]string, 0) 541 542 vj1 := &Vault{ 543 Policies: []string{"p1"}, 544 ChangeMode: VaultChangeModeNoop, 545 } 546 vj2 := &Vault{ 547 Policies: []string{"p1"}, 548 ChangeMode: VaultChangeModeSignal, 549 ChangeSignal: "SIGUSR1", 550 } 551 tj1 := &Template{ 552 SourcePath: "foo", 553 DestPath: "bar", 554 ChangeMode: TemplateChangeModeNoop, 555 } 556 tj2 := &Template{ 557 SourcePath: "foo", 558 DestPath: "bar", 559 ChangeMode: TemplateChangeModeSignal, 560 ChangeSignal: "SIGUSR2", 561 } 562 j1 := &Job{ 563 TaskGroups: []*TaskGroup{ 564 { 565 Name: "foo", 566 Tasks: []*Task{ 567 { 568 Name: "t1", 569 }, 570 { 571 Name: "t2", 572 Vault: vj2, 573 Templates: []*Template{tj2}, 574 }, 575 }, 576 }, 577 { 578 Name: "bar", 579 Tasks: []*Task{ 580 { 581 Name: "t3", 582 Vault: vj1, 583 Templates: []*Template{tj1}, 584 }, 585 { 586 Name: "t4", 587 Vault: vj2, 588 }, 589 }, 590 }, 591 }, 592 } 593 594 e1 := map[string]map[string][]string{ 595 "foo": { 596 "t2": {"SIGUSR1", "SIGUSR2"}, 597 }, 598 "bar": { 599 "t4": {"SIGUSR1"}, 600 }, 601 } 602 603 j2 := &Job{ 604 TaskGroups: []*TaskGroup{ 605 { 606 Name: "foo", 607 Tasks: []*Task{ 608 { 609 Name: "t1", 610 KillSignal: "SIGQUIT", 611 }, 612 }, 613 }, 614 }, 615 } 616 617 e2 := map[string]map[string][]string{ 618 "foo": { 619 "t1": {"SIGQUIT"}, 620 }, 621 } 622 623 cases := []struct { 624 Job *Job 625 Expected map[string]map[string][]string 626 }{ 627 { 628 Job: j0, 629 Expected: e0, 630 }, 631 { 632 Job: j1, 633 Expected: e1, 634 }, 635 { 636 Job: j2, 637 Expected: e2, 638 }, 639 } 640 641 for i, c := range cases { 642 got := c.Job.RequiredSignals() 643 if !reflect.DeepEqual(got, c.Expected) { 644 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 645 } 646 } 647 } 648 649 // test new Equal comparisons for components of Jobs 650 func TestJob_PartEqual(t *testing.T) { 651 ns := &Networks{} 652 require.True(t, ns.Equals(&Networks{})) 653 654 ns = &Networks{ 655 &NetworkResource{Device: "eth0"}, 656 } 657 require.True(t, ns.Equals(&Networks{ 658 &NetworkResource{Device: "eth0"}, 659 })) 660 661 ns = &Networks{ 662 &NetworkResource{Device: "eth0"}, 663 &NetworkResource{Device: "eth1"}, 664 &NetworkResource{Device: "eth2"}, 665 } 666 require.True(t, ns.Equals(&Networks{ 667 &NetworkResource{Device: "eth2"}, 668 &NetworkResource{Device: "eth0"}, 669 &NetworkResource{Device: "eth1"}, 670 })) 671 672 cs := &Constraints{ 673 &Constraint{"left0", "right0", "=", ""}, 674 &Constraint{"left1", "right1", "=", ""}, 675 &Constraint{"left2", "right2", "=", ""}, 676 } 677 require.True(t, cs.Equals(&Constraints{ 678 &Constraint{"left0", "right0", "=", ""}, 679 &Constraint{"left2", "right2", "=", ""}, 680 &Constraint{"left1", "right1", "=", ""}, 681 })) 682 683 as := &Affinities{ 684 &Affinity{"left0", "right0", "=", 0, ""}, 685 &Affinity{"left1", "right1", "=", 0, ""}, 686 &Affinity{"left2", "right2", "=", 0, ""}, 687 } 688 require.True(t, as.Equals(&Affinities{ 689 &Affinity{"left0", "right0", "=", 0, ""}, 690 &Affinity{"left2", "right2", "=", 0, ""}, 691 &Affinity{"left1", "right1", "=", 0, ""}, 692 })) 693 } 694 695 func TestTaskGroup_Validate(t *testing.T) { 696 j := testJob() 697 tg := &TaskGroup{ 698 Count: -1, 699 RestartPolicy: &RestartPolicy{ 700 Interval: 5 * time.Minute, 701 Delay: 10 * time.Second, 702 Attempts: 10, 703 Mode: RestartPolicyModeDelay, 704 }, 705 ReschedulePolicy: &ReschedulePolicy{ 706 Interval: 5 * time.Minute, 707 Attempts: 5, 708 Delay: 5 * time.Second, 709 }, 710 } 711 err := tg.Validate(j) 712 mErr := err.(*multierror.Error) 713 if !strings.Contains(mErr.Errors[0].Error(), "group name") { 714 t.Fatalf("err: %s", err) 715 } 716 if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") { 717 t.Fatalf("err: %s", err) 718 } 719 if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") { 720 t.Fatalf("err: %s", err) 721 } 722 723 tg = &TaskGroup{ 724 Tasks: []*Task{ 725 { 726 Name: "task-a", 727 Resources: &Resources{ 728 Networks: []*NetworkResource{ 729 { 730 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 731 }, 732 }, 733 }, 734 }, 735 { 736 Name: "task-b", 737 Resources: &Resources{ 738 Networks: []*NetworkResource{ 739 { 740 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 741 }, 742 }, 743 }, 744 }, 745 }, 746 } 747 err = tg.Validate(&Job{}) 748 expected := `Static port 123 already reserved by task-a:foo` 749 if !strings.Contains(err.Error(), expected) { 750 t.Errorf("expected %s but found: %v", expected, err) 751 } 752 753 tg = &TaskGroup{ 754 Tasks: []*Task{ 755 { 756 Name: "task-a", 757 Resources: &Resources{ 758 Networks: []*NetworkResource{ 759 { 760 ReservedPorts: []Port{ 761 {Label: "foo", Value: 123}, 762 {Label: "bar", Value: 123}, 763 }, 764 }, 765 }, 766 }, 767 }, 768 }, 769 } 770 err = tg.Validate(&Job{}) 771 expected = `Static port 123 already reserved by task-a:foo` 772 if !strings.Contains(err.Error(), expected) { 773 t.Errorf("expected %s but found: %v", expected, err) 774 } 775 776 tg = &TaskGroup{ 777 Name: "web", 778 Count: 1, 779 Tasks: []*Task{ 780 {Name: "web", Leader: true}, 781 {Name: "web", Leader: true}, 782 {}, 783 }, 784 RestartPolicy: &RestartPolicy{ 785 Interval: 5 * time.Minute, 786 Delay: 10 * time.Second, 787 Attempts: 10, 788 Mode: RestartPolicyModeDelay, 789 }, 790 ReschedulePolicy: &ReschedulePolicy{ 791 Interval: 5 * time.Minute, 792 Attempts: 10, 793 Delay: 5 * time.Second, 794 DelayFunction: "constant", 795 }, 796 } 797 798 err = tg.Validate(j) 799 mErr = err.(*multierror.Error) 800 if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") { 801 t.Fatalf("err: %s", err) 802 } 803 if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") { 804 t.Fatalf("err: %s", err) 805 } 806 if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") { 807 t.Fatalf("err: %s", err) 808 } 809 if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") { 810 t.Fatalf("err: %s", err) 811 } 812 if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") { 813 t.Fatalf("err: %s", err) 814 } 815 816 tg = &TaskGroup{ 817 Name: "web", 818 Count: 1, 819 Tasks: []*Task{ 820 {Name: "web", Leader: true}, 821 }, 822 Update: DefaultUpdateStrategy.Copy(), 823 } 824 j.Type = JobTypeBatch 825 err = tg.Validate(j) 826 if !strings.Contains(err.Error(), "does not allow update block") { 827 t.Fatalf("err: %s", err) 828 } 829 830 tg = &TaskGroup{ 831 Count: -1, 832 RestartPolicy: &RestartPolicy{ 833 Interval: 5 * time.Minute, 834 Delay: 10 * time.Second, 835 Attempts: 10, 836 Mode: RestartPolicyModeDelay, 837 }, 838 ReschedulePolicy: &ReschedulePolicy{ 839 Interval: 5 * time.Minute, 840 Attempts: 5, 841 Delay: 5 * time.Second, 842 }, 843 } 844 j.Type = JobTypeSystem 845 err = tg.Validate(j) 846 if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") { 847 t.Fatalf("err: %s", err) 848 } 849 } 850 851 func TestTask_Validate(t *testing.T) { 852 task := &Task{} 853 ephemeralDisk := DefaultEphemeralDisk() 854 err := task.Validate(ephemeralDisk, JobTypeBatch) 855 mErr := err.(*multierror.Error) 856 if !strings.Contains(mErr.Errors[0].Error(), "task name") { 857 t.Fatalf("err: %s", err) 858 } 859 if !strings.Contains(mErr.Errors[1].Error(), "task driver") { 860 t.Fatalf("err: %s", err) 861 } 862 if !strings.Contains(mErr.Errors[2].Error(), "task resources") { 863 t.Fatalf("err: %s", err) 864 } 865 866 task = &Task{Name: "web/foo"} 867 err = task.Validate(ephemeralDisk, JobTypeBatch) 868 mErr = err.(*multierror.Error) 869 if !strings.Contains(mErr.Errors[0].Error(), "slashes") { 870 t.Fatalf("err: %s", err) 871 } 872 873 task = &Task{ 874 Name: "web", 875 Driver: "docker", 876 Resources: &Resources{ 877 CPU: 100, 878 MemoryMB: 100, 879 }, 880 LogConfig: DefaultLogConfig(), 881 } 882 ephemeralDisk.SizeMB = 200 883 err = task.Validate(ephemeralDisk, JobTypeBatch) 884 if err != nil { 885 t.Fatalf("err: %s", err) 886 } 887 888 task.Constraints = append(task.Constraints, 889 &Constraint{ 890 Operand: ConstraintDistinctHosts, 891 }, 892 &Constraint{ 893 Operand: ConstraintDistinctProperty, 894 LTarget: "${meta.rack}", 895 }) 896 897 err = task.Validate(ephemeralDisk, JobTypeBatch) 898 mErr = err.(*multierror.Error) 899 if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") { 900 t.Fatalf("err: %s", err) 901 } 902 if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") { 903 t.Fatalf("err: %s", err) 904 } 905 } 906 907 func TestTask_Validate_Services(t *testing.T) { 908 s1 := &Service{ 909 Name: "service-name", 910 PortLabel: "bar", 911 Checks: []*ServiceCheck{ 912 { 913 Name: "check-name", 914 Type: ServiceCheckTCP, 915 Interval: 0 * time.Second, 916 }, 917 { 918 Name: "check-name", 919 Type: ServiceCheckTCP, 920 Timeout: 2 * time.Second, 921 }, 922 { 923 Name: "check-name", 924 Type: ServiceCheckTCP, 925 Interval: 1 * time.Second, 926 }, 927 }, 928 } 929 930 s2 := &Service{ 931 Name: "service-name", 932 PortLabel: "bar", 933 } 934 935 s3 := &Service{ 936 Name: "service-A", 937 PortLabel: "a", 938 } 939 s4 := &Service{ 940 Name: "service-A", 941 PortLabel: "b", 942 } 943 944 ephemeralDisk := DefaultEphemeralDisk() 945 ephemeralDisk.SizeMB = 200 946 task := &Task{ 947 Name: "web", 948 Driver: "docker", 949 Resources: &Resources{ 950 CPU: 100, 951 MemoryMB: 100, 952 }, 953 Services: []*Service{s1, s2}, 954 } 955 956 task1 := &Task{ 957 Name: "web", 958 Driver: "docker", 959 Resources: DefaultResources(), 960 Services: []*Service{s3, s4}, 961 LogConfig: DefaultLogConfig(), 962 } 963 task1.Resources.Networks = []*NetworkResource{ 964 { 965 MBits: 10, 966 DynamicPorts: []Port{ 967 { 968 Label: "a", 969 Value: 1000, 970 }, 971 { 972 Label: "b", 973 Value: 2000, 974 }, 975 }, 976 }, 977 } 978 979 err := task.Validate(ephemeralDisk, JobTypeService) 980 if err == nil { 981 t.Fatal("expected an error") 982 } 983 984 if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") { 985 t.Fatalf("err: %v", err) 986 } 987 988 if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") { 989 t.Fatalf("err: %v", err) 990 } 991 992 if !strings.Contains(err.Error(), "missing required value interval") { 993 t.Fatalf("err: %v", err) 994 } 995 996 if !strings.Contains(err.Error(), "cannot be less than") { 997 t.Fatalf("err: %v", err) 998 } 999 1000 if err = task1.Validate(ephemeralDisk, JobTypeService); err != nil { 1001 t.Fatalf("err : %v", err) 1002 } 1003 } 1004 1005 func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { 1006 ephemeralDisk := DefaultEphemeralDisk() 1007 getTask := func(s *Service) *Task { 1008 task := &Task{ 1009 Name: "web", 1010 Driver: "docker", 1011 Resources: DefaultResources(), 1012 Services: []*Service{s}, 1013 LogConfig: DefaultLogConfig(), 1014 } 1015 task.Resources.Networks = []*NetworkResource{ 1016 { 1017 MBits: 10, 1018 DynamicPorts: []Port{ 1019 { 1020 Label: "http", 1021 Value: 80, 1022 }, 1023 }, 1024 }, 1025 } 1026 return task 1027 } 1028 1029 cases := []*Service{ 1030 { 1031 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1032 Name: "DriverModeWithLabel", 1033 PortLabel: "http", 1034 AddressMode: AddressModeDriver, 1035 }, 1036 { 1037 Name: "DriverModeWithPort", 1038 PortLabel: "80", 1039 AddressMode: AddressModeDriver, 1040 }, 1041 { 1042 Name: "HostModeWithLabel", 1043 PortLabel: "http", 1044 AddressMode: AddressModeHost, 1045 }, 1046 { 1047 Name: "HostModeWithoutLabel", 1048 AddressMode: AddressModeHost, 1049 }, 1050 { 1051 Name: "DriverModeWithoutLabel", 1052 AddressMode: AddressModeDriver, 1053 }, 1054 } 1055 1056 for _, service := range cases { 1057 task := getTask(service) 1058 t.Run(service.Name, func(t *testing.T) { 1059 if err := task.Validate(ephemeralDisk, JobTypeService); err != nil { 1060 t.Fatalf("unexpected err: %v", err) 1061 } 1062 }) 1063 } 1064 } 1065 1066 func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { 1067 ephemeralDisk := DefaultEphemeralDisk() 1068 getTask := func(s *Service) *Task { 1069 task := &Task{ 1070 Name: "web", 1071 Driver: "docker", 1072 Resources: DefaultResources(), 1073 Services: []*Service{s}, 1074 LogConfig: DefaultLogConfig(), 1075 } 1076 task.Resources.Networks = []*NetworkResource{ 1077 { 1078 MBits: 10, 1079 DynamicPorts: []Port{ 1080 { 1081 Label: "http", 1082 Value: 80, 1083 }, 1084 }, 1085 }, 1086 } 1087 return task 1088 } 1089 1090 cases := []*Service{ 1091 { 1092 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1093 Name: "DriverModeWithLabel", 1094 PortLabel: "asdf", 1095 AddressMode: AddressModeDriver, 1096 }, 1097 { 1098 Name: "HostModeWithLabel", 1099 PortLabel: "asdf", 1100 AddressMode: AddressModeHost, 1101 }, 1102 { 1103 Name: "HostModeWithPort", 1104 PortLabel: "80", 1105 AddressMode: AddressModeHost, 1106 }, 1107 } 1108 1109 for _, service := range cases { 1110 task := getTask(service) 1111 t.Run(service.Name, func(t *testing.T) { 1112 err := task.Validate(ephemeralDisk, JobTypeService) 1113 if err == nil { 1114 t.Fatalf("expected an error") 1115 } 1116 //t.Logf("err: %v", err) 1117 }) 1118 } 1119 } 1120 1121 func TestTask_Validate_Service_Check(t *testing.T) { 1122 1123 invalidCheck := ServiceCheck{ 1124 Name: "check-name", 1125 Command: "/bin/true", 1126 Type: ServiceCheckScript, 1127 Interval: 10 * time.Second, 1128 } 1129 1130 err := invalidCheck.validate() 1131 if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") { 1132 t.Fatalf("expected a timeout validation error but received: %q", err) 1133 } 1134 1135 check1 := ServiceCheck{ 1136 Name: "check-name", 1137 Type: ServiceCheckTCP, 1138 Interval: 10 * time.Second, 1139 Timeout: 2 * time.Second, 1140 } 1141 1142 if err := check1.validate(); err != nil { 1143 t.Fatalf("err: %v", err) 1144 } 1145 1146 check1.InitialStatus = "foo" 1147 err = check1.validate() 1148 if err == nil { 1149 t.Fatal("Expected an error") 1150 } 1151 1152 if !strings.Contains(err.Error(), "invalid initial check state (foo)") { 1153 t.Fatalf("err: %v", err) 1154 } 1155 1156 check1.InitialStatus = api.HealthCritical 1157 err = check1.validate() 1158 if err != nil { 1159 t.Fatalf("err: %v", err) 1160 } 1161 1162 check1.InitialStatus = api.HealthPassing 1163 err = check1.validate() 1164 if err != nil { 1165 t.Fatalf("err: %v", err) 1166 } 1167 1168 check1.InitialStatus = "" 1169 err = check1.validate() 1170 if err != nil { 1171 t.Fatalf("err: %v", err) 1172 } 1173 1174 check2 := ServiceCheck{ 1175 Name: "check-name-2", 1176 Type: ServiceCheckHTTP, 1177 Interval: 10 * time.Second, 1178 Timeout: 2 * time.Second, 1179 Path: "/foo/bar", 1180 } 1181 1182 err = check2.validate() 1183 if err != nil { 1184 t.Fatalf("err: %v", err) 1185 } 1186 1187 check2.Path = "" 1188 err = check2.validate() 1189 if err == nil { 1190 t.Fatal("Expected an error") 1191 } 1192 if !strings.Contains(err.Error(), "valid http path") { 1193 t.Fatalf("err: %v", err) 1194 } 1195 1196 check2.Path = "http://www.example.com" 1197 err = check2.validate() 1198 if err == nil { 1199 t.Fatal("Expected an error") 1200 } 1201 if !strings.Contains(err.Error(), "relative http path") { 1202 t.Fatalf("err: %v", err) 1203 } 1204 } 1205 1206 // TestTask_Validate_Service_Check_AddressMode asserts that checks do not 1207 // inherit address mode but do inherit ports. 1208 func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { 1209 getTask := func(s *Service) *Task { 1210 return &Task{ 1211 Resources: &Resources{ 1212 Networks: []*NetworkResource{ 1213 { 1214 DynamicPorts: []Port{ 1215 { 1216 Label: "http", 1217 Value: 9999, 1218 }, 1219 }, 1220 }, 1221 }, 1222 }, 1223 Services: []*Service{s}, 1224 } 1225 } 1226 1227 cases := []struct { 1228 Service *Service 1229 ErrContains string 1230 }{ 1231 { 1232 Service: &Service{ 1233 Name: "invalid-driver", 1234 PortLabel: "80", 1235 AddressMode: "host", 1236 }, 1237 ErrContains: `port label "80" referenced`, 1238 }, 1239 { 1240 Service: &Service{ 1241 Name: "http-driver-fail-1", 1242 PortLabel: "80", 1243 AddressMode: "driver", 1244 Checks: []*ServiceCheck{ 1245 { 1246 Name: "invalid-check-1", 1247 Type: "tcp", 1248 Interval: time.Second, 1249 Timeout: time.Second, 1250 }, 1251 }, 1252 }, 1253 ErrContains: `check "invalid-check-1" cannot use a numeric port`, 1254 }, 1255 { 1256 Service: &Service{ 1257 Name: "http-driver-fail-2", 1258 PortLabel: "80", 1259 AddressMode: "driver", 1260 Checks: []*ServiceCheck{ 1261 { 1262 Name: "invalid-check-2", 1263 Type: "tcp", 1264 PortLabel: "80", 1265 Interval: time.Second, 1266 Timeout: time.Second, 1267 }, 1268 }, 1269 }, 1270 ErrContains: `check "invalid-check-2" cannot use a numeric port`, 1271 }, 1272 { 1273 Service: &Service{ 1274 Name: "http-driver-fail-3", 1275 PortLabel: "80", 1276 AddressMode: "driver", 1277 Checks: []*ServiceCheck{ 1278 { 1279 Name: "invalid-check-3", 1280 Type: "tcp", 1281 PortLabel: "missing-port-label", 1282 Interval: time.Second, 1283 Timeout: time.Second, 1284 }, 1285 }, 1286 }, 1287 ErrContains: `port label "missing-port-label" referenced`, 1288 }, 1289 { 1290 Service: &Service{ 1291 Name: "http-driver-passes", 1292 PortLabel: "80", 1293 AddressMode: "driver", 1294 Checks: []*ServiceCheck{ 1295 { 1296 Name: "valid-script-check", 1297 Type: "script", 1298 Command: "ok", 1299 Interval: time.Second, 1300 Timeout: time.Second, 1301 }, 1302 { 1303 Name: "valid-host-check", 1304 Type: "tcp", 1305 PortLabel: "http", 1306 Interval: time.Second, 1307 Timeout: time.Second, 1308 }, 1309 { 1310 Name: "valid-driver-check", 1311 Type: "tcp", 1312 AddressMode: "driver", 1313 Interval: time.Second, 1314 Timeout: time.Second, 1315 }, 1316 }, 1317 }, 1318 }, 1319 { 1320 Service: &Service{ 1321 Name: "empty-address-3673-passes-1", 1322 Checks: []*ServiceCheck{ 1323 { 1324 Name: "valid-port-label", 1325 Type: "tcp", 1326 PortLabel: "http", 1327 Interval: time.Second, 1328 Timeout: time.Second, 1329 }, 1330 { 1331 Name: "empty-is-ok", 1332 Type: "script", 1333 Command: "ok", 1334 Interval: time.Second, 1335 Timeout: time.Second, 1336 }, 1337 }, 1338 }, 1339 }, 1340 { 1341 Service: &Service{ 1342 Name: "empty-address-3673-passes-2", 1343 }, 1344 }, 1345 { 1346 Service: &Service{ 1347 Name: "empty-address-3673-fails", 1348 Checks: []*ServiceCheck{ 1349 { 1350 Name: "empty-is-not-ok", 1351 Type: "tcp", 1352 Interval: time.Second, 1353 Timeout: time.Second, 1354 }, 1355 }, 1356 }, 1357 ErrContains: `invalid: check requires a port but neither check nor service`, 1358 }, 1359 } 1360 1361 for _, tc := range cases { 1362 tc := tc 1363 task := getTask(tc.Service) 1364 t.Run(tc.Service.Name, func(t *testing.T) { 1365 err := validateServices(task) 1366 if err == nil && tc.ErrContains == "" { 1367 // Ok! 1368 return 1369 } 1370 if err == nil { 1371 t.Fatalf("no error returned. expected: %s", tc.ErrContains) 1372 } 1373 if !strings.Contains(err.Error(), tc.ErrContains) { 1374 t.Fatalf("expected %q but found: %v", tc.ErrContains, err) 1375 } 1376 }) 1377 } 1378 } 1379 1380 func TestTask_Validate_Service_Check_GRPC(t *testing.T) { 1381 t.Parallel() 1382 // Bad (no port) 1383 invalidGRPC := &ServiceCheck{ 1384 Type: ServiceCheckGRPC, 1385 Interval: time.Second, 1386 Timeout: time.Second, 1387 } 1388 service := &Service{ 1389 Name: "test", 1390 Checks: []*ServiceCheck{invalidGRPC}, 1391 } 1392 1393 assert.Error(t, service.Validate()) 1394 1395 // Good 1396 service.Checks[0] = &ServiceCheck{ 1397 Type: ServiceCheckGRPC, 1398 Interval: time.Second, 1399 Timeout: time.Second, 1400 PortLabel: "some-port-label", 1401 } 1402 1403 assert.NoError(t, service.Validate()) 1404 } 1405 1406 func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { 1407 t.Parallel() 1408 invalidCheckRestart := &CheckRestart{ 1409 Limit: -1, 1410 Grace: -1, 1411 } 1412 1413 err := invalidCheckRestart.Validate() 1414 assert.NotNil(t, err, "invalidateCheckRestart.Validate()") 1415 assert.Len(t, err.(*multierror.Error).Errors, 2) 1416 1417 validCheckRestart := &CheckRestart{} 1418 assert.Nil(t, validCheckRestart.Validate()) 1419 1420 validCheckRestart.Limit = 1 1421 validCheckRestart.Grace = 1 1422 assert.Nil(t, validCheckRestart.Validate()) 1423 } 1424 1425 func TestTask_Validate_LogConfig(t *testing.T) { 1426 task := &Task{ 1427 LogConfig: DefaultLogConfig(), 1428 } 1429 ephemeralDisk := &EphemeralDisk{ 1430 SizeMB: 1, 1431 } 1432 1433 err := task.Validate(ephemeralDisk, JobTypeService) 1434 mErr := err.(*multierror.Error) 1435 if !strings.Contains(mErr.Errors[3].Error(), "log storage") { 1436 t.Fatalf("err: %s", err) 1437 } 1438 } 1439 1440 func TestTask_Validate_Template(t *testing.T) { 1441 1442 bad := &Template{} 1443 task := &Task{ 1444 Templates: []*Template{bad}, 1445 } 1446 ephemeralDisk := &EphemeralDisk{ 1447 SizeMB: 1, 1448 } 1449 1450 err := task.Validate(ephemeralDisk, JobTypeService) 1451 if !strings.Contains(err.Error(), "Template 1 validation failed") { 1452 t.Fatalf("err: %s", err) 1453 } 1454 1455 // Have two templates that share the same destination 1456 good := &Template{ 1457 SourcePath: "foo", 1458 DestPath: "local/foo", 1459 ChangeMode: "noop", 1460 } 1461 1462 task.Templates = []*Template{good, good} 1463 err = task.Validate(ephemeralDisk, JobTypeService) 1464 if !strings.Contains(err.Error(), "same destination as") { 1465 t.Fatalf("err: %s", err) 1466 } 1467 1468 // Env templates can't use signals 1469 task.Templates = []*Template{ 1470 { 1471 Envvars: true, 1472 ChangeMode: "signal", 1473 }, 1474 } 1475 1476 err = task.Validate(ephemeralDisk, JobTypeService) 1477 if err == nil { 1478 t.Fatalf("expected error from Template.Validate") 1479 } 1480 if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) { 1481 t.Errorf("expected to find %q but found %v", expected, err) 1482 } 1483 } 1484 1485 func TestTemplate_Validate(t *testing.T) { 1486 cases := []struct { 1487 Tmpl *Template 1488 Fail bool 1489 ContainsErrs []string 1490 }{ 1491 { 1492 Tmpl: &Template{}, 1493 Fail: true, 1494 ContainsErrs: []string{ 1495 "specify a source path", 1496 "specify a destination", 1497 TemplateChangeModeInvalidError.Error(), 1498 }, 1499 }, 1500 { 1501 Tmpl: &Template{ 1502 Splay: -100, 1503 }, 1504 Fail: true, 1505 ContainsErrs: []string{ 1506 "positive splay", 1507 }, 1508 }, 1509 { 1510 Tmpl: &Template{ 1511 ChangeMode: "foo", 1512 }, 1513 Fail: true, 1514 ContainsErrs: []string{ 1515 TemplateChangeModeInvalidError.Error(), 1516 }, 1517 }, 1518 { 1519 Tmpl: &Template{ 1520 ChangeMode: "signal", 1521 }, 1522 Fail: true, 1523 ContainsErrs: []string{ 1524 "specify signal value", 1525 }, 1526 }, 1527 { 1528 Tmpl: &Template{ 1529 SourcePath: "foo", 1530 DestPath: "../../root", 1531 ChangeMode: "noop", 1532 }, 1533 Fail: true, 1534 ContainsErrs: []string{ 1535 "destination escapes", 1536 }, 1537 }, 1538 { 1539 Tmpl: &Template{ 1540 SourcePath: "foo", 1541 DestPath: "local/foo", 1542 ChangeMode: "noop", 1543 }, 1544 Fail: false, 1545 }, 1546 { 1547 Tmpl: &Template{ 1548 SourcePath: "foo", 1549 DestPath: "local/foo", 1550 ChangeMode: "noop", 1551 Perms: "0444", 1552 }, 1553 Fail: false, 1554 }, 1555 { 1556 Tmpl: &Template{ 1557 SourcePath: "foo", 1558 DestPath: "local/foo", 1559 ChangeMode: "noop", 1560 Perms: "zza", 1561 }, 1562 Fail: true, 1563 ContainsErrs: []string{ 1564 "as octal", 1565 }, 1566 }, 1567 } 1568 1569 for i, c := range cases { 1570 err := c.Tmpl.Validate() 1571 if err != nil { 1572 if !c.Fail { 1573 t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err) 1574 } 1575 1576 e := err.Error() 1577 for _, exp := range c.ContainsErrs { 1578 if !strings.Contains(e, exp) { 1579 t.Fatalf("Cased %d: should have contained error %q: %q", i+1, exp, e) 1580 } 1581 } 1582 } else if c.Fail { 1583 t.Fatalf("Case %d: should have failed: %v", i+1, err) 1584 } 1585 } 1586 } 1587 1588 func TestConstraint_Validate(t *testing.T) { 1589 c := &Constraint{} 1590 err := c.Validate() 1591 mErr := err.(*multierror.Error) 1592 if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") { 1593 t.Fatalf("err: %s", err) 1594 } 1595 1596 c = &Constraint{ 1597 LTarget: "$attr.kernel.name", 1598 RTarget: "linux", 1599 Operand: "=", 1600 } 1601 err = c.Validate() 1602 if err != nil { 1603 t.Fatalf("err: %v", err) 1604 } 1605 1606 // Perform additional regexp validation 1607 c.Operand = ConstraintRegex 1608 c.RTarget = "(foo" 1609 err = c.Validate() 1610 mErr = err.(*multierror.Error) 1611 if !strings.Contains(mErr.Errors[0].Error(), "missing closing") { 1612 t.Fatalf("err: %s", err) 1613 } 1614 1615 // Perform version validation 1616 c.Operand = ConstraintVersion 1617 c.RTarget = "~> foo" 1618 err = c.Validate() 1619 mErr = err.(*multierror.Error) 1620 if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") { 1621 t.Fatalf("err: %s", err) 1622 } 1623 1624 // Perform distinct_property validation 1625 c.Operand = ConstraintDistinctProperty 1626 c.RTarget = "0" 1627 err = c.Validate() 1628 mErr = err.(*multierror.Error) 1629 if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") { 1630 t.Fatalf("err: %s", err) 1631 } 1632 1633 c.RTarget = "-1" 1634 err = c.Validate() 1635 mErr = err.(*multierror.Error) 1636 if !strings.Contains(mErr.Errors[0].Error(), "to uint64") { 1637 t.Fatalf("err: %s", err) 1638 } 1639 1640 // Perform distinct_hosts validation 1641 c.Operand = ConstraintDistinctHosts 1642 c.LTarget = "" 1643 c.RTarget = "" 1644 if err := c.Validate(); err != nil { 1645 t.Fatalf("expected valid constraint: %v", err) 1646 } 1647 1648 // Perform set_contains* validation 1649 c.RTarget = "" 1650 for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} { 1651 c.Operand = o 1652 err = c.Validate() 1653 mErr = err.(*multierror.Error) 1654 if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") { 1655 t.Fatalf("err: %s", err) 1656 } 1657 } 1658 1659 // Perform LTarget validation 1660 c.Operand = ConstraintRegex 1661 c.RTarget = "foo" 1662 c.LTarget = "" 1663 err = c.Validate() 1664 mErr = err.(*multierror.Error) 1665 if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") { 1666 t.Fatalf("err: %s", err) 1667 } 1668 1669 // Perform constraint type validation 1670 c.Operand = "foo" 1671 err = c.Validate() 1672 mErr = err.(*multierror.Error) 1673 if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") { 1674 t.Fatalf("err: %s", err) 1675 } 1676 } 1677 1678 func TestAffinity_Validate(t *testing.T) { 1679 1680 type tc struct { 1681 affinity *Affinity 1682 err error 1683 name string 1684 } 1685 1686 testCases := []tc{ 1687 { 1688 affinity: &Affinity{}, 1689 err: fmt.Errorf("Missing affinity operand"), 1690 }, 1691 { 1692 affinity: &Affinity{ 1693 Operand: "foo", 1694 LTarget: "${meta.node_class}", 1695 Weight: 10, 1696 }, 1697 err: fmt.Errorf("Unknown affinity operator \"foo\""), 1698 }, 1699 { 1700 affinity: &Affinity{ 1701 Operand: "=", 1702 LTarget: "${meta.node_class}", 1703 Weight: 10, 1704 }, 1705 err: fmt.Errorf("Operator \"=\" requires an RTarget"), 1706 }, 1707 { 1708 affinity: &Affinity{ 1709 Operand: "=", 1710 LTarget: "${meta.node_class}", 1711 RTarget: "c4", 1712 Weight: 0, 1713 }, 1714 err: fmt.Errorf("Affinity weight cannot be zero"), 1715 }, 1716 { 1717 affinity: &Affinity{ 1718 Operand: "=", 1719 LTarget: "${meta.node_class}", 1720 RTarget: "c4", 1721 Weight: 110, 1722 }, 1723 err: fmt.Errorf("Affinity weight must be within the range [-100,100]"), 1724 }, 1725 { 1726 affinity: &Affinity{ 1727 Operand: "=", 1728 LTarget: "${node.class}", 1729 Weight: 10, 1730 }, 1731 err: fmt.Errorf("Operator \"=\" requires an RTarget"), 1732 }, 1733 { 1734 affinity: &Affinity{ 1735 Operand: "version", 1736 LTarget: "${meta.os}", 1737 RTarget: ">>2.0", 1738 Weight: 110, 1739 }, 1740 err: fmt.Errorf("Version affinity is invalid"), 1741 }, 1742 { 1743 affinity: &Affinity{ 1744 Operand: "regexp", 1745 LTarget: "${meta.os}", 1746 RTarget: "\\K2.0", 1747 Weight: 100, 1748 }, 1749 err: fmt.Errorf("Regular expression failed to compile"), 1750 }, 1751 } 1752 1753 for _, tc := range testCases { 1754 t.Run(tc.name, func(t *testing.T) { 1755 err := tc.affinity.Validate() 1756 if tc.err != nil { 1757 require.NotNil(t, err) 1758 require.Contains(t, err.Error(), tc.err.Error()) 1759 } else { 1760 require.Nil(t, err) 1761 } 1762 }) 1763 } 1764 } 1765 1766 func TestUpdateStrategy_Validate(t *testing.T) { 1767 u := &UpdateStrategy{ 1768 MaxParallel: 0, 1769 HealthCheck: "foo", 1770 MinHealthyTime: -10, 1771 HealthyDeadline: -15, 1772 ProgressDeadline: -25, 1773 AutoRevert: false, 1774 Canary: -1, 1775 } 1776 1777 err := u.Validate() 1778 mErr := err.(*multierror.Error) 1779 if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") { 1780 t.Fatalf("err: %s", err) 1781 } 1782 if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than one") { 1783 t.Fatalf("err: %s", err) 1784 } 1785 if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") { 1786 t.Fatalf("err: %s", err) 1787 } 1788 if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") { 1789 t.Fatalf("err: %s", err) 1790 } 1791 if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") { 1792 t.Fatalf("err: %s", err) 1793 } 1794 if !strings.Contains(mErr.Errors[5].Error(), "Progress deadline must be zero or greater") { 1795 t.Fatalf("err: %s", err) 1796 } 1797 if !strings.Contains(mErr.Errors[6].Error(), "Minimum healthy time must be less than healthy deadline") { 1798 t.Fatalf("err: %s", err) 1799 } 1800 if !strings.Contains(mErr.Errors[7].Error(), "Healthy deadline must be less than progress deadline") { 1801 t.Fatalf("err: %s", err) 1802 } 1803 } 1804 1805 func TestResource_NetIndex(t *testing.T) { 1806 r := &Resources{ 1807 Networks: []*NetworkResource{ 1808 {Device: "eth0"}, 1809 {Device: "lo0"}, 1810 {Device: ""}, 1811 }, 1812 } 1813 if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 { 1814 t.Fatalf("Bad: %d", idx) 1815 } 1816 if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 { 1817 t.Fatalf("Bad: %d", idx) 1818 } 1819 if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 { 1820 t.Fatalf("Bad: %d", idx) 1821 } 1822 } 1823 1824 func TestResource_Superset(t *testing.T) { 1825 r1 := &Resources{ 1826 CPU: 2000, 1827 MemoryMB: 2048, 1828 DiskMB: 10000, 1829 } 1830 r2 := &Resources{ 1831 CPU: 2000, 1832 MemoryMB: 1024, 1833 DiskMB: 5000, 1834 } 1835 1836 if s, _ := r1.Superset(r1); !s { 1837 t.Fatalf("bad") 1838 } 1839 if s, _ := r1.Superset(r2); !s { 1840 t.Fatalf("bad") 1841 } 1842 if s, _ := r2.Superset(r1); s { 1843 t.Fatalf("bad") 1844 } 1845 if s, _ := r2.Superset(r2); !s { 1846 t.Fatalf("bad") 1847 } 1848 } 1849 1850 func TestResource_Add(t *testing.T) { 1851 r1 := &Resources{ 1852 CPU: 2000, 1853 MemoryMB: 2048, 1854 DiskMB: 10000, 1855 Networks: []*NetworkResource{ 1856 { 1857 CIDR: "10.0.0.0/8", 1858 MBits: 100, 1859 ReservedPorts: []Port{{"ssh", 22}}, 1860 }, 1861 }, 1862 } 1863 r2 := &Resources{ 1864 CPU: 2000, 1865 MemoryMB: 1024, 1866 DiskMB: 5000, 1867 Networks: []*NetworkResource{ 1868 { 1869 IP: "10.0.0.1", 1870 MBits: 50, 1871 ReservedPorts: []Port{{"web", 80}}, 1872 }, 1873 }, 1874 } 1875 1876 err := r1.Add(r2) 1877 if err != nil { 1878 t.Fatalf("Err: %v", err) 1879 } 1880 1881 expect := &Resources{ 1882 CPU: 3000, 1883 MemoryMB: 3072, 1884 DiskMB: 15000, 1885 Networks: []*NetworkResource{ 1886 { 1887 CIDR: "10.0.0.0/8", 1888 MBits: 150, 1889 ReservedPorts: []Port{{"ssh", 22}, {"web", 80}}, 1890 }, 1891 }, 1892 } 1893 1894 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 1895 t.Fatalf("bad: %#v %#v", expect, r1) 1896 } 1897 } 1898 1899 func TestResource_Add_Network(t *testing.T) { 1900 r1 := &Resources{} 1901 r2 := &Resources{ 1902 Networks: []*NetworkResource{ 1903 { 1904 MBits: 50, 1905 DynamicPorts: []Port{{"http", 0}, {"https", 0}}, 1906 }, 1907 }, 1908 } 1909 r3 := &Resources{ 1910 Networks: []*NetworkResource{ 1911 { 1912 MBits: 25, 1913 DynamicPorts: []Port{{"admin", 0}}, 1914 }, 1915 }, 1916 } 1917 1918 err := r1.Add(r2) 1919 if err != nil { 1920 t.Fatalf("Err: %v", err) 1921 } 1922 err = r1.Add(r3) 1923 if err != nil { 1924 t.Fatalf("Err: %v", err) 1925 } 1926 1927 expect := &Resources{ 1928 Networks: []*NetworkResource{ 1929 { 1930 MBits: 75, 1931 DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}}, 1932 }, 1933 }, 1934 } 1935 1936 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 1937 t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0]) 1938 } 1939 } 1940 1941 func TestComparableResources_Subtract(t *testing.T) { 1942 r1 := &ComparableResources{ 1943 Flattened: AllocatedTaskResources{ 1944 Cpu: AllocatedCpuResources{ 1945 CpuShares: 2000, 1946 }, 1947 Memory: AllocatedMemoryResources{ 1948 MemoryMB: 2048, 1949 }, 1950 Networks: []*NetworkResource{ 1951 { 1952 CIDR: "10.0.0.0/8", 1953 MBits: 100, 1954 ReservedPorts: []Port{{"ssh", 22}}, 1955 }, 1956 }, 1957 }, 1958 Shared: AllocatedSharedResources{ 1959 DiskMB: 10000, 1960 }, 1961 } 1962 1963 r2 := &ComparableResources{ 1964 Flattened: AllocatedTaskResources{ 1965 Cpu: AllocatedCpuResources{ 1966 CpuShares: 1000, 1967 }, 1968 Memory: AllocatedMemoryResources{ 1969 MemoryMB: 1024, 1970 }, 1971 Networks: []*NetworkResource{ 1972 { 1973 CIDR: "10.0.0.0/8", 1974 MBits: 20, 1975 ReservedPorts: []Port{{"ssh", 22}}, 1976 }, 1977 }, 1978 }, 1979 Shared: AllocatedSharedResources{ 1980 DiskMB: 5000, 1981 }, 1982 } 1983 r1.Subtract(r2) 1984 1985 expect := &ComparableResources{ 1986 Flattened: AllocatedTaskResources{ 1987 Cpu: AllocatedCpuResources{ 1988 CpuShares: 1000, 1989 }, 1990 Memory: AllocatedMemoryResources{ 1991 MemoryMB: 1024, 1992 }, 1993 Networks: []*NetworkResource{ 1994 { 1995 CIDR: "10.0.0.0/8", 1996 MBits: 100, 1997 ReservedPorts: []Port{{"ssh", 22}}, 1998 }, 1999 }, 2000 }, 2001 Shared: AllocatedSharedResources{ 2002 DiskMB: 5000, 2003 }, 2004 } 2005 2006 require := require.New(t) 2007 require.Equal(expect, r1) 2008 } 2009 2010 func TestEncodeDecode(t *testing.T) { 2011 type FooRequest struct { 2012 Foo string 2013 Bar int 2014 Baz bool 2015 } 2016 arg := &FooRequest{ 2017 Foo: "test", 2018 Bar: 42, 2019 Baz: true, 2020 } 2021 buf, err := Encode(1, arg) 2022 if err != nil { 2023 t.Fatalf("err: %v", err) 2024 } 2025 2026 var out FooRequest 2027 err = Decode(buf[1:], &out) 2028 if err != nil { 2029 t.Fatalf("err: %v", err) 2030 } 2031 2032 if !reflect.DeepEqual(arg, &out) { 2033 t.Fatalf("bad: %#v %#v", arg, out) 2034 } 2035 } 2036 2037 func BenchmarkEncodeDecode(b *testing.B) { 2038 job := testJob() 2039 2040 for i := 0; i < b.N; i++ { 2041 buf, err := Encode(1, job) 2042 if err != nil { 2043 b.Fatalf("err: %v", err) 2044 } 2045 2046 var out Job 2047 err = Decode(buf[1:], &out) 2048 if err != nil { 2049 b.Fatalf("err: %v", err) 2050 } 2051 } 2052 } 2053 2054 func TestInvalidServiceCheck(t *testing.T) { 2055 s := Service{ 2056 Name: "service-name", 2057 PortLabel: "bar", 2058 Checks: []*ServiceCheck{ 2059 { 2060 Name: "check-name", 2061 Type: "lol", 2062 }, 2063 }, 2064 } 2065 if err := s.Validate(); err == nil { 2066 t.Fatalf("Service should be invalid (invalid type)") 2067 } 2068 2069 s = Service{ 2070 Name: "service.name", 2071 PortLabel: "bar", 2072 } 2073 if err := s.ValidateName(s.Name); err == nil { 2074 t.Fatalf("Service should be invalid (contains a dot): %v", err) 2075 } 2076 2077 s = Service{ 2078 Name: "-my-service", 2079 PortLabel: "bar", 2080 } 2081 if err := s.Validate(); err == nil { 2082 t.Fatalf("Service should be invalid (begins with a hyphen): %v", err) 2083 } 2084 2085 s = Service{ 2086 Name: "my-service-${NOMAD_META_FOO}", 2087 PortLabel: "bar", 2088 } 2089 if err := s.Validate(); err != nil { 2090 t.Fatalf("Service should be valid: %v", err) 2091 } 2092 2093 s = Service{ 2094 Name: "my_service-${NOMAD_META_FOO}", 2095 PortLabel: "bar", 2096 } 2097 if err := s.Validate(); err == nil { 2098 t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err) 2099 } 2100 2101 s = Service{ 2102 Name: "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456", 2103 PortLabel: "bar", 2104 } 2105 if err := s.ValidateName(s.Name); err == nil { 2106 t.Fatalf("Service should be invalid (too long): %v", err) 2107 } 2108 2109 s = Service{ 2110 Name: "service-name", 2111 Checks: []*ServiceCheck{ 2112 { 2113 Name: "check-tcp", 2114 Type: ServiceCheckTCP, 2115 Interval: 5 * time.Second, 2116 Timeout: 2 * time.Second, 2117 }, 2118 { 2119 Name: "check-http", 2120 Type: ServiceCheckHTTP, 2121 Path: "/foo", 2122 Interval: 5 * time.Second, 2123 Timeout: 2 * time.Second, 2124 }, 2125 }, 2126 } 2127 if err := s.Validate(); err == nil { 2128 t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err) 2129 } 2130 2131 s = Service{ 2132 Name: "service-name", 2133 Checks: []*ServiceCheck{ 2134 { 2135 Name: "check-script", 2136 Type: ServiceCheckScript, 2137 Command: "/bin/date", 2138 Interval: 5 * time.Second, 2139 Timeout: 2 * time.Second, 2140 }, 2141 }, 2142 } 2143 if err := s.Validate(); err != nil { 2144 t.Fatalf("un-expected error: %v", err) 2145 } 2146 } 2147 2148 func TestDistinctCheckID(t *testing.T) { 2149 c1 := ServiceCheck{ 2150 Name: "web-health", 2151 Type: "http", 2152 Path: "/health", 2153 Interval: 2 * time.Second, 2154 Timeout: 3 * time.Second, 2155 } 2156 c2 := ServiceCheck{ 2157 Name: "web-health", 2158 Type: "http", 2159 Path: "/health1", 2160 Interval: 2 * time.Second, 2161 Timeout: 3 * time.Second, 2162 } 2163 2164 c3 := ServiceCheck{ 2165 Name: "web-health", 2166 Type: "http", 2167 Path: "/health", 2168 Interval: 4 * time.Second, 2169 Timeout: 3 * time.Second, 2170 } 2171 serviceID := "123" 2172 c1Hash := c1.Hash(serviceID) 2173 c2Hash := c2.Hash(serviceID) 2174 c3Hash := c3.Hash(serviceID) 2175 2176 if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash { 2177 t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash) 2178 } 2179 2180 } 2181 2182 func TestService_Canonicalize(t *testing.T) { 2183 job := "example" 2184 taskGroup := "cache" 2185 task := "redis" 2186 2187 s := Service{ 2188 Name: "${TASK}-db", 2189 } 2190 2191 s.Canonicalize(job, taskGroup, task) 2192 if s.Name != "redis-db" { 2193 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2194 } 2195 2196 s.Name = "db" 2197 s.Canonicalize(job, taskGroup, task) 2198 if s.Name != "db" { 2199 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2200 } 2201 2202 s.Name = "${JOB}-${TASKGROUP}-${TASK}-db" 2203 s.Canonicalize(job, taskGroup, task) 2204 if s.Name != "example-cache-redis-db" { 2205 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2206 } 2207 2208 s.Name = "${BASE}-db" 2209 s.Canonicalize(job, taskGroup, task) 2210 if s.Name != "example-cache-redis-db" { 2211 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2212 } 2213 2214 } 2215 2216 func TestJob_ExpandServiceNames(t *testing.T) { 2217 j := &Job{ 2218 Name: "my-job", 2219 TaskGroups: []*TaskGroup{ 2220 { 2221 Name: "web", 2222 Tasks: []*Task{ 2223 { 2224 Name: "frontend", 2225 Services: []*Service{ 2226 { 2227 Name: "${BASE}-default", 2228 }, 2229 { 2230 Name: "jmx", 2231 }, 2232 }, 2233 }, 2234 }, 2235 }, 2236 { 2237 Name: "admin", 2238 Tasks: []*Task{ 2239 { 2240 Name: "admin-web", 2241 }, 2242 }, 2243 }, 2244 }, 2245 } 2246 2247 j.Canonicalize() 2248 2249 service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name 2250 if service1Name != "my-job-web-frontend-default" { 2251 t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name) 2252 } 2253 2254 service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name 2255 if service2Name != "jmx" { 2256 t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name) 2257 } 2258 2259 } 2260 2261 func TestPeriodicConfig_EnabledInvalid(t *testing.T) { 2262 // Create a config that is enabled but with no interval specified. 2263 p := &PeriodicConfig{Enabled: true} 2264 if err := p.Validate(); err == nil { 2265 t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid") 2266 } 2267 2268 // Create a config that is enabled, with a spec but no type specified. 2269 p = &PeriodicConfig{Enabled: true, Spec: "foo"} 2270 if err := p.Validate(); err == nil { 2271 t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid") 2272 } 2273 2274 // Create a config that is enabled, with a spec type but no spec specified. 2275 p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron} 2276 if err := p.Validate(); err == nil { 2277 t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid") 2278 } 2279 2280 // Create a config that is enabled, with a bad time zone. 2281 p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"} 2282 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") { 2283 t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err) 2284 } 2285 } 2286 2287 func TestPeriodicConfig_InvalidCron(t *testing.T) { 2288 specs := []string{"foo", "* *", "@foo"} 2289 for _, spec := range specs { 2290 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2291 p.Canonicalize() 2292 if err := p.Validate(); err == nil { 2293 t.Fatal("Invalid cron spec") 2294 } 2295 } 2296 } 2297 2298 func TestPeriodicConfig_ValidCron(t *testing.T) { 2299 specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} 2300 for _, spec := range specs { 2301 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2302 p.Canonicalize() 2303 if err := p.Validate(); err != nil { 2304 t.Fatal("Passed valid cron") 2305 } 2306 } 2307 } 2308 2309 func TestPeriodicConfig_NextCron(t *testing.T) { 2310 require := require.New(t) 2311 2312 type testExpectation struct { 2313 Time time.Time 2314 HasError bool 2315 ErrorMsg string 2316 } 2317 2318 from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) 2319 specs := []string{"0 0 29 2 * 1980", 2320 "*/5 * * * *", 2321 "1 15-0 * * 1-5"} 2322 expected := []*testExpectation{ 2323 { 2324 Time: time.Time{}, 2325 HasError: false, 2326 }, 2327 { 2328 Time: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC), 2329 HasError: false, 2330 }, 2331 { 2332 Time: time.Time{}, 2333 HasError: true, 2334 ErrorMsg: "failed parsing cron expression", 2335 }, 2336 } 2337 2338 for i, spec := range specs { 2339 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2340 p.Canonicalize() 2341 n, err := p.Next(from) 2342 nextExpected := expected[i] 2343 2344 require.Equal(nextExpected.Time, n) 2345 require.Equal(err != nil, nextExpected.HasError) 2346 if err != nil { 2347 require.True(strings.Contains(err.Error(), nextExpected.ErrorMsg)) 2348 } 2349 } 2350 } 2351 2352 func TestPeriodicConfig_ValidTimeZone(t *testing.T) { 2353 zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} 2354 for _, zone := range zones { 2355 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} 2356 p.Canonicalize() 2357 if err := p.Validate(); err != nil { 2358 t.Fatalf("Valid tz errored: %v", err) 2359 } 2360 } 2361 } 2362 2363 func TestPeriodicConfig_DST(t *testing.T) { 2364 require := require.New(t) 2365 2366 // On Sun, Mar 12, 2:00 am 2017: +1 hour UTC 2367 p := &PeriodicConfig{ 2368 Enabled: true, 2369 SpecType: PeriodicSpecCron, 2370 Spec: "0 2 11-12 3 * 2017", 2371 TimeZone: "America/Los_Angeles", 2372 } 2373 p.Canonicalize() 2374 2375 t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location) 2376 t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location) 2377 2378 // E1 is an 8 hour adjustment, E2 is a 7 hour adjustment 2379 e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC) 2380 e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC) 2381 2382 n1, err := p.Next(t1) 2383 require.Nil(err) 2384 2385 n2, err := p.Next(t2) 2386 require.Nil(err) 2387 2388 require.Equal(e1, n1.UTC()) 2389 require.Equal(e2, n2.UTC()) 2390 } 2391 2392 func TestRestartPolicy_Validate(t *testing.T) { 2393 // Policy with acceptable restart options passes 2394 p := &RestartPolicy{ 2395 Mode: RestartPolicyModeFail, 2396 Attempts: 0, 2397 Interval: 5 * time.Second, 2398 } 2399 if err := p.Validate(); err != nil { 2400 t.Fatalf("err: %v", err) 2401 } 2402 2403 // Policy with ambiguous restart options fails 2404 p = &RestartPolicy{ 2405 Mode: RestartPolicyModeDelay, 2406 Attempts: 0, 2407 Interval: 5 * time.Second, 2408 } 2409 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") { 2410 t.Fatalf("expect ambiguity error, got: %v", err) 2411 } 2412 2413 // Bad policy mode fails 2414 p = &RestartPolicy{ 2415 Mode: "nope", 2416 Attempts: 1, 2417 Interval: 5 * time.Second, 2418 } 2419 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") { 2420 t.Fatalf("expect mode error, got: %v", err) 2421 } 2422 2423 // Fails when attempts*delay does not fit inside interval 2424 p = &RestartPolicy{ 2425 Mode: RestartPolicyModeDelay, 2426 Attempts: 3, 2427 Delay: 5 * time.Second, 2428 Interval: 5 * time.Second, 2429 } 2430 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") { 2431 t.Fatalf("expect restart interval error, got: %v", err) 2432 } 2433 2434 // Fails when interval is to small 2435 p = &RestartPolicy{ 2436 Mode: RestartPolicyModeDelay, 2437 Attempts: 3, 2438 Delay: 5 * time.Second, 2439 Interval: 2 * time.Second, 2440 } 2441 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") { 2442 t.Fatalf("expect interval too small error, got: %v", err) 2443 } 2444 } 2445 2446 func TestReschedulePolicy_Validate(t *testing.T) { 2447 type testCase struct { 2448 desc string 2449 ReschedulePolicy *ReschedulePolicy 2450 errors []error 2451 } 2452 2453 testCases := []testCase{ 2454 { 2455 desc: "Nil", 2456 }, 2457 { 2458 desc: "Disabled", 2459 ReschedulePolicy: &ReschedulePolicy{ 2460 Attempts: 0, 2461 Interval: 0 * time.Second}, 2462 }, 2463 { 2464 desc: "Disabled", 2465 ReschedulePolicy: &ReschedulePolicy{ 2466 Attempts: -1, 2467 Interval: 5 * time.Minute}, 2468 }, 2469 { 2470 desc: "Valid Linear Delay", 2471 ReschedulePolicy: &ReschedulePolicy{ 2472 Attempts: 1, 2473 Interval: 5 * time.Minute, 2474 Delay: 10 * time.Second, 2475 DelayFunction: "constant"}, 2476 }, 2477 { 2478 desc: "Valid Exponential Delay", 2479 ReschedulePolicy: &ReschedulePolicy{ 2480 Attempts: 5, 2481 Interval: 1 * time.Hour, 2482 Delay: 30 * time.Second, 2483 MaxDelay: 5 * time.Minute, 2484 DelayFunction: "exponential"}, 2485 }, 2486 { 2487 desc: "Valid Fibonacci Delay", 2488 ReschedulePolicy: &ReschedulePolicy{ 2489 Attempts: 5, 2490 Interval: 15 * time.Minute, 2491 Delay: 10 * time.Second, 2492 MaxDelay: 5 * time.Minute, 2493 DelayFunction: "fibonacci"}, 2494 }, 2495 { 2496 desc: "Invalid delay function", 2497 ReschedulePolicy: &ReschedulePolicy{ 2498 Attempts: 1, 2499 Interval: 1 * time.Second, 2500 DelayFunction: "blah"}, 2501 errors: []error{ 2502 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 2503 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2504 fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions), 2505 }, 2506 }, 2507 { 2508 desc: "Invalid delay ceiling", 2509 ReschedulePolicy: &ReschedulePolicy{ 2510 Attempts: 1, 2511 Interval: 8 * time.Second, 2512 DelayFunction: "exponential", 2513 Delay: 15 * time.Second, 2514 MaxDelay: 5 * time.Second}, 2515 errors: []error{ 2516 fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", 2517 15*time.Second, 5*time.Second), 2518 }, 2519 }, 2520 { 2521 desc: "Invalid delay and interval", 2522 ReschedulePolicy: &ReschedulePolicy{ 2523 Attempts: 1, 2524 Interval: 1 * time.Second, 2525 DelayFunction: "constant"}, 2526 errors: []error{ 2527 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 2528 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2529 }, 2530 }, { 2531 // Should suggest 2h40m as the interval 2532 desc: "Invalid Attempts - linear delay", 2533 ReschedulePolicy: &ReschedulePolicy{ 2534 Attempts: 10, 2535 Interval: 1 * time.Hour, 2536 Delay: 20 * time.Minute, 2537 DelayFunction: "constant", 2538 }, 2539 errors: []error{ 2540 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+ 2541 " delay function %q", 3, time.Hour, 20*time.Minute, "constant"), 2542 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 2543 200*time.Minute, 10), 2544 }, 2545 }, 2546 { 2547 // Should suggest 4h40m as the interval 2548 // Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40} 2549 desc: "Invalid Attempts - exponential delay", 2550 ReschedulePolicy: &ReschedulePolicy{ 2551 Attempts: 10, 2552 Interval: 30 * time.Minute, 2553 Delay: 5 * time.Minute, 2554 MaxDelay: 40 * time.Minute, 2555 DelayFunction: "exponential", 2556 }, 2557 errors: []error{ 2558 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 2559 "delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute, 2560 "exponential", 40*time.Minute), 2561 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 2562 280*time.Minute, 10), 2563 }, 2564 }, 2565 { 2566 // Should suggest 8h as the interval 2567 // Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80} 2568 desc: "Invalid Attempts - fibonacci delay", 2569 ReschedulePolicy: &ReschedulePolicy{ 2570 Attempts: 10, 2571 Interval: 1 * time.Hour, 2572 Delay: 20 * time.Minute, 2573 MaxDelay: 80 * time.Minute, 2574 DelayFunction: "fibonacci", 2575 }, 2576 errors: []error{ 2577 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 2578 "delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute, 2579 "fibonacci", 80*time.Minute), 2580 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 2581 480*time.Minute, 10), 2582 }, 2583 }, 2584 { 2585 desc: "Ambiguous Unlimited config, has both attempts and unlimited set", 2586 ReschedulePolicy: &ReschedulePolicy{ 2587 Attempts: 1, 2588 Unlimited: true, 2589 DelayFunction: "exponential", 2590 Delay: 5 * time.Minute, 2591 MaxDelay: 1 * time.Hour, 2592 }, 2593 errors: []error{ 2594 fmt.Errorf("Interval must be a non zero value if Attempts > 0"), 2595 fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true), 2596 }, 2597 }, 2598 { 2599 desc: "Invalid Unlimited config", 2600 ReschedulePolicy: &ReschedulePolicy{ 2601 Attempts: 1, 2602 Interval: 1 * time.Second, 2603 Unlimited: true, 2604 DelayFunction: "exponential", 2605 }, 2606 errors: []error{ 2607 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2608 fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2609 }, 2610 }, 2611 { 2612 desc: "Valid Unlimited config", 2613 ReschedulePolicy: &ReschedulePolicy{ 2614 Unlimited: true, 2615 DelayFunction: "exponential", 2616 Delay: 5 * time.Second, 2617 MaxDelay: 1 * time.Hour, 2618 }, 2619 }, 2620 } 2621 2622 for _, tc := range testCases { 2623 t.Run(tc.desc, func(t *testing.T) { 2624 require := require.New(t) 2625 gotErr := tc.ReschedulePolicy.Validate() 2626 if tc.errors != nil { 2627 // Validate all errors 2628 for _, err := range tc.errors { 2629 require.Contains(gotErr.Error(), err.Error()) 2630 } 2631 } else { 2632 require.Nil(gotErr) 2633 } 2634 }) 2635 } 2636 } 2637 2638 func TestAllocation_Index(t *testing.T) { 2639 a1 := Allocation{ 2640 Name: "example.cache[1]", 2641 TaskGroup: "cache", 2642 JobID: "example", 2643 Job: &Job{ 2644 ID: "example", 2645 TaskGroups: []*TaskGroup{{Name: "cache"}}}, 2646 } 2647 e1 := uint(1) 2648 a2 := a1.Copy() 2649 a2.Name = "example.cache[713127]" 2650 e2 := uint(713127) 2651 2652 if a1.Index() != e1 || a2.Index() != e2 { 2653 t.Fatalf("Got %d and %d", a1.Index(), a2.Index()) 2654 } 2655 } 2656 2657 func TestTaskArtifact_Validate_Source(t *testing.T) { 2658 valid := &TaskArtifact{GetterSource: "google.com"} 2659 if err := valid.Validate(); err != nil { 2660 t.Fatalf("unexpected error: %v", err) 2661 } 2662 } 2663 2664 func TestTaskArtifact_Validate_Dest(t *testing.T) { 2665 valid := &TaskArtifact{GetterSource: "google.com"} 2666 if err := valid.Validate(); err != nil { 2667 t.Fatalf("unexpected error: %v", err) 2668 } 2669 2670 valid.RelativeDest = "local/" 2671 if err := valid.Validate(); err != nil { 2672 t.Fatalf("unexpected error: %v", err) 2673 } 2674 2675 valid.RelativeDest = "local/.." 2676 if err := valid.Validate(); err != nil { 2677 t.Fatalf("unexpected error: %v", err) 2678 } 2679 2680 valid.RelativeDest = "local/../../.." 2681 if err := valid.Validate(); err == nil { 2682 t.Fatalf("expected error: %v", err) 2683 } 2684 } 2685 2686 // TestTaskArtifact_Hash asserts an artifact's hash changes when any of the 2687 // fields change. 2688 func TestTaskArtifact_Hash(t *testing.T) { 2689 t.Parallel() 2690 2691 cases := []TaskArtifact{ 2692 {}, 2693 { 2694 GetterSource: "a", 2695 }, 2696 { 2697 GetterSource: "b", 2698 }, 2699 { 2700 GetterSource: "b", 2701 GetterOptions: map[string]string{"c": "c"}, 2702 }, 2703 { 2704 GetterSource: "b", 2705 GetterOptions: map[string]string{ 2706 "c": "c", 2707 "d": "d", 2708 }, 2709 }, 2710 { 2711 GetterSource: "b", 2712 GetterOptions: map[string]string{ 2713 "c": "c", 2714 "d": "e", 2715 }, 2716 }, 2717 { 2718 GetterSource: "b", 2719 GetterOptions: map[string]string{ 2720 "c": "c", 2721 "d": "e", 2722 }, 2723 GetterMode: "f", 2724 }, 2725 { 2726 GetterSource: "b", 2727 GetterOptions: map[string]string{ 2728 "c": "c", 2729 "d": "e", 2730 }, 2731 GetterMode: "g", 2732 }, 2733 { 2734 GetterSource: "b", 2735 GetterOptions: map[string]string{ 2736 "c": "c", 2737 "d": "e", 2738 }, 2739 GetterMode: "g", 2740 RelativeDest: "h", 2741 }, 2742 { 2743 GetterSource: "b", 2744 GetterOptions: map[string]string{ 2745 "c": "c", 2746 "d": "e", 2747 }, 2748 GetterMode: "g", 2749 RelativeDest: "i", 2750 }, 2751 } 2752 2753 // Map of hash to source 2754 hashes := make(map[string]TaskArtifact, len(cases)) 2755 for _, tc := range cases { 2756 h := tc.Hash() 2757 2758 // Hash should be deterministic 2759 require.Equal(t, h, tc.Hash()) 2760 2761 // Hash should be unique 2762 if orig, ok := hashes[h]; ok { 2763 require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n", 2764 pretty.Sprint(tc), pretty.Sprint(orig), 2765 ) 2766 } 2767 hashes[h] = tc 2768 } 2769 2770 require.Len(t, hashes, len(cases)) 2771 } 2772 2773 func TestAllocation_ShouldMigrate(t *testing.T) { 2774 alloc := Allocation{ 2775 PreviousAllocation: "123", 2776 TaskGroup: "foo", 2777 Job: &Job{ 2778 TaskGroups: []*TaskGroup{ 2779 { 2780 Name: "foo", 2781 EphemeralDisk: &EphemeralDisk{ 2782 Migrate: true, 2783 Sticky: true, 2784 }, 2785 }, 2786 }, 2787 }, 2788 } 2789 2790 if !alloc.ShouldMigrate() { 2791 t.Fatalf("bad: %v", alloc) 2792 } 2793 2794 alloc1 := Allocation{ 2795 PreviousAllocation: "123", 2796 TaskGroup: "foo", 2797 Job: &Job{ 2798 TaskGroups: []*TaskGroup{ 2799 { 2800 Name: "foo", 2801 EphemeralDisk: &EphemeralDisk{}, 2802 }, 2803 }, 2804 }, 2805 } 2806 2807 if alloc1.ShouldMigrate() { 2808 t.Fatalf("bad: %v", alloc) 2809 } 2810 2811 alloc2 := Allocation{ 2812 PreviousAllocation: "123", 2813 TaskGroup: "foo", 2814 Job: &Job{ 2815 TaskGroups: []*TaskGroup{ 2816 { 2817 Name: "foo", 2818 EphemeralDisk: &EphemeralDisk{ 2819 Sticky: false, 2820 Migrate: true, 2821 }, 2822 }, 2823 }, 2824 }, 2825 } 2826 2827 if alloc2.ShouldMigrate() { 2828 t.Fatalf("bad: %v", alloc) 2829 } 2830 2831 alloc3 := Allocation{ 2832 PreviousAllocation: "123", 2833 TaskGroup: "foo", 2834 Job: &Job{ 2835 TaskGroups: []*TaskGroup{ 2836 { 2837 Name: "foo", 2838 }, 2839 }, 2840 }, 2841 } 2842 2843 if alloc3.ShouldMigrate() { 2844 t.Fatalf("bad: %v", alloc) 2845 } 2846 2847 // No previous 2848 alloc4 := Allocation{ 2849 TaskGroup: "foo", 2850 Job: &Job{ 2851 TaskGroups: []*TaskGroup{ 2852 { 2853 Name: "foo", 2854 EphemeralDisk: &EphemeralDisk{ 2855 Migrate: true, 2856 Sticky: true, 2857 }, 2858 }, 2859 }, 2860 }, 2861 } 2862 2863 if alloc4.ShouldMigrate() { 2864 t.Fatalf("bad: %v", alloc4) 2865 } 2866 } 2867 2868 func TestTaskArtifact_Validate_Checksum(t *testing.T) { 2869 cases := []struct { 2870 Input *TaskArtifact 2871 Err bool 2872 }{ 2873 { 2874 &TaskArtifact{ 2875 GetterSource: "foo.com", 2876 GetterOptions: map[string]string{ 2877 "checksum": "no-type", 2878 }, 2879 }, 2880 true, 2881 }, 2882 { 2883 &TaskArtifact{ 2884 GetterSource: "foo.com", 2885 GetterOptions: map[string]string{ 2886 "checksum": "md5:toosmall", 2887 }, 2888 }, 2889 true, 2890 }, 2891 { 2892 &TaskArtifact{ 2893 GetterSource: "foo.com", 2894 GetterOptions: map[string]string{ 2895 "checksum": "invalid:type", 2896 }, 2897 }, 2898 true, 2899 }, 2900 { 2901 &TaskArtifact{ 2902 GetterSource: "foo.com", 2903 GetterOptions: map[string]string{ 2904 "checksum": "md5:${ARTIFACT_CHECKSUM}", 2905 }, 2906 }, 2907 false, 2908 }, 2909 } 2910 2911 for i, tc := range cases { 2912 err := tc.Input.Validate() 2913 if (err != nil) != tc.Err { 2914 t.Fatalf("case %d: %v", i, err) 2915 continue 2916 } 2917 } 2918 } 2919 2920 func TestPlan_NormalizeAllocations(t *testing.T) { 2921 t.Parallel() 2922 plan := &Plan{ 2923 NodeUpdate: make(map[string][]*Allocation), 2924 NodePreemptions: make(map[string][]*Allocation), 2925 } 2926 stoppedAlloc := MockAlloc() 2927 desiredDesc := "Desired desc" 2928 plan.AppendStoppedAlloc(stoppedAlloc, desiredDesc, AllocClientStatusLost) 2929 preemptedAlloc := MockAlloc() 2930 preemptingAllocID := uuid.Generate() 2931 plan.AppendPreemptedAlloc(preemptedAlloc, preemptingAllocID) 2932 2933 plan.NormalizeAllocations() 2934 2935 actualStoppedAlloc := plan.NodeUpdate[stoppedAlloc.NodeID][0] 2936 expectedStoppedAlloc := &Allocation{ 2937 ID: stoppedAlloc.ID, 2938 DesiredDescription: desiredDesc, 2939 ClientStatus: AllocClientStatusLost, 2940 } 2941 assert.Equal(t, expectedStoppedAlloc, actualStoppedAlloc) 2942 actualPreemptedAlloc := plan.NodePreemptions[preemptedAlloc.NodeID][0] 2943 expectedPreemptedAlloc := &Allocation{ 2944 ID: preemptedAlloc.ID, 2945 PreemptedByAllocation: preemptingAllocID, 2946 } 2947 assert.Equal(t, expectedPreemptedAlloc, actualPreemptedAlloc) 2948 } 2949 2950 func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 2951 t.Parallel() 2952 plan := &Plan{ 2953 NodeUpdate: make(map[string][]*Allocation), 2954 } 2955 alloc := MockAlloc() 2956 desiredDesc := "Desired desc" 2957 2958 plan.AppendStoppedAlloc(alloc, desiredDesc, AllocClientStatusLost) 2959 2960 appendedAlloc := plan.NodeUpdate[alloc.NodeID][0] 2961 expectedAlloc := new(Allocation) 2962 *expectedAlloc = *alloc 2963 expectedAlloc.DesiredDescription = desiredDesc 2964 expectedAlloc.DesiredStatus = AllocDesiredStatusStop 2965 expectedAlloc.ClientStatus = AllocClientStatusLost 2966 expectedAlloc.Job = nil 2967 assert.Equal(t, expectedAlloc, appendedAlloc) 2968 assert.Equal(t, alloc.Job, plan.Job) 2969 } 2970 2971 func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 2972 t.Parallel() 2973 plan := &Plan{ 2974 NodePreemptions: make(map[string][]*Allocation), 2975 } 2976 alloc := MockAlloc() 2977 preemptingAllocID := uuid.Generate() 2978 2979 plan.AppendPreemptedAlloc(alloc, preemptingAllocID) 2980 2981 appendedAlloc := plan.NodePreemptions[alloc.NodeID][0] 2982 expectedAlloc := &Allocation{ 2983 ID: alloc.ID, 2984 PreemptedByAllocation: preemptingAllocID, 2985 JobID: alloc.JobID, 2986 Namespace: alloc.Namespace, 2987 DesiredStatus: AllocDesiredStatusEvict, 2988 DesiredDescription: fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID), 2989 AllocatedResources: alloc.AllocatedResources, 2990 TaskResources: alloc.TaskResources, 2991 SharedResources: alloc.SharedResources, 2992 } 2993 assert.Equal(t, expectedAlloc, appendedAlloc) 2994 } 2995 2996 func TestAllocation_MsgPackTags(t *testing.T) { 2997 t.Parallel() 2998 planType := reflect.TypeOf(Allocation{}) 2999 3000 msgPackTags, _ := planType.FieldByName("_struct") 3001 3002 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3003 } 3004 3005 func TestEvaluation_MsgPackTags(t *testing.T) { 3006 t.Parallel() 3007 planType := reflect.TypeOf(Evaluation{}) 3008 3009 msgPackTags, _ := planType.FieldByName("_struct") 3010 3011 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3012 } 3013 3014 func TestAllocation_Terminated(t *testing.T) { 3015 type desiredState struct { 3016 ClientStatus string 3017 DesiredStatus string 3018 Terminated bool 3019 } 3020 3021 harness := []desiredState{ 3022 { 3023 ClientStatus: AllocClientStatusPending, 3024 DesiredStatus: AllocDesiredStatusStop, 3025 Terminated: false, 3026 }, 3027 { 3028 ClientStatus: AllocClientStatusRunning, 3029 DesiredStatus: AllocDesiredStatusStop, 3030 Terminated: false, 3031 }, 3032 { 3033 ClientStatus: AllocClientStatusFailed, 3034 DesiredStatus: AllocDesiredStatusStop, 3035 Terminated: true, 3036 }, 3037 { 3038 ClientStatus: AllocClientStatusFailed, 3039 DesiredStatus: AllocDesiredStatusRun, 3040 Terminated: true, 3041 }, 3042 } 3043 3044 for _, state := range harness { 3045 alloc := Allocation{} 3046 alloc.DesiredStatus = state.DesiredStatus 3047 alloc.ClientStatus = state.ClientStatus 3048 if alloc.Terminated() != state.Terminated { 3049 t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated()) 3050 } 3051 } 3052 } 3053 3054 func TestAllocation_ShouldReschedule(t *testing.T) { 3055 type testCase struct { 3056 Desc string 3057 FailTime time.Time 3058 ClientStatus string 3059 DesiredStatus string 3060 ReschedulePolicy *ReschedulePolicy 3061 RescheduleTrackers []*RescheduleEvent 3062 ShouldReschedule bool 3063 } 3064 3065 fail := time.Now() 3066 3067 harness := []testCase{ 3068 { 3069 Desc: "Reschedule when desired state is stop", 3070 ClientStatus: AllocClientStatusPending, 3071 DesiredStatus: AllocDesiredStatusStop, 3072 FailTime: fail, 3073 ReschedulePolicy: nil, 3074 ShouldReschedule: false, 3075 }, 3076 { 3077 Desc: "Disabled rescheduling", 3078 ClientStatus: AllocClientStatusFailed, 3079 DesiredStatus: AllocDesiredStatusRun, 3080 FailTime: fail, 3081 ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute}, 3082 ShouldReschedule: false, 3083 }, 3084 { 3085 Desc: "Reschedule when client status is complete", 3086 ClientStatus: AllocClientStatusComplete, 3087 DesiredStatus: AllocDesiredStatusRun, 3088 FailTime: fail, 3089 ReschedulePolicy: nil, 3090 ShouldReschedule: false, 3091 }, 3092 { 3093 Desc: "Reschedule with nil reschedule policy", 3094 ClientStatus: AllocClientStatusFailed, 3095 DesiredStatus: AllocDesiredStatusRun, 3096 FailTime: fail, 3097 ReschedulePolicy: nil, 3098 ShouldReschedule: false, 3099 }, 3100 { 3101 Desc: "Reschedule with unlimited and attempts >0", 3102 ClientStatus: AllocClientStatusFailed, 3103 DesiredStatus: AllocDesiredStatusRun, 3104 FailTime: fail, 3105 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true}, 3106 ShouldReschedule: true, 3107 }, 3108 { 3109 Desc: "Reschedule when client status is complete", 3110 ClientStatus: AllocClientStatusComplete, 3111 DesiredStatus: AllocDesiredStatusRun, 3112 FailTime: fail, 3113 ReschedulePolicy: nil, 3114 ShouldReschedule: false, 3115 }, 3116 { 3117 Desc: "Reschedule with policy when client status complete", 3118 ClientStatus: AllocClientStatusComplete, 3119 DesiredStatus: AllocDesiredStatusRun, 3120 FailTime: fail, 3121 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 3122 ShouldReschedule: false, 3123 }, 3124 { 3125 Desc: "Reschedule with no previous attempts", 3126 ClientStatus: AllocClientStatusFailed, 3127 DesiredStatus: AllocDesiredStatusRun, 3128 FailTime: fail, 3129 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 3130 ShouldReschedule: true, 3131 }, 3132 { 3133 Desc: "Reschedule with leftover attempts", 3134 ClientStatus: AllocClientStatusFailed, 3135 DesiredStatus: AllocDesiredStatusRun, 3136 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 3137 FailTime: fail, 3138 RescheduleTrackers: []*RescheduleEvent{ 3139 { 3140 RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(), 3141 }, 3142 }, 3143 ShouldReschedule: true, 3144 }, 3145 { 3146 Desc: "Reschedule with too old previous attempts", 3147 ClientStatus: AllocClientStatusFailed, 3148 DesiredStatus: AllocDesiredStatusRun, 3149 FailTime: fail, 3150 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute}, 3151 RescheduleTrackers: []*RescheduleEvent{ 3152 { 3153 RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(), 3154 }, 3155 }, 3156 ShouldReschedule: true, 3157 }, 3158 { 3159 Desc: "Reschedule with no leftover attempts", 3160 ClientStatus: AllocClientStatusFailed, 3161 DesiredStatus: AllocDesiredStatusRun, 3162 FailTime: fail, 3163 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 3164 RescheduleTrackers: []*RescheduleEvent{ 3165 { 3166 RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), 3167 }, 3168 { 3169 RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(), 3170 }, 3171 }, 3172 ShouldReschedule: false, 3173 }, 3174 } 3175 3176 for _, state := range harness { 3177 alloc := Allocation{} 3178 alloc.DesiredStatus = state.DesiredStatus 3179 alloc.ClientStatus = state.ClientStatus 3180 alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers} 3181 3182 t.Run(state.Desc, func(t *testing.T) { 3183 if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule { 3184 t.Fatalf("expected %v but got %v", state.ShouldReschedule, got) 3185 } 3186 }) 3187 3188 } 3189 } 3190 3191 func TestAllocation_LastEventTime(t *testing.T) { 3192 type testCase struct { 3193 desc string 3194 taskState map[string]*TaskState 3195 expectedLastEventTime time.Time 3196 } 3197 3198 t1 := time.Now().UTC() 3199 3200 testCases := []testCase{ 3201 { 3202 desc: "nil task state", 3203 expectedLastEventTime: t1, 3204 }, 3205 { 3206 desc: "empty task state", 3207 taskState: make(map[string]*TaskState), 3208 expectedLastEventTime: t1, 3209 }, 3210 { 3211 desc: "Finished At not set", 3212 taskState: map[string]*TaskState{"foo": {State: "start", 3213 StartedAt: t1.Add(-2 * time.Hour)}}, 3214 expectedLastEventTime: t1, 3215 }, 3216 { 3217 desc: "One finished ", 3218 taskState: map[string]*TaskState{"foo": {State: "start", 3219 StartedAt: t1.Add(-2 * time.Hour), 3220 FinishedAt: t1.Add(-1 * time.Hour)}}, 3221 expectedLastEventTime: t1.Add(-1 * time.Hour), 3222 }, 3223 { 3224 desc: "Multiple task groups", 3225 taskState: map[string]*TaskState{"foo": {State: "start", 3226 StartedAt: t1.Add(-2 * time.Hour), 3227 FinishedAt: t1.Add(-1 * time.Hour)}, 3228 "bar": {State: "start", 3229 StartedAt: t1.Add(-2 * time.Hour), 3230 FinishedAt: t1.Add(-40 * time.Minute)}}, 3231 expectedLastEventTime: t1.Add(-40 * time.Minute), 3232 }, 3233 { 3234 desc: "No finishedAt set, one task event, should use modify time", 3235 taskState: map[string]*TaskState{"foo": { 3236 State: "run", 3237 StartedAt: t1.Add(-2 * time.Hour), 3238 Events: []*TaskEvent{ 3239 {Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()}, 3240 }}, 3241 }, 3242 expectedLastEventTime: t1, 3243 }, 3244 } 3245 for _, tc := range testCases { 3246 t.Run(tc.desc, func(t *testing.T) { 3247 alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()} 3248 alloc.TaskStates = tc.taskState 3249 require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime()) 3250 }) 3251 } 3252 } 3253 3254 func TestAllocation_NextDelay(t *testing.T) { 3255 type testCase struct { 3256 desc string 3257 reschedulePolicy *ReschedulePolicy 3258 alloc *Allocation 3259 expectedRescheduleTime time.Time 3260 expectedRescheduleEligible bool 3261 } 3262 now := time.Now() 3263 testCases := []testCase{ 3264 { 3265 desc: "Allocation hasn't failed yet", 3266 reschedulePolicy: &ReschedulePolicy{ 3267 DelayFunction: "constant", 3268 Delay: 5 * time.Second, 3269 }, 3270 alloc: &Allocation{}, 3271 expectedRescheduleTime: time.Time{}, 3272 expectedRescheduleEligible: false, 3273 }, 3274 { 3275 desc: "Allocation has no reschedule policy", 3276 alloc: &Allocation{}, 3277 expectedRescheduleTime: time.Time{}, 3278 expectedRescheduleEligible: false, 3279 }, 3280 { 3281 desc: "Allocation lacks task state", 3282 reschedulePolicy: &ReschedulePolicy{ 3283 DelayFunction: "constant", 3284 Delay: 5 * time.Second, 3285 Unlimited: true, 3286 }, 3287 alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()}, 3288 expectedRescheduleTime: now.UTC().Add(5 * time.Second), 3289 expectedRescheduleEligible: true, 3290 }, 3291 { 3292 desc: "linear delay, unlimited restarts, no reschedule tracker", 3293 reschedulePolicy: &ReschedulePolicy{ 3294 DelayFunction: "constant", 3295 Delay: 5 * time.Second, 3296 Unlimited: true, 3297 }, 3298 alloc: &Allocation{ 3299 ClientStatus: AllocClientStatusFailed, 3300 TaskStates: map[string]*TaskState{"foo": {State: "dead", 3301 StartedAt: now.Add(-1 * time.Hour), 3302 FinishedAt: now.Add(-2 * time.Second)}}, 3303 }, 3304 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3305 expectedRescheduleEligible: true, 3306 }, 3307 { 3308 desc: "linear delay with reschedule tracker", 3309 reschedulePolicy: &ReschedulePolicy{ 3310 DelayFunction: "constant", 3311 Delay: 5 * time.Second, 3312 Interval: 10 * time.Minute, 3313 Attempts: 2, 3314 }, 3315 alloc: &Allocation{ 3316 ClientStatus: AllocClientStatusFailed, 3317 TaskStates: map[string]*TaskState{"foo": {State: "start", 3318 StartedAt: now.Add(-1 * time.Hour), 3319 FinishedAt: now.Add(-2 * time.Second)}}, 3320 RescheduleTracker: &RescheduleTracker{ 3321 Events: []*RescheduleEvent{{ 3322 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 3323 Delay: 5 * time.Second, 3324 }}, 3325 }}, 3326 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3327 expectedRescheduleEligible: true, 3328 }, 3329 { 3330 desc: "linear delay with reschedule tracker, attempts exhausted", 3331 reschedulePolicy: &ReschedulePolicy{ 3332 DelayFunction: "constant", 3333 Delay: 5 * time.Second, 3334 Interval: 10 * time.Minute, 3335 Attempts: 2, 3336 }, 3337 alloc: &Allocation{ 3338 ClientStatus: AllocClientStatusFailed, 3339 TaskStates: map[string]*TaskState{"foo": {State: "start", 3340 StartedAt: now.Add(-1 * time.Hour), 3341 FinishedAt: now.Add(-2 * time.Second)}}, 3342 RescheduleTracker: &RescheduleTracker{ 3343 Events: []*RescheduleEvent{ 3344 { 3345 RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), 3346 Delay: 5 * time.Second, 3347 }, 3348 { 3349 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 3350 Delay: 5 * time.Second, 3351 }, 3352 }, 3353 }}, 3354 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3355 expectedRescheduleEligible: false, 3356 }, 3357 { 3358 desc: "exponential delay - no reschedule tracker", 3359 reschedulePolicy: &ReschedulePolicy{ 3360 DelayFunction: "exponential", 3361 Delay: 5 * time.Second, 3362 MaxDelay: 90 * time.Second, 3363 Unlimited: true, 3364 }, 3365 alloc: &Allocation{ 3366 ClientStatus: AllocClientStatusFailed, 3367 TaskStates: map[string]*TaskState{"foo": {State: "start", 3368 StartedAt: now.Add(-1 * time.Hour), 3369 FinishedAt: now.Add(-2 * time.Second)}}, 3370 }, 3371 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3372 expectedRescheduleEligible: true, 3373 }, 3374 { 3375 desc: "exponential delay with reschedule tracker", 3376 reschedulePolicy: &ReschedulePolicy{ 3377 DelayFunction: "exponential", 3378 Delay: 5 * time.Second, 3379 MaxDelay: 90 * time.Second, 3380 Unlimited: true, 3381 }, 3382 alloc: &Allocation{ 3383 ClientStatus: AllocClientStatusFailed, 3384 TaskStates: map[string]*TaskState{"foo": {State: "start", 3385 StartedAt: now.Add(-1 * time.Hour), 3386 FinishedAt: now.Add(-2 * time.Second)}}, 3387 RescheduleTracker: &RescheduleTracker{ 3388 Events: []*RescheduleEvent{ 3389 { 3390 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3391 Delay: 5 * time.Second, 3392 }, 3393 { 3394 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3395 Delay: 10 * time.Second, 3396 }, 3397 { 3398 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3399 Delay: 20 * time.Second, 3400 }, 3401 }, 3402 }}, 3403 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 3404 expectedRescheduleEligible: true, 3405 }, 3406 { 3407 desc: "exponential delay with delay ceiling reached", 3408 reschedulePolicy: &ReschedulePolicy{ 3409 DelayFunction: "exponential", 3410 Delay: 5 * time.Second, 3411 MaxDelay: 90 * time.Second, 3412 Unlimited: true, 3413 }, 3414 alloc: &Allocation{ 3415 ClientStatus: AllocClientStatusFailed, 3416 TaskStates: map[string]*TaskState{"foo": {State: "start", 3417 StartedAt: now.Add(-1 * time.Hour), 3418 FinishedAt: now.Add(-15 * time.Second)}}, 3419 RescheduleTracker: &RescheduleTracker{ 3420 Events: []*RescheduleEvent{ 3421 { 3422 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3423 Delay: 5 * time.Second, 3424 }, 3425 { 3426 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3427 Delay: 10 * time.Second, 3428 }, 3429 { 3430 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3431 Delay: 20 * time.Second, 3432 }, 3433 { 3434 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3435 Delay: 40 * time.Second, 3436 }, 3437 { 3438 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 3439 Delay: 80 * time.Second, 3440 }, 3441 }, 3442 }}, 3443 expectedRescheduleTime: now.Add(-15 * time.Second).Add(90 * time.Second), 3444 expectedRescheduleEligible: true, 3445 }, 3446 { 3447 // Test case where most recent reschedule ran longer than delay ceiling 3448 desc: "exponential delay, delay ceiling reset condition met", 3449 reschedulePolicy: &ReschedulePolicy{ 3450 DelayFunction: "exponential", 3451 Delay: 5 * time.Second, 3452 MaxDelay: 90 * time.Second, 3453 Unlimited: true, 3454 }, 3455 alloc: &Allocation{ 3456 ClientStatus: AllocClientStatusFailed, 3457 TaskStates: map[string]*TaskState{"foo": {State: "start", 3458 StartedAt: now.Add(-1 * time.Hour), 3459 FinishedAt: now.Add(-15 * time.Minute)}}, 3460 RescheduleTracker: &RescheduleTracker{ 3461 Events: []*RescheduleEvent{ 3462 { 3463 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3464 Delay: 5 * time.Second, 3465 }, 3466 { 3467 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3468 Delay: 10 * time.Second, 3469 }, 3470 { 3471 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3472 Delay: 20 * time.Second, 3473 }, 3474 { 3475 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3476 Delay: 40 * time.Second, 3477 }, 3478 { 3479 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3480 Delay: 80 * time.Second, 3481 }, 3482 { 3483 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3484 Delay: 90 * time.Second, 3485 }, 3486 { 3487 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3488 Delay: 90 * time.Second, 3489 }, 3490 }, 3491 }}, 3492 expectedRescheduleTime: now.Add(-15 * time.Minute).Add(5 * time.Second), 3493 expectedRescheduleEligible: true, 3494 }, 3495 { 3496 desc: "fibonacci delay - no reschedule tracker", 3497 reschedulePolicy: &ReschedulePolicy{ 3498 DelayFunction: "fibonacci", 3499 Delay: 5 * time.Second, 3500 MaxDelay: 90 * time.Second, 3501 Unlimited: true, 3502 }, 3503 alloc: &Allocation{ 3504 ClientStatus: AllocClientStatusFailed, 3505 TaskStates: map[string]*TaskState{"foo": {State: "start", 3506 StartedAt: now.Add(-1 * time.Hour), 3507 FinishedAt: now.Add(-2 * time.Second)}}}, 3508 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3509 expectedRescheduleEligible: true, 3510 }, 3511 { 3512 desc: "fibonacci delay with reschedule tracker", 3513 reschedulePolicy: &ReschedulePolicy{ 3514 DelayFunction: "fibonacci", 3515 Delay: 5 * time.Second, 3516 MaxDelay: 90 * time.Second, 3517 Unlimited: true, 3518 }, 3519 alloc: &Allocation{ 3520 ClientStatus: AllocClientStatusFailed, 3521 TaskStates: map[string]*TaskState{"foo": {State: "start", 3522 StartedAt: now.Add(-1 * time.Hour), 3523 FinishedAt: now.Add(-2 * time.Second)}}, 3524 RescheduleTracker: &RescheduleTracker{ 3525 Events: []*RescheduleEvent{ 3526 { 3527 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3528 Delay: 5 * time.Second, 3529 }, 3530 { 3531 RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(), 3532 Delay: 5 * time.Second, 3533 }, 3534 }, 3535 }}, 3536 expectedRescheduleTime: now.Add(-2 * time.Second).Add(10 * time.Second), 3537 expectedRescheduleEligible: true, 3538 }, 3539 { 3540 desc: "fibonacci delay with more events", 3541 reschedulePolicy: &ReschedulePolicy{ 3542 DelayFunction: "fibonacci", 3543 Delay: 5 * time.Second, 3544 MaxDelay: 90 * time.Second, 3545 Unlimited: true, 3546 }, 3547 alloc: &Allocation{ 3548 ClientStatus: AllocClientStatusFailed, 3549 TaskStates: map[string]*TaskState{"foo": {State: "start", 3550 StartedAt: now.Add(-1 * time.Hour), 3551 FinishedAt: now.Add(-2 * time.Second)}}, 3552 RescheduleTracker: &RescheduleTracker{ 3553 Events: []*RescheduleEvent{ 3554 { 3555 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3556 Delay: 5 * time.Second, 3557 }, 3558 { 3559 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3560 Delay: 5 * time.Second, 3561 }, 3562 { 3563 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3564 Delay: 10 * time.Second, 3565 }, 3566 { 3567 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3568 Delay: 15 * time.Second, 3569 }, 3570 { 3571 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3572 Delay: 25 * time.Second, 3573 }, 3574 }, 3575 }}, 3576 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 3577 expectedRescheduleEligible: true, 3578 }, 3579 { 3580 desc: "fibonacci delay with delay ceiling reached", 3581 reschedulePolicy: &ReschedulePolicy{ 3582 DelayFunction: "fibonacci", 3583 Delay: 5 * time.Second, 3584 MaxDelay: 50 * time.Second, 3585 Unlimited: true, 3586 }, 3587 alloc: &Allocation{ 3588 ClientStatus: AllocClientStatusFailed, 3589 TaskStates: map[string]*TaskState{"foo": {State: "start", 3590 StartedAt: now.Add(-1 * time.Hour), 3591 FinishedAt: now.Add(-15 * time.Second)}}, 3592 RescheduleTracker: &RescheduleTracker{ 3593 Events: []*RescheduleEvent{ 3594 { 3595 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3596 Delay: 5 * time.Second, 3597 }, 3598 { 3599 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3600 Delay: 5 * time.Second, 3601 }, 3602 { 3603 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3604 Delay: 10 * time.Second, 3605 }, 3606 { 3607 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3608 Delay: 15 * time.Second, 3609 }, 3610 { 3611 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3612 Delay: 25 * time.Second, 3613 }, 3614 { 3615 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 3616 Delay: 40 * time.Second, 3617 }, 3618 }, 3619 }}, 3620 expectedRescheduleTime: now.Add(-15 * time.Second).Add(50 * time.Second), 3621 expectedRescheduleEligible: true, 3622 }, 3623 { 3624 desc: "fibonacci delay with delay reset condition met", 3625 reschedulePolicy: &ReschedulePolicy{ 3626 DelayFunction: "fibonacci", 3627 Delay: 5 * time.Second, 3628 MaxDelay: 50 * time.Second, 3629 Unlimited: true, 3630 }, 3631 alloc: &Allocation{ 3632 ClientStatus: AllocClientStatusFailed, 3633 TaskStates: map[string]*TaskState{"foo": {State: "start", 3634 StartedAt: now.Add(-1 * time.Hour), 3635 FinishedAt: now.Add(-5 * time.Minute)}}, 3636 RescheduleTracker: &RescheduleTracker{ 3637 Events: []*RescheduleEvent{ 3638 { 3639 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3640 Delay: 5 * time.Second, 3641 }, 3642 { 3643 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3644 Delay: 5 * time.Second, 3645 }, 3646 { 3647 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3648 Delay: 10 * time.Second, 3649 }, 3650 { 3651 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3652 Delay: 15 * time.Second, 3653 }, 3654 { 3655 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3656 Delay: 25 * time.Second, 3657 }, 3658 { 3659 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3660 Delay: 40 * time.Second, 3661 }, 3662 }, 3663 }}, 3664 expectedRescheduleTime: now.Add(-5 * time.Minute).Add(5 * time.Second), 3665 expectedRescheduleEligible: true, 3666 }, 3667 { 3668 desc: "fibonacci delay with the most recent event that reset delay value", 3669 reschedulePolicy: &ReschedulePolicy{ 3670 DelayFunction: "fibonacci", 3671 Delay: 5 * time.Second, 3672 MaxDelay: 50 * time.Second, 3673 Unlimited: true, 3674 }, 3675 alloc: &Allocation{ 3676 ClientStatus: AllocClientStatusFailed, 3677 TaskStates: map[string]*TaskState{"foo": {State: "start", 3678 StartedAt: now.Add(-1 * time.Hour), 3679 FinishedAt: now.Add(-5 * time.Second)}}, 3680 RescheduleTracker: &RescheduleTracker{ 3681 Events: []*RescheduleEvent{ 3682 { 3683 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3684 Delay: 5 * time.Second, 3685 }, 3686 { 3687 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3688 Delay: 5 * time.Second, 3689 }, 3690 { 3691 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3692 Delay: 10 * time.Second, 3693 }, 3694 { 3695 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3696 Delay: 15 * time.Second, 3697 }, 3698 { 3699 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3700 Delay: 25 * time.Second, 3701 }, 3702 { 3703 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3704 Delay: 40 * time.Second, 3705 }, 3706 { 3707 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3708 Delay: 50 * time.Second, 3709 }, 3710 { 3711 RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(), 3712 Delay: 5 * time.Second, 3713 }, 3714 }, 3715 }}, 3716 expectedRescheduleTime: now.Add(-5 * time.Second).Add(5 * time.Second), 3717 expectedRescheduleEligible: true, 3718 }, 3719 } 3720 for _, tc := range testCases { 3721 t.Run(tc.desc, func(t *testing.T) { 3722 require := require.New(t) 3723 j := testJob() 3724 if tc.reschedulePolicy != nil { 3725 j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy 3726 } 3727 tc.alloc.Job = j 3728 tc.alloc.TaskGroup = j.TaskGroups[0].Name 3729 reschedTime, allowed := tc.alloc.NextRescheduleTime() 3730 require.Equal(tc.expectedRescheduleEligible, allowed) 3731 require.Equal(tc.expectedRescheduleTime, reschedTime) 3732 }) 3733 } 3734 3735 } 3736 3737 func TestRescheduleTracker_Copy(t *testing.T) { 3738 type testCase struct { 3739 original *RescheduleTracker 3740 expected *RescheduleTracker 3741 } 3742 3743 cases := []testCase{ 3744 {nil, nil}, 3745 {&RescheduleTracker{Events: []*RescheduleEvent{ 3746 {RescheduleTime: 2, 3747 PrevAllocID: "12", 3748 PrevNodeID: "12", 3749 Delay: 30 * time.Second}, 3750 }}, &RescheduleTracker{Events: []*RescheduleEvent{ 3751 {RescheduleTime: 2, 3752 PrevAllocID: "12", 3753 PrevNodeID: "12", 3754 Delay: 30 * time.Second}, 3755 }}}, 3756 } 3757 3758 for _, tc := range cases { 3759 if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) { 3760 t.Fatalf("expected %v but got %v", *tc.expected, *got) 3761 } 3762 } 3763 } 3764 3765 func TestVault_Validate(t *testing.T) { 3766 v := &Vault{ 3767 Env: true, 3768 ChangeMode: VaultChangeModeNoop, 3769 } 3770 3771 if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") { 3772 t.Fatalf("Expected policy list empty error") 3773 } 3774 3775 v.Policies = []string{"foo", "root"} 3776 v.ChangeMode = VaultChangeModeSignal 3777 3778 err := v.Validate() 3779 if err == nil { 3780 t.Fatalf("Expected validation errors") 3781 } 3782 3783 if !strings.Contains(err.Error(), "Signal must") { 3784 t.Fatalf("Expected signal empty error") 3785 } 3786 if !strings.Contains(err.Error(), "root") { 3787 t.Fatalf("Expected root error") 3788 } 3789 } 3790 3791 func TestParameterizedJobConfig_Validate(t *testing.T) { 3792 d := &ParameterizedJobConfig{ 3793 Payload: "foo", 3794 } 3795 3796 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") { 3797 t.Fatalf("Expected unknown payload requirement: %v", err) 3798 } 3799 3800 d.Payload = DispatchPayloadOptional 3801 d.MetaOptional = []string{"foo", "bar"} 3802 d.MetaRequired = []string{"bar", "baz"} 3803 3804 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") { 3805 t.Fatalf("Expected meta not being disjoint error: %v", err) 3806 } 3807 } 3808 3809 func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { 3810 job := testJob() 3811 job.ParameterizedJob = &ParameterizedJobConfig{ 3812 Payload: DispatchPayloadOptional, 3813 } 3814 job.Type = JobTypeSystem 3815 3816 if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") { 3817 t.Fatalf("Expected bad scheduler tpye: %v", err) 3818 } 3819 } 3820 3821 func TestParameterizedJobConfig_Canonicalize(t *testing.T) { 3822 d := &ParameterizedJobConfig{} 3823 d.Canonicalize() 3824 if d.Payload != DispatchPayloadOptional { 3825 t.Fatalf("Canonicalize failed") 3826 } 3827 } 3828 3829 func TestDispatchPayloadConfig_Validate(t *testing.T) { 3830 d := &DispatchPayloadConfig{ 3831 File: "foo", 3832 } 3833 3834 // task/local/haha 3835 if err := d.Validate(); err != nil { 3836 t.Fatalf("bad: %v", err) 3837 } 3838 3839 // task/haha 3840 d.File = "../haha" 3841 if err := d.Validate(); err != nil { 3842 t.Fatalf("bad: %v", err) 3843 } 3844 3845 // ../haha 3846 d.File = "../../../haha" 3847 if err := d.Validate(); err == nil { 3848 t.Fatalf("bad: %v", err) 3849 } 3850 } 3851 3852 func TestIsRecoverable(t *testing.T) { 3853 if IsRecoverable(nil) { 3854 t.Errorf("nil should not be recoverable") 3855 } 3856 if IsRecoverable(NewRecoverableError(nil, true)) { 3857 t.Errorf("NewRecoverableError(nil, true) should not be recoverable") 3858 } 3859 if IsRecoverable(fmt.Errorf("i promise im recoverable")) { 3860 t.Errorf("Custom errors should not be recoverable") 3861 } 3862 if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) { 3863 t.Errorf("Explicitly unrecoverable errors should not be recoverable") 3864 } 3865 if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) { 3866 t.Errorf("Explicitly recoverable errors *should* be recoverable") 3867 } 3868 } 3869 3870 func TestACLTokenValidate(t *testing.T) { 3871 tk := &ACLToken{} 3872 3873 // Missing a type 3874 err := tk.Validate() 3875 assert.NotNil(t, err) 3876 if !strings.Contains(err.Error(), "client or management") { 3877 t.Fatalf("bad: %v", err) 3878 } 3879 3880 // Missing policies 3881 tk.Type = ACLClientToken 3882 err = tk.Validate() 3883 assert.NotNil(t, err) 3884 if !strings.Contains(err.Error(), "missing policies") { 3885 t.Fatalf("bad: %v", err) 3886 } 3887 3888 // Invalid policies 3889 tk.Type = ACLManagementToken 3890 tk.Policies = []string{"foo"} 3891 err = tk.Validate() 3892 assert.NotNil(t, err) 3893 if !strings.Contains(err.Error(), "associated with policies") { 3894 t.Fatalf("bad: %v", err) 3895 } 3896 3897 // Name too long policies 3898 tk.Name = "" 3899 for i := 0; i < 8; i++ { 3900 tk.Name += uuid.Generate() 3901 } 3902 tk.Policies = nil 3903 err = tk.Validate() 3904 assert.NotNil(t, err) 3905 if !strings.Contains(err.Error(), "too long") { 3906 t.Fatalf("bad: %v", err) 3907 } 3908 3909 // Make it valid 3910 tk.Name = "foo" 3911 err = tk.Validate() 3912 assert.Nil(t, err) 3913 } 3914 3915 func TestACLTokenPolicySubset(t *testing.T) { 3916 tk := &ACLToken{ 3917 Type: ACLClientToken, 3918 Policies: []string{"foo", "bar", "baz"}, 3919 } 3920 3921 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 3922 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 3923 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 3924 assert.Equal(t, true, tk.PolicySubset([]string{})) 3925 assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"})) 3926 assert.Equal(t, false, tk.PolicySubset([]string{"new"})) 3927 3928 tk = &ACLToken{ 3929 Type: ACLManagementToken, 3930 } 3931 3932 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 3933 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 3934 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 3935 assert.Equal(t, true, tk.PolicySubset([]string{})) 3936 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"})) 3937 assert.Equal(t, true, tk.PolicySubset([]string{"new"})) 3938 } 3939 3940 func TestACLTokenSetHash(t *testing.T) { 3941 tk := &ACLToken{ 3942 Name: "foo", 3943 Type: ACLClientToken, 3944 Policies: []string{"foo", "bar"}, 3945 Global: false, 3946 } 3947 out1 := tk.SetHash() 3948 assert.NotNil(t, out1) 3949 assert.NotNil(t, tk.Hash) 3950 assert.Equal(t, out1, tk.Hash) 3951 3952 tk.Policies = []string{"foo"} 3953 out2 := tk.SetHash() 3954 assert.NotNil(t, out2) 3955 assert.NotNil(t, tk.Hash) 3956 assert.Equal(t, out2, tk.Hash) 3957 assert.NotEqual(t, out1, out2) 3958 } 3959 3960 func TestACLPolicySetHash(t *testing.T) { 3961 ap := &ACLPolicy{ 3962 Name: "foo", 3963 Description: "great policy", 3964 Rules: "node { policy = \"read\" }", 3965 } 3966 out1 := ap.SetHash() 3967 assert.NotNil(t, out1) 3968 assert.NotNil(t, ap.Hash) 3969 assert.Equal(t, out1, ap.Hash) 3970 3971 ap.Rules = "node { policy = \"write\" }" 3972 out2 := ap.SetHash() 3973 assert.NotNil(t, out2) 3974 assert.NotNil(t, ap.Hash) 3975 assert.Equal(t, out2, ap.Hash) 3976 assert.NotEqual(t, out1, out2) 3977 } 3978 3979 func TestTaskEventPopulate(t *testing.T) { 3980 prepopulatedEvent := NewTaskEvent(TaskSetup) 3981 prepopulatedEvent.DisplayMessage = "Hola" 3982 testcases := []struct { 3983 event *TaskEvent 3984 expectedMsg string 3985 }{ 3986 {nil, ""}, 3987 {prepopulatedEvent, "Hola"}, 3988 {NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"}, 3989 {NewTaskEvent(TaskStarted), "Task started by client"}, 3990 {NewTaskEvent(TaskReceived), "Task received by client"}, 3991 {NewTaskEvent(TaskFailedValidation), "Validation of task failed"}, 3992 {NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"}, 3993 {NewTaskEvent(TaskSetupFailure), "Task setup failed"}, 3994 {NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"}, 3995 {NewTaskEvent(TaskDriverFailure), "Failed to start task"}, 3996 {NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"}, 3997 {NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"}, 3998 {NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"}, 3999 {NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"}, 4000 {NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"}, 4001 {NewTaskEvent(TaskKilling), "Sent interrupt"}, 4002 {NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"}, 4003 {NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"}, 4004 {NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"}, 4005 {NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""}, 4006 {NewTaskEvent(TaskKilled), "Task successfully killed"}, 4007 {NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"}, 4008 {NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"}, 4009 {NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"}, 4010 {NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"}, 4011 {NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"}, 4012 {NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"}, 4013 {NewTaskEvent(TaskSignaling), "Task being sent a signal"}, 4014 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"}, 4015 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"}, 4016 {NewTaskEvent(TaskRestartSignal), "Task signaled to restart"}, 4017 {NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"}, 4018 {NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"}, 4019 {NewTaskEvent("Unknown Type, No message"), ""}, 4020 {NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"}, 4021 } 4022 4023 for _, tc := range testcases { 4024 tc.event.PopulateEventDisplayMessage() 4025 if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg { 4026 t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage) 4027 } 4028 } 4029 } 4030 4031 func TestNetworkResourcesEquals(t *testing.T) { 4032 require := require.New(t) 4033 var networkResourcesTest = []struct { 4034 input []*NetworkResource 4035 expected bool 4036 errorMsg string 4037 }{ 4038 { 4039 []*NetworkResource{ 4040 { 4041 IP: "10.0.0.1", 4042 MBits: 50, 4043 ReservedPorts: []Port{{"web", 80}}, 4044 }, 4045 { 4046 IP: "10.0.0.1", 4047 MBits: 50, 4048 ReservedPorts: []Port{{"web", 80}}, 4049 }, 4050 }, 4051 true, 4052 "Equal network resources should return true", 4053 }, 4054 { 4055 []*NetworkResource{ 4056 { 4057 IP: "10.0.0.0", 4058 MBits: 50, 4059 ReservedPorts: []Port{{"web", 80}}, 4060 }, 4061 { 4062 IP: "10.0.0.1", 4063 MBits: 50, 4064 ReservedPorts: []Port{{"web", 80}}, 4065 }, 4066 }, 4067 false, 4068 "Different IP addresses should return false", 4069 }, 4070 { 4071 []*NetworkResource{ 4072 { 4073 IP: "10.0.0.1", 4074 MBits: 40, 4075 ReservedPorts: []Port{{"web", 80}}, 4076 }, 4077 { 4078 IP: "10.0.0.1", 4079 MBits: 50, 4080 ReservedPorts: []Port{{"web", 80}}, 4081 }, 4082 }, 4083 false, 4084 "Different MBits values should return false", 4085 }, 4086 { 4087 []*NetworkResource{ 4088 { 4089 IP: "10.0.0.1", 4090 MBits: 50, 4091 ReservedPorts: []Port{{"web", 80}}, 4092 }, 4093 { 4094 IP: "10.0.0.1", 4095 MBits: 50, 4096 ReservedPorts: []Port{{"web", 80}, {"web", 80}}, 4097 }, 4098 }, 4099 false, 4100 "Different ReservedPorts lengths should return false", 4101 }, 4102 { 4103 []*NetworkResource{ 4104 { 4105 IP: "10.0.0.1", 4106 MBits: 50, 4107 ReservedPorts: []Port{{"web", 80}}, 4108 }, 4109 { 4110 IP: "10.0.0.1", 4111 MBits: 50, 4112 ReservedPorts: []Port{}, 4113 }, 4114 }, 4115 false, 4116 "Empty and non empty ReservedPorts values should return false", 4117 }, 4118 { 4119 []*NetworkResource{ 4120 { 4121 IP: "10.0.0.1", 4122 MBits: 50, 4123 ReservedPorts: []Port{{"web", 80}}, 4124 }, 4125 { 4126 IP: "10.0.0.1", 4127 MBits: 50, 4128 ReservedPorts: []Port{{"notweb", 80}}, 4129 }, 4130 }, 4131 false, 4132 "Different valued ReservedPorts values should return false", 4133 }, 4134 { 4135 []*NetworkResource{ 4136 { 4137 IP: "10.0.0.1", 4138 MBits: 50, 4139 DynamicPorts: []Port{{"web", 80}}, 4140 }, 4141 { 4142 IP: "10.0.0.1", 4143 MBits: 50, 4144 DynamicPorts: []Port{{"web", 80}, {"web", 80}}, 4145 }, 4146 }, 4147 false, 4148 "Different DynamicPorts lengths should return false", 4149 }, 4150 { 4151 []*NetworkResource{ 4152 { 4153 IP: "10.0.0.1", 4154 MBits: 50, 4155 DynamicPorts: []Port{{"web", 80}}, 4156 }, 4157 { 4158 IP: "10.0.0.1", 4159 MBits: 50, 4160 DynamicPorts: []Port{}, 4161 }, 4162 }, 4163 false, 4164 "Empty and non empty DynamicPorts values should return false", 4165 }, 4166 { 4167 []*NetworkResource{ 4168 { 4169 IP: "10.0.0.1", 4170 MBits: 50, 4171 DynamicPorts: []Port{{"web", 80}}, 4172 }, 4173 { 4174 IP: "10.0.0.1", 4175 MBits: 50, 4176 DynamicPorts: []Port{{"notweb", 80}}, 4177 }, 4178 }, 4179 false, 4180 "Different valued DynamicPorts values should return false", 4181 }, 4182 } 4183 for _, testCase := range networkResourcesTest { 4184 first := testCase.input[0] 4185 second := testCase.input[1] 4186 require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg) 4187 } 4188 } 4189 4190 func TestNode_Canonicalize(t *testing.T) { 4191 t.Parallel() 4192 require := require.New(t) 4193 4194 // Make sure the eligiblity is set properly 4195 node := &Node{} 4196 node.Canonicalize() 4197 require.Equal(NodeSchedulingEligible, node.SchedulingEligibility) 4198 4199 node = &Node{ 4200 Drain: true, 4201 } 4202 node.Canonicalize() 4203 require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility) 4204 } 4205 4206 func TestNode_Copy(t *testing.T) { 4207 t.Parallel() 4208 require := require.New(t) 4209 4210 node := &Node{ 4211 ID: uuid.Generate(), 4212 SecretID: uuid.Generate(), 4213 Datacenter: "dc1", 4214 Name: "foobar", 4215 Attributes: map[string]string{ 4216 "kernel.name": "linux", 4217 "arch": "x86", 4218 "nomad.version": "0.5.0", 4219 "driver.exec": "1", 4220 "driver.mock_driver": "1", 4221 }, 4222 Resources: &Resources{ 4223 CPU: 4000, 4224 MemoryMB: 8192, 4225 DiskMB: 100 * 1024, 4226 Networks: []*NetworkResource{ 4227 { 4228 Device: "eth0", 4229 CIDR: "192.168.0.100/32", 4230 MBits: 1000, 4231 }, 4232 }, 4233 }, 4234 Reserved: &Resources{ 4235 CPU: 100, 4236 MemoryMB: 256, 4237 DiskMB: 4 * 1024, 4238 Networks: []*NetworkResource{ 4239 { 4240 Device: "eth0", 4241 IP: "192.168.0.100", 4242 ReservedPorts: []Port{{Label: "ssh", Value: 22}}, 4243 MBits: 1, 4244 }, 4245 }, 4246 }, 4247 NodeResources: &NodeResources{ 4248 Cpu: NodeCpuResources{ 4249 CpuShares: 4000, 4250 }, 4251 Memory: NodeMemoryResources{ 4252 MemoryMB: 8192, 4253 }, 4254 Disk: NodeDiskResources{ 4255 DiskMB: 100 * 1024, 4256 }, 4257 Networks: []*NetworkResource{ 4258 { 4259 Device: "eth0", 4260 CIDR: "192.168.0.100/32", 4261 MBits: 1000, 4262 }, 4263 }, 4264 }, 4265 ReservedResources: &NodeReservedResources{ 4266 Cpu: NodeReservedCpuResources{ 4267 CpuShares: 100, 4268 }, 4269 Memory: NodeReservedMemoryResources{ 4270 MemoryMB: 256, 4271 }, 4272 Disk: NodeReservedDiskResources{ 4273 DiskMB: 4 * 1024, 4274 }, 4275 Networks: NodeReservedNetworkResources{ 4276 ReservedHostPorts: "22", 4277 }, 4278 }, 4279 Links: map[string]string{ 4280 "consul": "foobar.dc1", 4281 }, 4282 Meta: map[string]string{ 4283 "pci-dss": "true", 4284 "database": "mysql", 4285 "version": "5.6", 4286 }, 4287 NodeClass: "linux-medium-pci", 4288 Status: NodeStatusReady, 4289 SchedulingEligibility: NodeSchedulingEligible, 4290 Drivers: map[string]*DriverInfo{ 4291 "mock_driver": { 4292 Attributes: map[string]string{"running": "1"}, 4293 Detected: true, 4294 Healthy: true, 4295 HealthDescription: "Currently active", 4296 UpdateTime: time.Now(), 4297 }, 4298 }, 4299 } 4300 node.ComputeClass() 4301 4302 node2 := node.Copy() 4303 4304 require.Equal(node.Attributes, node2.Attributes) 4305 require.Equal(node.Resources, node2.Resources) 4306 require.Equal(node.Reserved, node2.Reserved) 4307 require.Equal(node.Links, node2.Links) 4308 require.Equal(node.Meta, node2.Meta) 4309 require.Equal(node.Events, node2.Events) 4310 require.Equal(node.DrainStrategy, node2.DrainStrategy) 4311 require.Equal(node.Drivers, node2.Drivers) 4312 } 4313 4314 func TestSpread_Validate(t *testing.T) { 4315 type tc struct { 4316 spread *Spread 4317 err error 4318 name string 4319 } 4320 4321 testCases := []tc{ 4322 { 4323 spread: &Spread{}, 4324 err: fmt.Errorf("Missing spread attribute"), 4325 name: "empty spread", 4326 }, 4327 { 4328 spread: &Spread{ 4329 Attribute: "${node.datacenter}", 4330 Weight: -1, 4331 }, 4332 err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), 4333 name: "Invalid weight", 4334 }, 4335 { 4336 spread: &Spread{ 4337 Attribute: "${node.datacenter}", 4338 Weight: 110, 4339 }, 4340 err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), 4341 name: "Invalid weight", 4342 }, 4343 { 4344 spread: &Spread{ 4345 Attribute: "${node.datacenter}", 4346 Weight: 50, 4347 SpreadTarget: []*SpreadTarget{ 4348 { 4349 Value: "dc1", 4350 Percent: 25, 4351 }, 4352 { 4353 Value: "dc2", 4354 Percent: 150, 4355 }, 4356 }, 4357 }, 4358 err: fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"), 4359 name: "Invalid percentages", 4360 }, 4361 { 4362 spread: &Spread{ 4363 Attribute: "${node.datacenter}", 4364 Weight: 50, 4365 SpreadTarget: []*SpreadTarget{ 4366 { 4367 Value: "dc1", 4368 Percent: 75, 4369 }, 4370 { 4371 Value: "dc2", 4372 Percent: 75, 4373 }, 4374 }, 4375 }, 4376 err: fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150), 4377 name: "Invalid percentages", 4378 }, 4379 { 4380 spread: &Spread{ 4381 Attribute: "${node.datacenter}", 4382 Weight: 50, 4383 SpreadTarget: []*SpreadTarget{ 4384 { 4385 Value: "dc1", 4386 Percent: 25, 4387 }, 4388 { 4389 Value: "dc1", 4390 Percent: 50, 4391 }, 4392 }, 4393 }, 4394 err: fmt.Errorf("Spread target value \"dc1\" already defined"), 4395 name: "No spread targets", 4396 }, 4397 { 4398 spread: &Spread{ 4399 Attribute: "${node.datacenter}", 4400 Weight: 50, 4401 SpreadTarget: []*SpreadTarget{ 4402 { 4403 Value: "dc1", 4404 Percent: 25, 4405 }, 4406 { 4407 Value: "dc2", 4408 Percent: 50, 4409 }, 4410 }, 4411 }, 4412 err: nil, 4413 name: "Valid spread", 4414 }, 4415 } 4416 4417 for _, tc := range testCases { 4418 t.Run(tc.name, func(t *testing.T) { 4419 err := tc.spread.Validate() 4420 if tc.err != nil { 4421 require.NotNil(t, err) 4422 require.Contains(t, err.Error(), tc.err.Error()) 4423 } else { 4424 require.Nil(t, err) 4425 } 4426 }) 4427 } 4428 } 4429 4430 func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) { 4431 require := require.New(t) 4432 cases := []struct { 4433 Input string 4434 Parsed []uint64 4435 Err bool 4436 }{ 4437 { 4438 "1,2,3", 4439 []uint64{1, 2, 3}, 4440 false, 4441 }, 4442 { 4443 "3,1,2,1,2,3,1-3", 4444 []uint64{1, 2, 3}, 4445 false, 4446 }, 4447 { 4448 "3-1", 4449 nil, 4450 true, 4451 }, 4452 { 4453 "1-3,2-4", 4454 []uint64{1, 2, 3, 4}, 4455 false, 4456 }, 4457 { 4458 "1-3,4,5-5,6,7,8-10", 4459 []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 4460 false, 4461 }, 4462 } 4463 4464 for i, tc := range cases { 4465 r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input} 4466 out, err := r.ParseReservedHostPorts() 4467 if (err != nil) != tc.Err { 4468 t.Fatalf("test case %d: %v", i, err) 4469 continue 4470 } 4471 4472 require.Equal(out, tc.Parsed) 4473 } 4474 }