github.com/ThomasObenaus/nomad@v0.11.1/nomad/structs/structs_test.go

package structs

import (
	"fmt"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/helper/uuid"

	"github.com/kr/pretty"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestJob_Validate(t *testing.T) {
	j := &Job{}
	err := j.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "job region") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "job ID") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "job name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "namespace") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "job type") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[5].Error(), "priority") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[6].Error(), "datacenters") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[7].Error(), "task groups") {
		t.Fatalf("err: %s", err)
	}

	j = &Job{
		Type: "invalid-job-type",
	}
	err = j.Validate()
	if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	j = &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Error(), "Periodic") {
		t.Fatalf("err: %s", err)
	}

	j = &Job{
		Region:      "global",
		ID:          uuid.Generate(),
		Namespace:   "test",
		Name:        "my-job",
		Type:        JobTypeService,
		Priority:    50,
		Datacenters: []string{"dc1"},
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay:    10 * time.Second,
					Attempts: 10,
				},
			},
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay:    10 * time.Second,
					Attempts: 10,
				},
			},
			{
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay:    10 * time.Second,
					Attempts: 10,
				},
			},
		},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") {
		t.Fatalf("err: %s", err)
	}

	// test for empty datacenters
	j = &Job{
		Datacenters: []string{""},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Error(), "datacenter must be non-empty string") {
		t.Fatalf("err: %s", err)
	}
}

func TestJob_ValidateScaling(t *testing.T) {
	require := require.New(t)

	p := &ScalingPolicy{
		Policy:  nil, // allowed to be nil
		Min:     5,
		Max:     5,
		Enabled: true,
	}
	job := testJob()
	job.TaskGroups[0].Scaling = p
	job.TaskGroups[0].Count = 5

	require.NoError(job.Validate())

	// min <= max
	p.Max = 0
	p.Min = 10
	err := job.Validate()
	require.Error(err)
	mErr := err.(*multierror.Error)
	require.Len(mErr.Errors, 1)
	require.Contains(mErr.Errors[0].Error(), "maximum count must not be less than minimum count")
	require.Contains(mErr.Errors[0].Error(), "task group count must not be less than minimum count in scaling policy")
	require.Contains(mErr.Errors[0].Error(), "task group count must not be greater than maximum count in scaling policy")

	// count <= max
	p.Max = 0
	p.Min = 5
	job.TaskGroups[0].Count = 5
	err = job.Validate()
	require.Error(err)
	mErr = err.(*multierror.Error)
	require.Len(mErr.Errors, 1)
	require.Contains(mErr.Errors[0].Error(), "maximum count must not be less than minimum count")
	require.Contains(mErr.Errors[0].Error(), "task group count must not be greater than maximum count in scaling policy")

	// min <= count
	job.TaskGroups[0].Count = 0
	p.Min = 5
	p.Max = 5
	err = job.Validate()
	require.Error(err)
	mErr = err.(*multierror.Error)
	require.Len(mErr.Errors, 1)
	require.Contains(mErr.Errors[0].Error(), "task group count must not be less than minimum count in scaling policy")
}

func TestJob_Warnings(t *testing.T) {
	cases := []struct {
		Name     string
		Job      *Job
		Expected []string
	}{
		{
			Name:     "Higher counts for update stanza",
			Expected: []string{"max parallel count is greater"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
						Update: &UpdateStrategy{
							MaxParallel: 10,
						},
					},
				},
			},
		},
		{
			Name:     "AutoPromote mixed TaskGroups",
			Expected: []string{"auto_promote must be true for all groups"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Update: &UpdateStrategy{
							AutoPromote: true,
						},
					},
					{
						Update: &UpdateStrategy{
							AutoPromote: false,
						},
					},
				},
			},
		},
		{
			Name:     "Template.VaultGrace Deprecated",
			Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Tasks: []*Task{
							{
								Templates: []*Template{
									{
										VaultGrace: 1,
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			warnings := c.Job.Warnings()
			if warnings == nil {
				if len(c.Expected) == 0 {
					return
				} else {
					t.Fatal("Got no warnings when they were expected")
				}
			}

			a := warnings.Error()
			for _, e := range c.Expected {
				if !strings.Contains(a, e) {
					t.Fatalf("Got warnings %q; didn't contain %q", a, e)
				}
			}
		})
	}
}

func TestJob_SpecChanged(t *testing.T) {
	// Get a base test job
	base := testJob()

	// Only modify the indexes/mutable state of the job
	mutatedBase := base.Copy()
	mutatedBase.Status = "foo"
	mutatedBase.ModifyIndex = base.ModifyIndex + 100

	// changed contains a spec change that should be detected
	change := base.Copy()
	change.Priority = 99

	cases := []struct {
		Name     string
		Original *Job
		New      *Job
		Changed  bool
	}{
		{
			Name:     "Same job except mutable indexes",
			Changed:  false,
			Original: base,
			New:      mutatedBase,
		},
		{
			Name:     "Different",
			Changed:  true,
			Original: base,
			New:      change,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			if actual := c.Original.SpecChanged(c.New); actual != c.Changed {
				t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed)
			}
		})
	}
}

func testJob() *Job {
	return &Job{
		Region:      "global",
		ID:          uuid.Generate(),
		Namespace:   "test",
		Name:        "my-job",
		Type:        JobTypeService,
		Priority:    50,
		AllAtOnce:   false,
		Datacenters: []string{"dc1"},
		Constraints: []*Constraint{
			{
				LTarget: "$attr.kernel.name",
				RTarget: "linux",
				Operand: "=",
			},
		},
		Periodic: &PeriodicConfig{
			Enabled: false,
		},
		TaskGroups: []*TaskGroup{
			{
				Name:          "web",
				Count:         10,
				EphemeralDisk: DefaultEphemeralDisk(),
				RestartPolicy: &RestartPolicy{
					Mode:     RestartPolicyModeFail,
					Attempts: 3,
					Interval: 10 * time.Minute,
					Delay:    1 * time.Minute,
				},
				ReschedulePolicy: &ReschedulePolicy{
					Interval:      5 * time.Minute,
					Attempts:      10,
					Delay:         5 * time.Second,
					DelayFunction: "constant",
				},
				Tasks: []*Task{
					{
						Name:   "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{
							"FOO": "bar",
						},
						Artifacts: []*TaskArtifact{
							{
								GetterSource: "http://foo.com",
							},
						},
						Services: []*Service{
							{
								Name:      "${TASK}-frontend",
								PortLabel: "http",
							},
						},
						Resources: &Resources{
							CPU:      500,
							MemoryMB: 256,
							Networks: []*NetworkResource{
								{
									MBits:        50,
									DynamicPorts: []Port{{Label: "http"}},
								},
							},
						},
						LogConfig: &LogConfig{
							MaxFiles:      10,
							MaxFileSizeMB: 1,
						},
					},
				},
				Meta: map[string]string{
					"elb_check_type":     "http",
					"elb_check_interval": "30s",
					"elb_check_min":      "3",
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
	}
}

func TestJob_Copy(t *testing.T) {
	j := testJob()
	c := j.Copy()
	if !reflect.DeepEqual(j, c) {
		t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
	}
}
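
// An illustrative addition (not part of the original suite): TestJob_Copy above
// only checks that Copy returns a DeepEqual job, so this sketch also asserts
// that the copy is deep, i.e. mutating nested state on the copy does not leak
// back into the source job. It relies only on testJob() and the fields it
// populates; the test name is ours.
func TestJob_Copy_DeepIsolation(t *testing.T) {
	j := testJob()
	c := j.Copy()

	// Mutate reference-typed state on the copy only.
	c.TaskGroups[0].Name = "mutated"
	c.Meta["owner"] = "someone-else"

	if j.TaskGroups[0].Name != "web" {
		t.Fatalf("mutating the copy changed the original task group name: %q", j.TaskGroups[0].Name)
	}
	if j.Meta["owner"] != "armon" {
		t.Fatalf("mutating the copy changed the original job meta: %q", j.Meta["owner"])
	}
}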

func TestJob_IsPeriodic(t *testing.T) {
	j := &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	if !j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned false on periodic job")
	}

	j = &Job{
		Type: JobTypeService,
	}
	if j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned true on non-periodic job")
	}
}

func TestJob_IsPeriodicActive(t *testing.T) {
	cases := []struct {
		job    *Job
		active bool
	}{
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
			},
			active: true,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
				Stop: true,
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
				ParameterizedJob: &ParameterizedJobConfig{},
			},
			active: false,
		},
	}

	for i, c := range cases {
		if act := c.job.IsPeriodicActive(); act != c.active {
			t.Fatalf("case %d failed: got %v; want %v", i, act, c.active)
		}
	}
}

func TestJob_SystemJob_Validate(t *testing.T) {
	j := testJob()
	j.Type = JobTypeSystem
	j.TaskGroups[0].ReschedulePolicy = nil
	j.Canonicalize()

	err := j.Validate()
	if err == nil || !strings.Contains(err.Error(), "exceed") {
		t.Fatalf("expect error due to count")
	}

	j.TaskGroups[0].Count = 0
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	j.TaskGroups[0].Count = 1
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Add affinities at job, task group and task level, that should fail validation

	j.Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${node.datacenter}",
		RTarget: "dc1",
	}}
	j.TaskGroups[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have an affinity stanza")

	// Add spread at job and task group level, that should fail validation
	j.Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}
	j.TaskGroups[0].Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}

	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have a spread stanza")
}

func TestJob_VaultPolicies(t *testing.T) {
	j0 := &Job{}
	e0 := make(map[string]map[string]*Vault, 0)

	vj1 := &Vault{
		Policies: []string{
			"p1",
			"p2",
		},
	}
	vj2 := &Vault{
		Policies: []string{
			"p3",
			"p4",
		},
	}
	vj3 := &Vault{
		Policies: []string{
			"p5",
		},
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name:  "t2",
						Vault: vj1,
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name:  "t3",
						Vault: vj2,
					},
					{
						Name:  "t4",
						Vault: vj3,
					},
				},
			},
		},
	}

	e1 := map[string]map[string]*Vault{
		"foo": {
			"t2": vj1,
		},
		"bar": {
			"t3": vj2,
			"t4": vj3,
		},
	}

	cases := []struct {
		Job      *Job
		Expected map[string]map[string]*Vault
	}{
		{
			Job:      j0,
			Expected: e0,
		},
		{
			Job:      j1,
			Expected: e1,
		},
	}

	for i, c := range cases {
		got := c.Job.VaultPolicies()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

func TestJob_ConnectTasks(t *testing.T) {
	t.Parallel()
	r := require.New(t)

	// todo(shoenig): this will need some updates when we support connect native
	// tasks, which will have a different Kind format, probably.

	j0 := &Job{
		TaskGroups: []*TaskGroup{{
			Name: "tg1",
			Tasks: []*Task{{
				Name: "connect-proxy-task1",
				Kind: "connect-proxy:task1",
			}, {
				Name: "task2",
				Kind: "task2",
			}, {
				Name: "connect-proxy-task3",
				Kind: "connect-proxy:task3",
			}},
		}, {
			Name: "tg2",
			Tasks: []*Task{{
				Name: "task1",
				Kind: "task1",
			}, {
				Name: "connect-proxy-task2",
				Kind: "connect-proxy:task2",
			}},
		}},
	}

	connectTasks := j0.ConnectTasks()

	exp := map[string][]string{
		"tg1": {"connect-proxy-task1", "connect-proxy-task3"},
		"tg2": {"connect-proxy-task2"},
	}
	r.Equal(exp, connectTasks)
}

func TestJob_RequiredSignals(t *testing.T) {
	j0 := &Job{}
	e0 := make(map[string]map[string][]string, 0)

	vj1 := &Vault{
		Policies:   []string{"p1"},
		ChangeMode: VaultChangeModeNoop,
	}
	vj2 := &Vault{
		Policies:     []string{"p1"},
		ChangeMode:   VaultChangeModeSignal,
		ChangeSignal: "SIGUSR1",
	}
	tj1 := &Template{
		SourcePath: "foo",
		DestPath:   "bar",
		ChangeMode: TemplateChangeModeNoop,
	}
	tj2 := &Template{
		SourcePath:   "foo",
		DestPath:     "bar",
		ChangeMode:   TemplateChangeModeSignal,
		ChangeSignal: "SIGUSR2",
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name:      "t2",
						Vault:     vj2,
						Templates: []*Template{tj2},
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name:      "t3",
						Vault:     vj1,
						Templates: []*Template{tj1},
					},
					{
						Name:  "t4",
						Vault: vj2,
					},
				},
			},
		},
	}

	e1 := map[string]map[string][]string{
		"foo": {
			"t2": {"SIGUSR1", "SIGUSR2"},
		},
		"bar": {
			"t4": {"SIGUSR1"},
		},
	}

	j2 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name:       "t1",
						KillSignal: "SIGQUIT",
					},
				},
			},
		},
	}

	e2 := map[string]map[string][]string{
		"foo": {
			"t1": {"SIGQUIT"},
		},
	}

	cases := []struct {
		Job      *Job
		Expected map[string]map[string][]string
	}{
		{
			Job:      j0,
			Expected: e0,
		},
		{
			Job:      j1,
			Expected: e1,
		},
		{
			Job:      j2,
			Expected: e2,
		},
	}

	for i, c := range cases {
		got := c.Job.RequiredSignals()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

// test new Equal comparisons for components of Jobs
func TestJob_PartEqual(t *testing.T) {
	ns := &Networks{}
	require.True(t, ns.Equals(&Networks{}))

	ns = &Networks{
		&NetworkResource{Device: "eth0"},
	}
	require.True(t, ns.Equals(&Networks{
		&NetworkResource{Device: "eth0"},
	}))

	ns = &Networks{
		&NetworkResource{Device: "eth0"},
		&NetworkResource{Device: "eth1"},
		&NetworkResource{Device: "eth2"},
	}
	require.True(t, ns.Equals(&Networks{
		&NetworkResource{Device: "eth2"},
		&NetworkResource{Device: "eth0"},
		&NetworkResource{Device: "eth1"},
	}))

	cs := &Constraints{
		&Constraint{"left0", "right0", "=", ""},
		&Constraint{"left1", "right1", "=", ""},
		&Constraint{"left2", "right2", "=", ""},
	}
	require.True(t, cs.Equals(&Constraints{
		&Constraint{"left0", "right0", "=", ""},
		&Constraint{"left2", "right2", "=", ""},
		&Constraint{"left1", "right1", "=", ""},
	}))

	as := &Affinities{
		&Affinity{"left0", "right0", "=", 0, ""},
		&Affinity{"left1", "right1", "=", 0, ""},
		&Affinity{"left2", "right2", "=", 0, ""},
	}
	require.True(t, as.Equals(&Affinities{
		&Affinity{"left0", "right0", "=", 0, ""},
		&Affinity{"left2", "right2", "=", 0, ""},
		&Affinity{"left1", "right1", "=", 0, ""},
	}))
}

func TestTask_UsesConnect(t *testing.T) {
	t.Parallel()

	t.Run("normal task", func(t *testing.T) {
		task := testJob().TaskGroups[0].Tasks[0]
		usesConnect := task.UsesConnect()
		require.False(t, usesConnect)
	})

	t.Run("sidecar proxy", func(t *testing.T) {
		task := &Task{
			Name: "connect-proxy-task1",
			Kind: "connect-proxy:task1",
		}
		usesConnect := task.UsesConnect()
		require.True(t, usesConnect)
	})

	// todo(shoenig): add native case
}

func TestTaskGroup_UsesConnect(t *testing.T) {
	t.Parallel()

	try := func(t *testing.T, tg *TaskGroup, exp bool) {
		result := tg.UsesConnect()
		require.Equal(t, exp, result)
	}

	t.Run("tg uses native", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{
				{Connect: nil},
				{Connect: &ConsulConnect{Native: true}},
			},
		}, true)
	})

	t.Run("tg uses sidecar", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{{
				Connect: &ConsulConnect{
					SidecarService: &ConsulSidecarService{
						Port: "9090",
					},
				},
			}},
		}, true)
	})

	t.Run("tg does not use connect", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{
				{Connect: nil},
			},
		}, false)
	})
}

func TestTaskGroup_Validate(t *testing.T) {
	j := testJob()
	tg := &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	err := tg.Validate(j)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "group name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
			{
				Name: "task-b",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected := `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{
								{Label: "foo", Value: 123},
								{Label: "bar", Value: 123},
							},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
			{Name: "web", Leader: true},
			{},
		},
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval:      5 * time.Minute,
			Attempts:      10,
			Delay:         5 * time.Second,
			DelayFunction: "constant",
		},
	}

	err = tg.Validate(j)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
		},
		Update: DefaultUpdateStrategy.Copy(),
	}
	j.Type = JobTypeBatch
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "does not allow update block") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	j.Type = JobTypeSystem
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Networks: []*NetworkResource{
			{
				DynamicPorts: []Port{{"http", 0, 80}},
			},
		},
		Tasks: []*Task{
			{
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							DynamicPorts: []Port{{"http", 0, 80}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(j)
	require.Contains(t, err.Error(), "Port label http already in use")
	require.Contains(t, err.Error(), "Port mapped to 80 already in use")

	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type:   "nothost",
				Source: "foo",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `Volume foo has unrecognised type nothost`)

	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type: "host",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `Volume foo has an empty source`)

	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type: "host",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
				VolumeMounts: []*VolumeMount{
					{
						Volume: "",
					},
				},
			},
			{
				Name:      "task-b",
				Resources: &Resources{},
				VolumeMounts: []*VolumeMount{
					{
						Volume: "foob",
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Task task-a has a volume mount (0) referencing an empty volume`
	require.Contains(t, err.Error(), expected)

	expected = `Task task-b has a volume mount (0) referencing undefined volume foob`
	require.Contains(t, err.Error(), expected)

	taskA := &Task{Name: "task-a"}
	tg = &TaskGroup{
		Name: "group-a",
		Services: []*Service{
			{
				Name: "service-a",
				Checks: []*ServiceCheck{
					{
						Name:      "check-a",
						Type:      "tcp",
						TaskName:  "task-b",
						PortLabel: "http",
						Interval:  time.Duration(1 * time.Second),
						Timeout:   time.Duration(1 * time.Second),
					},
				},
			},
		},
		Tasks: []*Task{taskA},
	}
	err = tg.Validate(&Job{})
	expected = `Check check-a invalid: refers to non-existent task task-b`
	require.Contains(t, err.Error(), expected)

	expected = `Check check-a invalid: only script and gRPC checks should have tasks`
	require.Contains(t, err.Error(), expected)
}

func TestTask_Validate(t *testing.T) {
	task := &Task{}
	ephemeralDisk := DefaultEphemeralDisk()
	err := task.Validate(ephemeralDisk, JobTypeBatch, nil)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "task name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task driver") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "task resources") {
		t.Fatalf("err: %s", err)
	}

	task = &Task{Name: "web/foo"}
	err = task.Validate(ephemeralDisk, JobTypeBatch, nil)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "slashes") {
		t.Fatalf("err: %s", err)
	}

	task = &Task{
		Name:   "web",
		Driver: "docker",
		Resources: &Resources{
			CPU:      100,
			MemoryMB: 100,
		},
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk.SizeMB = 200
	err = task.Validate(ephemeralDisk, JobTypeBatch, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	task.Constraints = append(task.Constraints,
		&Constraint{
			Operand: ConstraintDistinctHosts,
		},
		&Constraint{
			Operand: ConstraintDistinctProperty,
			LTarget: "${meta.rack}",
		})

	err = task.Validate(ephemeralDisk, JobTypeBatch, nil)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate_Services(t *testing.T) {
	s1 := &Service{
		Name:      "service-name",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name:     "check-name",
				Type:     ServiceCheckTCP,
				Interval: 0 * time.Second,
			},
			{
				Name:    "check-name",
				Type:    ServiceCheckTCP,
				Timeout: 2 * time.Second,
			},
			{
				Name:     "check-name",
				Type:     ServiceCheckTCP,
				Interval: 1 * time.Second,
			},
		},
	}

	s2 := &Service{
		Name:      "service-name",
		PortLabel: "bar",
	}

	s3 := &Service{
		Name:      "service-A",
		PortLabel: "a",
	}
	s4 := &Service{
		Name:      "service-A",
		PortLabel: "b",
	}

	ephemeralDisk := DefaultEphemeralDisk()
	ephemeralDisk.SizeMB = 200
	task := &Task{
		Name:   "web",
		Driver: "docker",
		Resources: &Resources{
			CPU:      100,
			MemoryMB: 100,
		},
		Services: []*Service{s1, s2},
	}

	task1 := &Task{
		Name:      "web",
		Driver:    "docker",
		Resources: DefaultResources(),
		Services:  []*Service{s3, s4},
		LogConfig: DefaultLogConfig(),
	}
	task1.Resources.Networks = []*NetworkResource{
		{
			MBits: 10,
			DynamicPorts: []Port{
				{
					Label: "a",
					Value: 1000,
				},
				{
					Label: "b",
					Value: 2000,
				},
			},
		},
	}

	err := task.Validate(ephemeralDisk, JobTypeService, nil)
	if err == nil {
		t.Fatal("expected an error")
	}

	if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "missing required value interval") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "cannot be less than") {
		t.Fatalf("err: %v", err)
	}

	if err = task1.Validate(ephemeralDisk, JobTypeService, nil); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		task := &Task{
			Name:      "web",
			Driver:    "docker",
			Resources: DefaultResources(),
			Services:  []*Service{s},
			LogConfig: DefaultLogConfig(),
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name:        "DriverModeWithLabel",
			PortLabel:   "http",
			AddressMode: AddressModeDriver,
		},
		{
			Name:        "DriverModeWithPort",
			PortLabel:   "80",
			AddressMode: AddressModeDriver,
		},
		{
			Name:        "HostModeWithLabel",
			PortLabel:   "http",
			AddressMode: AddressModeHost,
		},
		{
			Name:        "HostModeWithoutLabel",
			AddressMode: AddressModeHost,
		},
		{
			Name:        "DriverModeWithoutLabel",
			AddressMode: AddressModeDriver,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			if err := task.Validate(ephemeralDisk, JobTypeService, nil); err != nil {
				t.Fatalf("unexpected err: %v", err)
			}
		})
	}
}

func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		task := &Task{
			Name:      "web",
			Driver:    "docker",
			Resources: DefaultResources(),
			Services:  []*Service{s},
			LogConfig: DefaultLogConfig(),
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name:        "DriverModeWithLabel",
			PortLabel:   "asdf",
			AddressMode: AddressModeDriver,
		},
		{
			Name:        "HostModeWithLabel",
			PortLabel:   "asdf",
			AddressMode: AddressModeHost,
		},
		{
			Name:        "HostModeWithPort",
			PortLabel:   "80",
			AddressMode: AddressModeHost,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			err := task.Validate(ephemeralDisk, JobTypeService, nil)
			if err == nil {
				t.Fatalf("expected an error")
			}
			//t.Logf("err: %v", err)
		})
	}
}

func TestTask_Validate_Service_Check(t *testing.T) {

	invalidCheck := ServiceCheck{
		Name:     "check-name",
		Command:  "/bin/true",
		Type:     ServiceCheckScript,
		Interval: 10 * time.Second,
	}

	err := invalidCheck.validate()
	if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") {
		t.Fatalf("expected a timeout validation error but received: %q", err)
	}

	check1 := ServiceCheck{
		Name:     "check-name",
		Type:     ServiceCheckTCP,
		Interval: 10 * time.Second,
		Timeout:  2 * time.Second,
	}

	if err := check1.validate(); err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = "foo"
	err = check1.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}

	if !strings.Contains(err.Error(), "invalid initial check state (foo)") {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthCritical
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthPassing
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = ""
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2 := ServiceCheck{
		Name:     "check-name-2",
		Type:     ServiceCheckHTTP,
		Interval: 10 * time.Second,
		Timeout:  2 * time.Second,
		Path:     "/foo/bar",
	}

	err = check2.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2.Path = ""
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "valid http path") {
		t.Fatalf("err: %v", err)
	}

	check2.Path = "http://www.example.com"
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "relative http path") {
		t.Fatalf("err: %v", err)
	}

	t.Run("check expose", func(t *testing.T) {
		t.Run("type http", func(t *testing.T) {
			require.NoError(t, (&ServiceCheck{
				Type:     ServiceCheckHTTP,
				Interval: 1 * time.Second,
				Timeout:  1 * time.Second,
				Path:     "/health",
				Expose:   true,
			}).validate())
		})
		t.Run("type tcp", func(t *testing.T) {
			require.EqualError(t, (&ServiceCheck{
				Type:     ServiceCheckTCP,
				Interval: 1 * time.Second,
				Timeout:  1 * time.Second,
				Expose:   true,
			}).validate(), "expose may only be set on HTTP or gRPC checks")
		})
	})
}

// TestTask_Validate_Service_Check_AddressMode asserts that checks do not
// inherit address mode but do inherit ports.
func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
	getTask := func(s *Service) *Task {
		return &Task{
			Resources: &Resources{
				Networks: []*NetworkResource{
					{
						DynamicPorts: []Port{
							{
								Label: "http",
								Value: 9999,
							},
						},
					},
				},
			},
			Services: []*Service{s},
		}
	}

	cases := []struct {
		Service     *Service
		ErrContains string
	}{
		{
			Service: &Service{
				Name:        "invalid-driver",
				PortLabel:   "80",
				AddressMode: "host",
			},
			ErrContains: `port label "80" referenced`,
		},
		{
			Service: &Service{
				Name:        "http-driver-fail-1",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:     "invalid-check-1",
						Type:     "tcp",
						Interval: time.Second,
						Timeout:  time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-1" cannot use a numeric port`,
		},
		{
			Service: &Service{
				Name:        "http-driver-fail-2",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:      "invalid-check-2",
						Type:      "tcp",
						PortLabel: "80",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-2" cannot use a numeric port`,
		},
		{
			Service: &Service{
				Name:        "http-driver-fail-3",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:      "invalid-check-3",
						Type:      "tcp",
						PortLabel: "missing-port-label",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
				},
			},
			ErrContains: `port label "missing-port-label" referenced`,
		},
		{
			Service: &Service{
				Name:        "http-driver-passes",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:     "valid-script-check",
						Type:     "script",
						Command:  "ok",
						Interval: time.Second,
						Timeout:  time.Second,
					},
					{
						Name:      "valid-host-check",
						Type:      "tcp",
						PortLabel: "http",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
					{
						Name:        "valid-driver-check",
						Type:        "tcp",
						AddressMode: "driver",
						Interval:    time.Second,
						Timeout:     time.Second,
					},
				},
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-passes-1",
				Checks: []*ServiceCheck{
					{
						Name:      "valid-port-label",
						Type:      "tcp",
						PortLabel: "http",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
					{
						Name:     "empty-is-ok",
						Type:     "script",
						Command:  "ok",
						Interval: time.Second,
						Timeout:  time.Second,
					},
				},
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-passes-2",
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-fails",
				Checks: []*ServiceCheck{
					{
						Name:     "empty-is-not-ok",
						Type:     "tcp",
						Interval: time.Second,
						Timeout:  time.Second,
					},
				},
			},
			ErrContains: `invalid: check requires a port but neither check nor service`,
		},
	}

	for _, tc := range cases {
		tc := tc
		task := getTask(tc.Service)
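		// The task is built once per case outside the subtest; the subtest
		// below calls validateServices directly, so only the service/check
		// address-mode wiring is exercised rather than full task validation.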
		t.Run(tc.Service.Name, func(t *testing.T) {
			err := validateServices(task)
			if err == nil && tc.ErrContains == "" {
				// Ok!
				return
			}
			if err == nil {
				t.Fatalf("no error returned. expected: %s", tc.ErrContains)
			}
			if !strings.Contains(err.Error(), tc.ErrContains) {
				t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
			}
		})
	}
}

func TestTask_Validate_Service_Check_GRPC(t *testing.T) {
	t.Parallel()
	// Bad (no port)
	invalidGRPC := &ServiceCheck{
		Type:     ServiceCheckGRPC,
		Interval: time.Second,
		Timeout:  time.Second,
	}
	service := &Service{
		Name:   "test",
		Checks: []*ServiceCheck{invalidGRPC},
	}

	assert.Error(t, service.Validate())

	// Good
	service.Checks[0] = &ServiceCheck{
		Type:      ServiceCheckGRPC,
		Interval:  time.Second,
		Timeout:   time.Second,
		PortLabel: "some-port-label",
	}

	assert.NoError(t, service.Validate())
}

func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
	t.Parallel()
	invalidCheckRestart := &CheckRestart{
		Limit: -1,
		Grace: -1,
	}

	err := invalidCheckRestart.Validate()
	assert.NotNil(t, err, "invalidateCheckRestart.Validate()")
	assert.Len(t, err.(*multierror.Error).Errors, 2)

	validCheckRestart := &CheckRestart{}
	assert.Nil(t, validCheckRestart.Validate())

	validCheckRestart.Limit = 1
	validCheckRestart.Grace = 1
	assert.Nil(t, validCheckRestart.Validate())
}

func TestTask_Validate_ConnectProxyKind(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(kind TaskKind, leader bool) *Task {
		task := &Task{
			Name:      "web",
			Driver:    "docker",
			Resources: DefaultResources(),
			LogConfig: DefaultLogConfig(),
			Kind:      kind,
			Leader:    leader,
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []struct {
		Desc        string
		Kind        TaskKind
		Leader      bool
		Service     *Service
		TgService   []*Service
		ErrContains string
	}{
		{
			Desc: "Not connect",
			Kind: "test",
		},
		{
			Desc: "Invalid because of service in task definition",
			Kind: "connect-proxy:redis",
			Service: &Service{
				Name: "redis",
			},
			ErrContains: "Connect proxy task must not have a service stanza",
		},
		{
			Desc:   "Leader should not be set",
			Kind:   "connect-proxy:redis",
			Leader: true,
			Service: &Service{
				Name: "redis",
			},
			ErrContains: "Connect proxy task must not have leader set",
		},
		{
			Desc: "Service name invalid",
			Kind: "connect-proxy:redis:test",
			Service: &Service{
				Name: "redis",
			},
			ErrContains: `No Connect services in task group with Connect proxy ("redis:test")`,
		},
		{
			Desc:        "Service name not found in group",
			Kind:        "connect-proxy:redis",
			ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
		},
		{
			Desc: "Connect stanza not configured in group",
			Kind: "connect-proxy:redis",
			TgService: []*Service{{
				Name: "redis",
			}},
			ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
		},
		{
			Desc: "Valid connect proxy kind",
			Kind: "connect-proxy:redis",
			TgService: []*Service{{
				Name: "redis",
				Connect: &ConsulConnect{
					SidecarService: &ConsulSidecarService{
						Port: "db",
					},
				},
			}},
		},
	}

	for _, tc := range cases {
		tc := tc
		task := getTask(tc.Kind, tc.Leader)
		if tc.Service != nil {
			task.Services = []*Service{tc.Service}
		}
		t.Run(tc.Desc, func(t *testing.T) {
			err := task.Validate(ephemeralDisk, "service", tc.TgService)
			if err == nil && tc.ErrContains == "" {
				// Ok!
				return
			}
			require.Errorf(t, err, "no error returned. expected: %s", tc.ErrContains)
			require.Containsf(t, err.Error(), tc.ErrContains, "expected %q but found: %v", tc.ErrContains, err)
		})
	}
}

func TestTask_Validate_LogConfig(t *testing.T) {
	task := &Task{
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk := &EphemeralDisk{
		SizeMB: 1,
	}

	err := task.Validate(ephemeralDisk, JobTypeService, nil)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[3].Error(), "log storage") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate_CSIPluginConfig(t *testing.T) {
	table := []struct {
		name        string
		pc          *TaskCSIPluginConfig
		expectedErr string
	}{
		{
			name: "no errors when not specified",
			pc:   nil,
		},
		{
			name:        "requires non-empty plugin id",
			pc:          &TaskCSIPluginConfig{},
			expectedErr: "CSIPluginConfig must have a non-empty PluginID",
		},
		{
			name: "requires valid plugin type",
			pc: &TaskCSIPluginConfig{
				ID:   "com.hashicorp.csi",
				Type: "nonsense",
			},
			expectedErr: "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"",
		},
	}

	for _, tt := range table {
		t.Run(tt.name, func(t *testing.T) {
			task := &Task{
				CSIPluginConfig: tt.pc,
			}
			ephemeralDisk := &EphemeralDisk{
				SizeMB: 1,
			}

			err := task.Validate(ephemeralDisk, JobTypeService, nil)
			mErr := err.(*multierror.Error)
			if tt.expectedErr != "" {
				if !strings.Contains(mErr.Errors[4].Error(), tt.expectedErr) {
					t.Fatalf("err: %s", err)
				}
			} else {
				if len(mErr.Errors) != 4 {
					t.Fatalf("unexpected err: %s", mErr.Errors[4])
				}
			}
		})
	}
}

func TestTask_Validate_Template(t *testing.T) {

	bad := &Template{}
	task := &Task{
		Templates: []*Template{bad},
	}
	ephemeralDisk := &EphemeralDisk{
		SizeMB: 1,
	}

	err := task.Validate(ephemeralDisk, JobTypeService, nil)
	if !strings.Contains(err.Error(), "Template 1 validation failed") {
		t.Fatalf("err: %s", err)
	}

	// Have two templates that share the same destination
	good := &Template{
		SourcePath: "foo",
		DestPath:   "local/foo",
		ChangeMode: "noop",
	}

	task.Templates = []*Template{good, good}
	err = task.Validate(ephemeralDisk, JobTypeService, nil)
	if !strings.Contains(err.Error(), "same destination as") {
		t.Fatalf("err: %s", err)
	}

	// Env templates can't use signals
	task.Templates = []*Template{
		{
			Envvars:    true,
			ChangeMode: "signal",
		},
	}

	err = task.Validate(ephemeralDisk, JobTypeService, nil)
	if err == nil {
		t.Fatalf("expected error from Template.Validate")
	}
	if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) {
		t.Errorf("expected to find %q but found %v", expected, err)
	}
}

func TestTemplate_Validate(t *testing.T) {
	cases := []struct {
		Tmpl         *Template
		Fail         bool
		ContainsErrs []string
	}{
		{
			Tmpl: &Template{},
			Fail: true,
			ContainsErrs: []string{
				"specify a source path",
				"specify a destination",
				TemplateChangeModeInvalidError.Error(),
			},
		},
		{
			Tmpl: &Template{
				Splay: -100,
			},
			Fail: true,
			ContainsErrs: []string{
				"positive splay",
			},
		},
		{
			Tmpl: &Template{
				ChangeMode: "foo",
			},
			Fail: true,
			ContainsErrs: []string{
				TemplateChangeModeInvalidError.Error(),
			},
		},
		{
			Tmpl: &Template{
				ChangeMode: "signal",
			},
			Fail: true,
			ContainsErrs: []string{
				"specify signal value",
			},
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath:   "../../root",
				ChangeMode: "noop",
			},
			Fail: true,
			ContainsErrs: []string{
				"destination escapes",
			},
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath:   "local/foo",
				ChangeMode: "noop",
			},
			Fail: false,
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath:   "local/foo",
				ChangeMode: "noop",
				Perms:      "0444",
			},
			Fail: false,
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath:   "local/foo",
				ChangeMode: "noop",
				Perms:      "zza",
			},
			Fail: true,
			ContainsErrs: []string{
				"as octal",
			},
		},
	}

	for i, c := range cases {
		err := c.Tmpl.Validate()
		if err != nil {
			if !c.Fail {
				t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err)
			}

			e := err.Error()
			for _, exp := range c.ContainsErrs {
				if !strings.Contains(e, exp) {
					t.Fatalf("Case %d: should have contained error %q: %q", i+1, exp, e)
				}
			}
		} else if c.Fail {
			t.Fatalf("Case %d: should have failed: %v", i+1, err)
		}
	}
}

func TestConstraint_Validate(t *testing.T) {
	c := &Constraint{}
	err := c.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") {
		t.Fatalf("err: %s", err)
	}

	c = &Constraint{
		LTarget: "$attr.kernel.name",
		RTarget: "linux",
		Operand: "=",
	}
	err = c.Validate()
	require.NoError(t, err)

	// Perform additional regexp validation
	c.Operand = ConstraintRegex
	c.RTarget = "(foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "missing closing") {
		t.Fatalf("err: %s", err)
	}

	// Perform version validation
	c.Operand = ConstraintVersion
	c.RTarget = "~> foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") {
		t.Fatalf("err: %s", err)
	}

	// Perform semver validation
	c.Operand = ConstraintSemver
	err = c.Validate()
	require.Error(t, err)
	require.Contains(t, err.Error(), "Malformed constraint")

	c.RTarget = ">= 0.6.1"
	require.NoError(t, c.Validate())

	// Perform distinct_property validation
	c.Operand = ConstraintDistinctProperty
	c.RTarget = "0"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") {
		t.Fatalf("err: %s", err)
	}

	c.RTarget = "-1"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "to uint64") {
		t.Fatalf("err: %s", err)
	}

	// Perform distinct_hosts validation
	c.Operand = ConstraintDistinctHosts
	c.LTarget = ""
	c.RTarget = ""
	if err := c.Validate(); err != nil {
		t.Fatalf("expected valid constraint: %v", err)
	}

	// Perform set_contains* validation
	c.RTarget = ""
	for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} {
		c.Operand = o
		err = c.Validate()
		mErr = err.(*multierror.Error)
		if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") {
			t.Fatalf("err: %s", err)
		}
	}

	// Perform LTarget validation
	c.Operand = ConstraintRegex
	c.RTarget = "foo"
	c.LTarget = ""
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") {
		t.Fatalf("err: %s", err)
	}

	// Perform constraint type validation
	c.Operand = "foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") {
		t.Fatalf("err: %s", err)
	}
}

func TestAffinity_Validate(t *testing.T) {

	type tc struct {
		affinity *Affinity
		err      error
		name     string
	}

	testCases := []tc{
		{
			affinity: &Affinity{},
			err:      fmt.Errorf("Missing affinity operand"),
		},
		{
			affinity: &Affinity{
				Operand: "foo",
				LTarget: "${meta.node_class}",
				Weight:  10,
			},
			err: fmt.Errorf("Unknown affinity operator \"foo\""),
		},
		{
			affinity: &Affinity{
				Operand: "=",
				LTarget: "${meta.node_class}",
				Weight:  10,
			},
			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
		},
		{
			affinity: &Affinity{
				Operand: "=",
				LTarget: "${meta.node_class}",
				RTarget: "c4",
				Weight:  0,
			},
			err: fmt.Errorf("Affinity weight cannot be zero"),
		},
		{
			affinity: &Affinity{
				Operand: "=",
				LTarget: "${meta.node_class}",
				RTarget: "c4",
				Weight:  110,
			},
			err: fmt.Errorf("Affinity weight must be within the range [-100,100]"),
		},
		{
			affinity: &Affinity{
				Operand: "=",
				LTarget: "${node.class}",
				Weight:  10,
			},
			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
		},
		{
			affinity: &Affinity{
				Operand: "version",
				LTarget: "${meta.os}",
				RTarget: ">>2.0",
				Weight:  110,
			},
			err: fmt.Errorf("Version affinity is invalid"),
		},
		{
			affinity: &Affinity{
				Operand: "regexp",
				LTarget: "${meta.os}",
				RTarget: "\\K2.0",
				Weight:  100,
			},
			err: fmt.Errorf("Regular expression failed to compile"),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.affinity.Validate()
			if tc.err != nil {
				require.NotNil(t, err)
				require.Contains(t, err.Error(), tc.err.Error())
			} else {
				require.Nil(t, err)
			}
		})
	}
}

func TestUpdateStrategy_Validate(t *testing.T) {
	u := &UpdateStrategy{
		MaxParallel:      -1,
		HealthCheck:      "foo",
		MinHealthyTime:   -10,
		HealthyDeadline:  -15,
		ProgressDeadline: -25,
		AutoRevert:       false,
		Canary:           -1,
	}

	err := u.Validate()
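	// Each invalid field above should surface as its own entry in the
	// returned multierror, in the order asserted below.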
2247 mErr := err.(*multierror.Error) 2248 if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") { 2249 t.Fatalf("err: %s", err) 2250 } 2251 if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than zero") { 2252 t.Fatalf("err: %s", err) 2253 } 2254 if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") { 2255 t.Fatalf("err: %s", err) 2256 } 2257 if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") { 2258 t.Fatalf("err: %s", err) 2259 } 2260 if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") { 2261 t.Fatalf("err: %s", err) 2262 } 2263 if !strings.Contains(mErr.Errors[5].Error(), "Progress deadline must be zero or greater") { 2264 t.Fatalf("err: %s", err) 2265 } 2266 if !strings.Contains(mErr.Errors[6].Error(), "Minimum healthy time must be less than healthy deadline") { 2267 t.Fatalf("err: %s", err) 2268 } 2269 if !strings.Contains(mErr.Errors[7].Error(), "Healthy deadline must be less than progress deadline") { 2270 t.Fatalf("err: %s", err) 2271 } 2272 } 2273 2274 func TestResource_NetIndex(t *testing.T) { 2275 r := &Resources{ 2276 Networks: []*NetworkResource{ 2277 {Device: "eth0"}, 2278 {Device: "lo0"}, 2279 {Device: ""}, 2280 }, 2281 } 2282 if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 { 2283 t.Fatalf("Bad: %d", idx) 2284 } 2285 if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 { 2286 t.Fatalf("Bad: %d", idx) 2287 } 2288 if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 { 2289 t.Fatalf("Bad: %d", idx) 2290 } 2291 } 2292 2293 func TestResource_Superset(t *testing.T) { 2294 r1 := &Resources{ 2295 CPU: 2000, 2296 MemoryMB: 2048, 2297 DiskMB: 10000, 2298 } 2299 r2 := &Resources{ 2300 CPU: 2000, 2301 MemoryMB: 1024, 2302 DiskMB: 5000, 2303 } 2304 2305 if s, _ := r1.Superset(r1); !s { 2306 t.Fatalf("bad") 2307 } 2308 if s, _ := r1.Superset(r2); !s { 2309 t.Fatalf("bad") 2310 } 2311 if s, _ := r2.Superset(r1); s { 2312 t.Fatalf("bad") 2313 } 2314 if s, _ := r2.Superset(r2); !s { 2315 t.Fatalf("bad") 2316 } 2317 } 2318 2319 func TestResource_Add(t *testing.T) { 2320 r1 := &Resources{ 2321 CPU: 2000, 2322 MemoryMB: 2048, 2323 DiskMB: 10000, 2324 Networks: []*NetworkResource{ 2325 { 2326 CIDR: "10.0.0.0/8", 2327 MBits: 100, 2328 ReservedPorts: []Port{{"ssh", 22, 0}}, 2329 }, 2330 }, 2331 } 2332 r2 := &Resources{ 2333 CPU: 2000, 2334 MemoryMB: 1024, 2335 DiskMB: 5000, 2336 Networks: []*NetworkResource{ 2337 { 2338 IP: "10.0.0.1", 2339 MBits: 50, 2340 ReservedPorts: []Port{{"web", 80, 0}}, 2341 }, 2342 }, 2343 } 2344 2345 err := r1.Add(r2) 2346 if err != nil { 2347 t.Fatalf("Err: %v", err) 2348 } 2349 2350 expect := &Resources{ 2351 CPU: 3000, 2352 MemoryMB: 3072, 2353 DiskMB: 15000, 2354 Networks: []*NetworkResource{ 2355 { 2356 CIDR: "10.0.0.0/8", 2357 MBits: 150, 2358 ReservedPorts: []Port{{"ssh", 22, 0}, {"web", 80, 0}}, 2359 }, 2360 }, 2361 } 2362 2363 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 2364 t.Fatalf("bad: %#v %#v", expect, r1) 2365 } 2366 } 2367 2368 func TestResource_Add_Network(t *testing.T) { 2369 r1 := &Resources{} 2370 r2 := &Resources{ 2371 Networks: []*NetworkResource{ 2372 { 2373 MBits: 50, 2374 DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}}, 2375 }, 2376 }, 2377 } 2378 r3 := &Resources{ 2379 Networks: []*NetworkResource{ 2380 { 2381 MBits: 25, 2382 DynamicPorts: []Port{{"admin", 0, 8080}}, 2383 }, 2384 }, 2385 } 2386 2387 err := r1.Add(r2) 2388 if err 
!= nil { 2389 t.Fatalf("Err: %v", err) 2390 } 2391 err = r1.Add(r3) 2392 if err != nil { 2393 t.Fatalf("Err: %v", err) 2394 } 2395 2396 expect := &Resources{ 2397 Networks: []*NetworkResource{ 2398 { 2399 MBits: 75, 2400 DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}, {"admin", 0, 8080}}, 2401 }, 2402 }, 2403 } 2404 2405 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 2406 t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0]) 2407 } 2408 } 2409 2410 func TestComparableResources_Subtract(t *testing.T) { 2411 r1 := &ComparableResources{ 2412 Flattened: AllocatedTaskResources{ 2413 Cpu: AllocatedCpuResources{ 2414 CpuShares: 2000, 2415 }, 2416 Memory: AllocatedMemoryResources{ 2417 MemoryMB: 2048, 2418 }, 2419 Networks: []*NetworkResource{ 2420 { 2421 CIDR: "10.0.0.0/8", 2422 MBits: 100, 2423 ReservedPorts: []Port{{"ssh", 22, 0}}, 2424 }, 2425 }, 2426 }, 2427 Shared: AllocatedSharedResources{ 2428 DiskMB: 10000, 2429 }, 2430 } 2431 2432 r2 := &ComparableResources{ 2433 Flattened: AllocatedTaskResources{ 2434 Cpu: AllocatedCpuResources{ 2435 CpuShares: 1000, 2436 }, 2437 Memory: AllocatedMemoryResources{ 2438 MemoryMB: 1024, 2439 }, 2440 Networks: []*NetworkResource{ 2441 { 2442 CIDR: "10.0.0.0/8", 2443 MBits: 20, 2444 ReservedPorts: []Port{{"ssh", 22, 0}}, 2445 }, 2446 }, 2447 }, 2448 Shared: AllocatedSharedResources{ 2449 DiskMB: 5000, 2450 }, 2451 } 2452 r1.Subtract(r2) 2453 2454 expect := &ComparableResources{ 2455 Flattened: AllocatedTaskResources{ 2456 Cpu: AllocatedCpuResources{ 2457 CpuShares: 1000, 2458 }, 2459 Memory: AllocatedMemoryResources{ 2460 MemoryMB: 1024, 2461 }, 2462 Networks: []*NetworkResource{ 2463 { 2464 CIDR: "10.0.0.0/8", 2465 MBits: 100, 2466 ReservedPorts: []Port{{"ssh", 22, 0}}, 2467 }, 2468 }, 2469 }, 2470 Shared: AllocatedSharedResources{ 2471 DiskMB: 5000, 2472 }, 2473 } 2474 2475 require := require.New(t) 2476 require.Equal(expect, r1) 2477 } 2478 2479 func TestEncodeDecode(t *testing.T) { 2480 type FooRequest struct { 2481 Foo string 2482 Bar int 2483 Baz bool 2484 } 2485 arg := &FooRequest{ 2486 Foo: "test", 2487 Bar: 42, 2488 Baz: true, 2489 } 2490 buf, err := Encode(1, arg) 2491 if err != nil { 2492 t.Fatalf("err: %v", err) 2493 } 2494 2495 var out FooRequest 2496 err = Decode(buf[1:], &out) 2497 if err != nil { 2498 t.Fatalf("err: %v", err) 2499 } 2500 2501 if !reflect.DeepEqual(arg, &out) { 2502 t.Fatalf("bad: %#v %#v", arg, out) 2503 } 2504 } 2505 2506 func BenchmarkEncodeDecode(b *testing.B) { 2507 job := testJob() 2508 2509 for i := 0; i < b.N; i++ { 2510 buf, err := Encode(1, job) 2511 if err != nil { 2512 b.Fatalf("err: %v", err) 2513 } 2514 2515 var out Job 2516 err = Decode(buf[1:], &out) 2517 if err != nil { 2518 b.Fatalf("err: %v", err) 2519 } 2520 } 2521 } 2522 2523 func TestInvalidServiceCheck(t *testing.T) { 2524 s := Service{ 2525 Name: "service-name", 2526 PortLabel: "bar", 2527 Checks: []*ServiceCheck{ 2528 { 2529 Name: "check-name", 2530 Type: "lol", 2531 }, 2532 }, 2533 } 2534 if err := s.Validate(); err == nil { 2535 t.Fatalf("Service should be invalid (invalid type)") 2536 } 2537 2538 s = Service{ 2539 Name: "service.name", 2540 PortLabel: "bar", 2541 } 2542 if err := s.ValidateName(s.Name); err == nil { 2543 t.Fatalf("Service should be invalid (contains a dot): %v", err) 2544 } 2545 2546 s = Service{ 2547 Name: "-my-service", 2548 PortLabel: "bar", 2549 } 2550 if err := s.Validate(); err == nil { 2551 t.Fatalf("Service should be invalid (begins with a hyphen): %v", err) 2552 } 2553 2554 s = Service{ 2555 Name: 
"my-service-${NOMAD_META_FOO}", 2556 PortLabel: "bar", 2557 } 2558 if err := s.Validate(); err != nil { 2559 t.Fatalf("Service should be valid: %v", err) 2560 } 2561 2562 s = Service{ 2563 Name: "my_service-${NOMAD_META_FOO}", 2564 PortLabel: "bar", 2565 } 2566 if err := s.Validate(); err == nil { 2567 t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err) 2568 } 2569 2570 s = Service{ 2571 Name: "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456", 2572 PortLabel: "bar", 2573 } 2574 if err := s.ValidateName(s.Name); err == nil { 2575 t.Fatalf("Service should be invalid (too long): %v", err) 2576 } 2577 2578 s = Service{ 2579 Name: "service-name", 2580 Checks: []*ServiceCheck{ 2581 { 2582 Name: "check-tcp", 2583 Type: ServiceCheckTCP, 2584 Interval: 5 * time.Second, 2585 Timeout: 2 * time.Second, 2586 }, 2587 { 2588 Name: "check-http", 2589 Type: ServiceCheckHTTP, 2590 Path: "/foo", 2591 Interval: 5 * time.Second, 2592 Timeout: 2 * time.Second, 2593 }, 2594 }, 2595 } 2596 if err := s.Validate(); err == nil { 2597 t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err) 2598 } 2599 2600 s = Service{ 2601 Name: "service-name", 2602 Checks: []*ServiceCheck{ 2603 { 2604 Name: "check-script", 2605 Type: ServiceCheckScript, 2606 Command: "/bin/date", 2607 Interval: 5 * time.Second, 2608 Timeout: 2 * time.Second, 2609 }, 2610 }, 2611 } 2612 if err := s.Validate(); err != nil { 2613 t.Fatalf("un-expected error: %v", err) 2614 } 2615 2616 s = Service{ 2617 Name: "service-name", 2618 Checks: []*ServiceCheck{ 2619 { 2620 Name: "tcp-check", 2621 Type: ServiceCheckTCP, 2622 Interval: 5 * time.Second, 2623 Timeout: 2 * time.Second, 2624 }, 2625 }, 2626 Connect: &ConsulConnect{ 2627 SidecarService: &ConsulSidecarService{}, 2628 }, 2629 } 2630 require.Error(t, s.Validate()) 2631 } 2632 2633 func TestDistinctCheckID(t *testing.T) { 2634 c1 := ServiceCheck{ 2635 Name: "web-health", 2636 Type: "http", 2637 Path: "/health", 2638 Interval: 2 * time.Second, 2639 Timeout: 3 * time.Second, 2640 } 2641 c2 := ServiceCheck{ 2642 Name: "web-health", 2643 Type: "http", 2644 Path: "/health1", 2645 Interval: 2 * time.Second, 2646 Timeout: 3 * time.Second, 2647 } 2648 2649 c3 := ServiceCheck{ 2650 Name: "web-health", 2651 Type: "http", 2652 Path: "/health", 2653 Interval: 4 * time.Second, 2654 Timeout: 3 * time.Second, 2655 } 2656 serviceID := "123" 2657 c1Hash := c1.Hash(serviceID) 2658 c2Hash := c2.Hash(serviceID) 2659 c3Hash := c3.Hash(serviceID) 2660 2661 if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash { 2662 t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash) 2663 } 2664 2665 } 2666 2667 func TestService_Canonicalize(t *testing.T) { 2668 job := "example" 2669 taskGroup := "cache" 2670 task := "redis" 2671 2672 s := Service{ 2673 Name: "${TASK}-db", 2674 } 2675 2676 s.Canonicalize(job, taskGroup, task) 2677 if s.Name != "redis-db" { 2678 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2679 } 2680 2681 s.Name = "db" 2682 s.Canonicalize(job, taskGroup, task) 2683 if s.Name != "db" { 2684 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2685 } 2686 2687 s.Name = "${JOB}-${TASKGROUP}-${TASK}-db" 2688 s.Canonicalize(job, taskGroup, task) 2689 if s.Name != "example-cache-redis-db" { 2690 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2691 } 2692 2693 s.Name = "${BASE}-db" 2694 s.Canonicalize(job, taskGroup, task) 2695 if s.Name != 
"example-cache-redis-db" { 2696 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2697 } 2698 2699 } 2700 2701 func TestService_Validate(t *testing.T) { 2702 s := Service{ 2703 Name: "testservice", 2704 } 2705 2706 s.Canonicalize("testjob", "testgroup", "testtask") 2707 2708 // Base service should be valid 2709 require.NoError(t, s.Validate()) 2710 2711 // Native Connect should be valid 2712 s.Connect = &ConsulConnect{ 2713 Native: true, 2714 } 2715 require.NoError(t, s.Validate()) 2716 2717 // Native Connect + Sidecar should be invalid 2718 s.Connect.SidecarService = &ConsulSidecarService{} 2719 require.Error(t, s.Validate()) 2720 } 2721 2722 func TestService_Equals(t *testing.T) { 2723 s := Service{ 2724 Name: "testservice", 2725 } 2726 2727 s.Canonicalize("testjob", "testgroup", "testtask") 2728 2729 o := s.Copy() 2730 2731 // Base service should be equal to copy of itself 2732 require.True(t, s.Equals(o)) 2733 2734 // create a helper to assert a diff and reset the struct 2735 assertDiff := func() { 2736 require.False(t, s.Equals(o)) 2737 o = s.Copy() 2738 require.True(t, s.Equals(o), "bug in copy") 2739 } 2740 2741 // Changing any field should cause inequality 2742 o.Name = "diff" 2743 assertDiff() 2744 2745 o.PortLabel = "diff" 2746 assertDiff() 2747 2748 o.AddressMode = AddressModeDriver 2749 assertDiff() 2750 2751 o.Tags = []string{"diff"} 2752 assertDiff() 2753 2754 o.CanaryTags = []string{"diff"} 2755 assertDiff() 2756 2757 o.Checks = []*ServiceCheck{{Name: "diff"}} 2758 assertDiff() 2759 2760 o.Connect = &ConsulConnect{Native: true} 2761 assertDiff() 2762 2763 o.EnableTagOverride = true 2764 assertDiff() 2765 } 2766 2767 func TestJob_ExpandServiceNames(t *testing.T) { 2768 j := &Job{ 2769 Name: "my-job", 2770 TaskGroups: []*TaskGroup{ 2771 { 2772 Name: "web", 2773 Tasks: []*Task{ 2774 { 2775 Name: "frontend", 2776 Services: []*Service{ 2777 { 2778 Name: "${BASE}-default", 2779 }, 2780 { 2781 Name: "jmx", 2782 }, 2783 }, 2784 }, 2785 }, 2786 }, 2787 { 2788 Name: "admin", 2789 Tasks: []*Task{ 2790 { 2791 Name: "admin-web", 2792 }, 2793 }, 2794 }, 2795 }, 2796 } 2797 2798 j.Canonicalize() 2799 2800 service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name 2801 if service1Name != "my-job-web-frontend-default" { 2802 t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name) 2803 } 2804 2805 service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name 2806 if service2Name != "jmx" { 2807 t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name) 2808 } 2809 2810 } 2811 2812 func TestJob_CombinedTaskMeta(t *testing.T) { 2813 j := &Job{ 2814 Meta: map[string]string{ 2815 "job_test": "job", 2816 "group_test": "job", 2817 "task_test": "job", 2818 }, 2819 TaskGroups: []*TaskGroup{ 2820 { 2821 Name: "group", 2822 Meta: map[string]string{ 2823 "group_test": "group", 2824 "task_test": "group", 2825 }, 2826 Tasks: []*Task{ 2827 { 2828 Name: "task", 2829 Meta: map[string]string{ 2830 "task_test": "task", 2831 }, 2832 }, 2833 }, 2834 }, 2835 }, 2836 } 2837 2838 require := require.New(t) 2839 require.EqualValues(map[string]string{ 2840 "job_test": "job", 2841 "group_test": "group", 2842 "task_test": "task", 2843 }, j.CombinedTaskMeta("group", "task")) 2844 require.EqualValues(map[string]string{ 2845 "job_test": "job", 2846 "group_test": "group", 2847 "task_test": "group", 2848 }, j.CombinedTaskMeta("group", "")) 2849 require.EqualValues(map[string]string{ 2850 "job_test": "job", 2851 "group_test": "job", 2852 "task_test": 
"job", 2853 }, j.CombinedTaskMeta("", "task")) 2854 2855 } 2856 2857 func TestPeriodicConfig_EnabledInvalid(t *testing.T) { 2858 // Create a config that is enabled but with no interval specified. 2859 p := &PeriodicConfig{Enabled: true} 2860 if err := p.Validate(); err == nil { 2861 t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid") 2862 } 2863 2864 // Create a config that is enabled, with a spec but no type specified. 2865 p = &PeriodicConfig{Enabled: true, Spec: "foo"} 2866 if err := p.Validate(); err == nil { 2867 t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid") 2868 } 2869 2870 // Create a config that is enabled, with a spec type but no spec specified. 2871 p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron} 2872 if err := p.Validate(); err == nil { 2873 t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid") 2874 } 2875 2876 // Create a config that is enabled, with a bad time zone. 2877 p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"} 2878 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") { 2879 t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err) 2880 } 2881 } 2882 2883 func TestPeriodicConfig_InvalidCron(t *testing.T) { 2884 specs := []string{"foo", "* *", "@foo"} 2885 for _, spec := range specs { 2886 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2887 p.Canonicalize() 2888 if err := p.Validate(); err == nil { 2889 t.Fatal("Invalid cron spec") 2890 } 2891 } 2892 } 2893 2894 func TestPeriodicConfig_ValidCron(t *testing.T) { 2895 specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} 2896 for _, spec := range specs { 2897 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2898 p.Canonicalize() 2899 if err := p.Validate(); err != nil { 2900 t.Fatal("Passed valid cron") 2901 } 2902 } 2903 } 2904 2905 func TestPeriodicConfig_NextCron(t *testing.T) { 2906 require := require.New(t) 2907 2908 type testExpectation struct { 2909 Time time.Time 2910 HasError bool 2911 ErrorMsg string 2912 } 2913 2914 from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) 2915 specs := []string{"0 0 29 2 * 1980", 2916 "*/5 * * * *", 2917 "1 15-0 * * 1-5"} 2918 expected := []*testExpectation{ 2919 { 2920 Time: time.Time{}, 2921 HasError: false, 2922 }, 2923 { 2924 Time: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC), 2925 HasError: false, 2926 }, 2927 { 2928 Time: time.Time{}, 2929 HasError: true, 2930 ErrorMsg: "failed parsing cron expression", 2931 }, 2932 } 2933 2934 for i, spec := range specs { 2935 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2936 p.Canonicalize() 2937 n, err := p.Next(from) 2938 nextExpected := expected[i] 2939 2940 require.Equal(nextExpected.Time, n) 2941 require.Equal(err != nil, nextExpected.HasError) 2942 if err != nil { 2943 require.True(strings.Contains(err.Error(), nextExpected.ErrorMsg)) 2944 } 2945 } 2946 } 2947 2948 func TestPeriodicConfig_ValidTimeZone(t *testing.T) { 2949 zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} 2950 for _, zone := range zones { 2951 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} 2952 p.Canonicalize() 2953 if err := p.Validate(); err != nil { 2954 t.Fatalf("Valid tz errored: %v", err) 2955 } 2956 } 2957 } 2958 2959 func TestPeriodicConfig_DST(t *testing.T) { 2960 require := require.New(t) 2961 2962 // On Sun, Mar 12, 2:00 am 
func TestPeriodicConfig_DST(t *testing.T) {
    require := require.New(t)

    // On Sun, Mar 12, 2017 at 2:00 am, US clocks sprang forward one hour (UTC-8 to UTC-7)
    p := &PeriodicConfig{
        Enabled:  true,
        SpecType: PeriodicSpecCron,
        Spec:     "0 2 11-12 3 * 2017",
        TimeZone: "America/Los_Angeles",
    }
    p.Canonicalize()

    t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
    t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)

    // E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
    e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
    e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC)

    n1, err := p.Next(t1)
    require.Nil(err)

    n2, err := p.Next(t2)
    require.Nil(err)

    require.Equal(e1, n1.UTC())
    require.Equal(e2, n2.UTC())
}

func TestTaskLifecycleConfig_Validate(t *testing.T) {
    testCases := []struct {
        name string
        tlc  *TaskLifecycleConfig
        err  error
    }{
        {
            name: "prestart completed",
            tlc: &TaskLifecycleConfig{
                Hook:    "prestart",
                Sidecar: false,
            },
            err: nil,
        },
        {
            name: "prestart running",
            tlc: &TaskLifecycleConfig{
                Hook:    "prestart",
                Sidecar: true,
            },
            err: nil,
        },
        {
            name: "no hook",
            tlc: &TaskLifecycleConfig{
                Sidecar: true,
            },
            err: fmt.Errorf("no lifecycle hook provided"),
        },
    }

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            err := tc.tlc.Validate()
            if tc.err != nil {
                require.Error(t, err)
                require.Contains(t, err.Error(), tc.err.Error())
            } else {
                require.Nil(t, err)
            }
        })
    }
}

func TestRestartPolicy_Validate(t *testing.T) {
    // Policy with acceptable restart options passes
    p := &RestartPolicy{
        Mode:     RestartPolicyModeFail,
        Attempts: 0,
        Interval: 5 * time.Second,
    }
    if err := p.Validate(); err != nil {
        t.Fatalf("err: %v", err)
    }

    // Policy with ambiguous restart options fails
    p = &RestartPolicy{
        Mode:     RestartPolicyModeDelay,
        Attempts: 0,
        Interval: 5 * time.Second,
    }
    if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") {
        t.Fatalf("expect ambiguity error, got: %v", err)
    }

    // Bad policy mode fails
    p = &RestartPolicy{
        Mode:     "nope",
        Attempts: 1,
        Interval: 5 * time.Second,
    }
    if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") {
        t.Fatalf("expect mode error, got: %v", err)
    }

    // Fails when attempts*delay does not fit inside interval
    p = &RestartPolicy{
        Mode:     RestartPolicyModeDelay,
        Attempts: 3,
        Delay:    5 * time.Second,
        Interval: 5 * time.Second,
    }
    if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") {
        t.Fatalf("expect restart interval error, got: %v", err)
    }

    // Fails when interval is too small
    p = &RestartPolicy{
        Mode:     RestartPolicyModeDelay,
        Attempts: 3,
        Delay:    5 * time.Second,
        Interval: 2 * time.Second,
    }
    if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") {
        t.Fatalf("expect interval too small error, got: %v", err)
    }
}
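
// TestReschedulePolicy_Validate covers the reschedule policy invariants:
// minimum interval and delay, known delay functions, MaxDelay >= Delay,
// enough room in the interval for the requested attempts, and the
// interaction of Attempts with Unlimited.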
func TestReschedulePolicy_Validate(t *testing.T) {
    type testCase struct {
        desc             string
        ReschedulePolicy *ReschedulePolicy
        errors           []error
    }

    testCases := []testCase{
        {
            desc: "Nil",
        },
        {
            desc: "Disabled via zero attempts",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts: 0,
                Interval: 0 * time.Second,
            },
        },
        {
            desc: "Disabled via negative attempts",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts: -1,
                Interval: 5 * time.Minute,
            },
        },
        {
            desc: "Valid Linear Delay",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      1,
                Interval:      5 * time.Minute,
                Delay:         10 * time.Second,
                DelayFunction: "constant",
            },
        },
        {
            desc: "Valid Exponential Delay",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      5,
                Interval:      1 * time.Hour,
                Delay:         30 * time.Second,
                MaxDelay:      5 * time.Minute,
                DelayFunction: "exponential",
            },
        },
        {
            desc: "Valid Fibonacci Delay",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      5,
                Interval:      15 * time.Minute,
                Delay:         10 * time.Second,
                MaxDelay:      5 * time.Minute,
                DelayFunction: "fibonacci",
            },
        },
        {
            desc: "Invalid delay function",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      1,
                Interval:      1 * time.Second,
                DelayFunction: "blah",
            },
            errors: []error{
                fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
                fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
                fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions),
            },
        },
        {
            desc: "Invalid delay ceiling",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      1,
                Interval:      8 * time.Second,
                DelayFunction: "exponential",
                Delay:         15 * time.Second,
                MaxDelay:      5 * time.Second,
            },
            errors: []error{
                fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)",
                    15*time.Second, 5*time.Second),
            },
        },
        {
            desc: "Invalid delay and interval",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      1,
                Interval:      1 * time.Second,
                DelayFunction: "constant",
            },
            errors: []error{
                fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
                fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
            },
        },
        {
            // Should suggest 2h40m as the interval
            desc: "Invalid Attempts - linear delay",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      10,
                Interval:      1 * time.Hour,
                Delay:         20 * time.Minute,
                DelayFunction: "constant",
            },
            errors: []error{
                fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+
                    " delay function %q", 3, time.Hour, 20*time.Minute, "constant"),
                fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
                    200*time.Minute, 10),
            },
        },
        {
            // Should suggest 4h40m as the interval
            // Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
            desc: "Invalid Attempts - exponential delay",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      10,
                Interval:      30 * time.Minute,
                Delay:         5 * time.Minute,
                MaxDelay:      40 * time.Minute,
                DelayFunction: "exponential",
            },
            errors: []error{
                fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
                    "delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute,
                    "exponential", 40*time.Minute),
                fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
                    280*time.Minute, 10),
            },
        },
        {
            // Should suggest 8h as the interval
            // Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
            desc: "Invalid Attempts - fibonacci delay",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      10,
                Interval:      1 * time.Hour,
                Delay:         20 * time.Minute,
                MaxDelay:      80 * time.Minute,
                DelayFunction: "fibonacci",
            },
            errors: []error{
                fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
                    "delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute,
                    "fibonacci", 80*time.Minute),
                fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
                    480*time.Minute, 10),
            },
        },
        {
            desc: "Ambiguous Unlimited config, has both attempts and unlimited set",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      1,
                Unlimited:     true,
                DelayFunction: "exponential",
                Delay:         5 * time.Minute,
                MaxDelay:      1 * time.Hour,
            },
            errors: []error{
                fmt.Errorf("Interval must be a non zero value if Attempts > 0"),
                fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true),
            },
        },
        {
            desc: "Invalid Unlimited config",
            ReschedulePolicy: &ReschedulePolicy{
                Attempts:      1,
                Interval:      1 * time.Second,
                Unlimited:     true,
                DelayFunction: "exponential",
            },
            errors: []error{
                fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
                fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
            },
        },
        {
            desc: "Valid Unlimited config",
            ReschedulePolicy: &ReschedulePolicy{
                Unlimited:     true,
                DelayFunction: "exponential",
                Delay:         5 * time.Second,
                MaxDelay:      1 * time.Hour,
            },
        },
    }

    for _, tc := range testCases {
        t.Run(tc.desc, func(t *testing.T) {
            require := require.New(t)
            gotErr := tc.ReschedulePolicy.Validate()
            if tc.errors != nil {
                // Validate all errors
                for _, err := range tc.errors {
                    require.Contains(gotErr.Error(), err.Error())
                }
            } else {
                require.Nil(gotErr)
            }
        })
    }
}

func TestAllocation_Index(t *testing.T) {
    a1 := Allocation{
        Name:      "example.cache[1]",
        TaskGroup: "cache",
        JobID:     "example",
        Job: &Job{
            ID:         "example",
            TaskGroups: []*TaskGroup{{Name: "cache"}},
        },
    }
    e1 := uint(1)
    a2 := a1.Copy()
    a2.Name = "example.cache[713127]"
    e2 := uint(713127)

    if a1.Index() != e1 || a2.Index() != e2 {
        t.Fatalf("Got %d and %d", a1.Index(), a2.Index())
    }
}

func TestTaskArtifact_Validate_Source(t *testing.T) {
    valid := &TaskArtifact{GetterSource: "google.com"}
    if err := valid.Validate(); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
}
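
// TestTaskArtifact_Validate_Dest asserts that a relative destination may not
// escape the allocation directory.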
func TestTaskArtifact_Validate_Dest(t *testing.T) {
    valid := &TaskArtifact{GetterSource: "google.com"}
    if err := valid.Validate(); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    valid.RelativeDest = "local/"
    if err := valid.Validate(); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    valid.RelativeDest = "local/.."
    if err := valid.Validate(); err != nil {
        t.Fatalf("unexpected error: %v", err)
    }

    valid.RelativeDest = "local/../../.."
    if err := valid.Validate(); err == nil {
        t.Fatalf("expected error for destination escaping the allocation directory")
    }
}

// TestTaskArtifact_Hash asserts an artifact's hash changes when any of the
// fields change.
func TestTaskArtifact_Hash(t *testing.T) {
    t.Parallel()

    cases := []TaskArtifact{
        {},
        {
            GetterSource: "a",
        },
        {
            GetterSource: "b",
        },
        {
            GetterSource:  "b",
            GetterOptions: map[string]string{"c": "c"},
        },
        {
            GetterSource: "b",
            GetterOptions: map[string]string{
                "c": "c",
                "d": "d",
            },
        },
        {
            GetterSource: "b",
            GetterOptions: map[string]string{
                "c": "c",
                "d": "e",
            },
        },
        {
            GetterSource: "b",
            GetterOptions: map[string]string{
                "c": "c",
                "d": "e",
            },
            GetterMode: "f",
        },
        {
            GetterSource: "b",
            GetterOptions: map[string]string{
                "c": "c",
                "d": "e",
            },
            GetterMode: "g",
        },
        {
            GetterSource: "b",
            GetterOptions: map[string]string{
                "c": "c",
                "d": "e",
            },
            GetterMode:   "g",
            RelativeDest: "h",
        },
        {
            GetterSource: "b",
            GetterOptions: map[string]string{
                "c": "c",
                "d": "e",
            },
            GetterMode:   "g",
            RelativeDest: "i",
        },
    }

    // Map of hash to source
    hashes := make(map[string]TaskArtifact, len(cases))
    for _, tc := range cases {
        h := tc.Hash()

        // Hash should be deterministic
        require.Equal(t, h, tc.Hash())

        // Hash should be unique
        if orig, ok := hashes[h]; ok {
            require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n",
                pretty.Sprint(tc), pretty.Sprint(orig),
            )
        }
        hashes[h] = tc
    }

    require.Len(t, hashes, len(cases))
}

func TestAllocation_ShouldMigrate(t *testing.T) {
    alloc := Allocation{
        PreviousAllocation: "123",
        TaskGroup:          "foo",
        Job: &Job{
            TaskGroups: []*TaskGroup{
                {
                    Name: "foo",
                    EphemeralDisk: &EphemeralDisk{
                        Migrate: true,
                        Sticky:  true,
                    },
                },
            },
        },
    }

    if !alloc.ShouldMigrate() {
        t.Fatalf("bad: %v", alloc)
    }

    alloc1 := Allocation{
        PreviousAllocation: "123",
        TaskGroup:          "foo",
        Job: &Job{
            TaskGroups: []*TaskGroup{
                {
                    Name:          "foo",
                    EphemeralDisk: &EphemeralDisk{},
                },
            },
        },
    }

    if alloc1.ShouldMigrate() {
        t.Fatalf("bad: %v", alloc1)
    }

    alloc2 := Allocation{
        PreviousAllocation: "123",
        TaskGroup:          "foo",
        Job: &Job{
            TaskGroups: []*TaskGroup{
                {
                    Name: "foo",
                    EphemeralDisk: &EphemeralDisk{
                        Sticky:  false,
                        Migrate: true,
                    },
                },
            },
        },
    }

    if alloc2.ShouldMigrate() {
        t.Fatalf("bad: %v", alloc2)
    }

    alloc3 := Allocation{
        PreviousAllocation: "123",
        TaskGroup:          "foo",
        Job: &Job{
            TaskGroups: []*TaskGroup{
                {
                    Name: "foo",
                },
            },
        },
    }

    if alloc3.ShouldMigrate() {
        t.Fatalf("bad: %v", alloc3)
    }

    // No previous
    alloc4 := Allocation{
        TaskGroup: "foo",
        Job: &Job{
            TaskGroups: []*TaskGroup{
                {
                    Name: "foo",
                    EphemeralDisk: &EphemeralDisk{
                        Migrate: true,
                        Sticky:  true,
                    },
                },
            },
        },
    }

    if alloc4.ShouldMigrate() {
        t.Fatalf("bad: %v", alloc4)
    }
}
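
// TestTaskArtifact_Validate_Checksum asserts that the checksum getter option
// must be a "type:value" pair with a supported type and a value of plausible
// length, though interpolated values are accepted as-is.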
func TestTaskArtifact_Validate_Checksum(t *testing.T) {
    cases := []struct {
        Input *TaskArtifact
        Err   bool
    }{
        {
            &TaskArtifact{
                GetterSource: "foo.com",
                GetterOptions: map[string]string{
                    "checksum": "no-type",
                },
            },
            true,
        },
        {
            &TaskArtifact{
                GetterSource: "foo.com",
                GetterOptions: map[string]string{
                    "checksum": "md5:toosmall",
                },
            },
            true,
        },
        {
            &TaskArtifact{
                GetterSource: "foo.com",
                GetterOptions: map[string]string{
                    "checksum": "invalid:type",
                },
            },
            true,
        },
        {
            &TaskArtifact{
                GetterSource: "foo.com",
                GetterOptions: map[string]string{
                    "checksum": "md5:${ARTIFACT_CHECKSUM}",
                },
            },
            false,
        },
    }

    for i, tc := range cases {
        err := tc.Input.Validate()
        if (err != nil) != tc.Err {
            t.Fatalf("case %d: %v", i, err)
        }
    }
}

func TestPlan_NormalizeAllocations(t *testing.T) {
    t.Parallel()
    plan := &Plan{
        NodeUpdate:      make(map[string][]*Allocation),
        NodePreemptions: make(map[string][]*Allocation),
    }
    stoppedAlloc := MockAlloc()
    desiredDesc := "Desired desc"
    plan.AppendStoppedAlloc(stoppedAlloc, desiredDesc, AllocClientStatusLost)
    preemptedAlloc := MockAlloc()
    preemptingAllocID := uuid.Generate()
    plan.AppendPreemptedAlloc(preemptedAlloc, preemptingAllocID)

    plan.NormalizeAllocations()

    actualStoppedAlloc := plan.NodeUpdate[stoppedAlloc.NodeID][0]
    expectedStoppedAlloc := &Allocation{
        ID:                 stoppedAlloc.ID,
        DesiredDescription: desiredDesc,
        ClientStatus:       AllocClientStatusLost,
    }
    assert.Equal(t, expectedStoppedAlloc, actualStoppedAlloc)
    actualPreemptedAlloc := plan.NodePreemptions[preemptedAlloc.NodeID][0]
    expectedPreemptedAlloc := &Allocation{
        ID:                    preemptedAlloc.ID,
        PreemptedByAllocation: preemptingAllocID,
    }
    assert.Equal(t, expectedPreemptedAlloc, actualPreemptedAlloc)
}

func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) {
    t.Parallel()
    plan := &Plan{
        NodeUpdate: make(map[string][]*Allocation),
    }
    alloc := MockAlloc()
    desiredDesc := "Desired desc"

    plan.AppendStoppedAlloc(alloc, desiredDesc, AllocClientStatusLost)

    appendedAlloc := plan.NodeUpdate[alloc.NodeID][0]
    expectedAlloc := new(Allocation)
    *expectedAlloc = *alloc
    expectedAlloc.DesiredDescription = desiredDesc
    expectedAlloc.DesiredStatus = AllocDesiredStatusStop
    expectedAlloc.ClientStatus = AllocClientStatusLost
    expectedAlloc.Job = nil
    assert.Equal(t, expectedAlloc, appendedAlloc)
    assert.Equal(t, alloc.Job, plan.Job)
}
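
// TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs asserts that a
// preempted alloc is appended in a trimmed form carrying only its identity,
// eviction status, the preempting alloc ID, and its resources.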
func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) {
    t.Parallel()
    plan := &Plan{
        NodePreemptions: make(map[string][]*Allocation),
    }
    alloc := MockAlloc()
    preemptingAllocID := uuid.Generate()

    plan.AppendPreemptedAlloc(alloc, preemptingAllocID)

    appendedAlloc := plan.NodePreemptions[alloc.NodeID][0]
    expectedAlloc := &Allocation{
        ID:                    alloc.ID,
        PreemptedByAllocation: preemptingAllocID,
        JobID:                 alloc.JobID,
        Namespace:             alloc.Namespace,
        DesiredStatus:         AllocDesiredStatusEvict,
        DesiredDescription:    fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID),
        AllocatedResources:    alloc.AllocatedResources,
        TaskResources:         alloc.TaskResources,
        SharedResources:       alloc.SharedResources,
    }
    assert.Equal(t, expectedAlloc, appendedAlloc)
}

func TestAllocation_MsgPackTags(t *testing.T) {
    t.Parallel()
    planType := reflect.TypeOf(Allocation{})

    msgPackTags, _ := planType.FieldByName("_struct")

    assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`))
}

func TestEvaluation_MsgPackTags(t *testing.T) {
    t.Parallel()
    planType := reflect.TypeOf(Evaluation{})

    msgPackTags, _ := planType.FieldByName("_struct")

    assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`))
}

func TestAllocation_Terminated(t *testing.T) {
    type desiredState struct {
        ClientStatus  string
        DesiredStatus string
        Terminated    bool
    }

    harness := []desiredState{
        {
            ClientStatus:  AllocClientStatusPending,
            DesiredStatus: AllocDesiredStatusStop,
            Terminated:    false,
        },
        {
            ClientStatus:  AllocClientStatusRunning,
            DesiredStatus: AllocDesiredStatusStop,
            Terminated:    false,
        },
        {
            ClientStatus:  AllocClientStatusFailed,
            DesiredStatus: AllocDesiredStatusStop,
            Terminated:    true,
        },
        {
            ClientStatus:  AllocClientStatusFailed,
            DesiredStatus: AllocDesiredStatusRun,
            Terminated:    true,
        },
    }

    for _, state := range harness {
        alloc := Allocation{}
        alloc.DesiredStatus = state.DesiredStatus
        alloc.ClientStatus = state.ClientStatus
        if alloc.Terminated() != state.Terminated {
            t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated())
        }
    }
}
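
// TestAllocation_ShouldReschedule asserts reschedule eligibility based on
// client/desired status and on how many attempts remain within the policy
// interval.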
func TestAllocation_ShouldReschedule(t *testing.T) {
    type testCase struct {
        Desc               string
        FailTime           time.Time
        ClientStatus       string
        DesiredStatus      string
        ReschedulePolicy   *ReschedulePolicy
        RescheduleTrackers []*RescheduleEvent
        ShouldReschedule   bool
    }

    fail := time.Now()

    harness := []testCase{
        {
            Desc:             "Reschedule when desired state is stop",
            ClientStatus:     AllocClientStatusPending,
            DesiredStatus:    AllocDesiredStatusStop,
            FailTime:         fail,
            ReschedulePolicy: nil,
            ShouldReschedule: false,
        },
        {
            Desc:             "Disabled rescheduling",
            ClientStatus:     AllocClientStatusFailed,
            DesiredStatus:    AllocDesiredStatusRun,
            FailTime:         fail,
            ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute},
            ShouldReschedule: false,
        },
        {
            Desc:             "Reschedule when client status is complete",
            ClientStatus:     AllocClientStatusComplete,
            DesiredStatus:    AllocDesiredStatusRun,
            FailTime:         fail,
            ReschedulePolicy: nil,
            ShouldReschedule: false,
        },
        {
            Desc:             "Reschedule with nil reschedule policy",
            ClientStatus:     AllocClientStatusFailed,
            DesiredStatus:    AllocDesiredStatusRun,
            FailTime:         fail,
            ReschedulePolicy: nil,
            ShouldReschedule: false,
        },
        {
            Desc:             "Reschedule with unlimited and attempts >0",
            ClientStatus:     AllocClientStatusFailed,
            DesiredStatus:    AllocDesiredStatusRun,
            FailTime:         fail,
            ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true},
            ShouldReschedule: true,
        },
        {
            Desc:             "Reschedule when client status is complete",
            ClientStatus:     AllocClientStatusComplete,
            DesiredStatus:    AllocDesiredStatusRun,
            FailTime:         fail,
            ReschedulePolicy: nil,
            ShouldReschedule: false,
        },
        {
            Desc:             "Reschedule with policy when client status complete",
            ClientStatus:     AllocClientStatusComplete,
            DesiredStatus:    AllocDesiredStatusRun,
            FailTime:         fail,
            ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
            ShouldReschedule: false,
        },
        {
            Desc:             "Reschedule with no previous attempts",
            ClientStatus:     AllocClientStatusFailed,
            DesiredStatus:    AllocDesiredStatusRun,
            FailTime:         fail,
            ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
            ShouldReschedule: true,
        },
        {
            Desc:             "Reschedule with leftover attempts",
            ClientStatus:     AllocClientStatusFailed,
            DesiredStatus:    AllocDesiredStatusRun,
            ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
            FailTime:         fail,
            RescheduleTrackers: []*RescheduleEvent{
                {
                    RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(),
                },
            },
            ShouldReschedule: true,
        },
        {
            Desc:             "Reschedule with too old previous attempts",
            ClientStatus:     AllocClientStatusFailed,
            DesiredStatus:    AllocDesiredStatusRun,
            FailTime:         fail,
            ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute},
            RescheduleTrackers: []*RescheduleEvent{
                {
                    RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(),
                },
            },
            ShouldReschedule: true,
        },
        {
            Desc:             "Reschedule with no leftover attempts",
            ClientStatus:     AllocClientStatusFailed,
            DesiredStatus:    AllocDesiredStatusRun,
            FailTime:         fail,
            ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
            RescheduleTrackers: []*RescheduleEvent{
                {
                    RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
                },
                {
                    RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(),
                },
            },
            ShouldReschedule: false,
        },
    }

    for _, state := range harness {
        alloc := Allocation{}
        alloc.DesiredStatus = state.DesiredStatus
        alloc.ClientStatus = state.ClientStatus
        alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers}

        t.Run(state.Desc, func(t *testing.T) {
            if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule {
                t.Fatalf("expected %v but got %v", state.ShouldReschedule, got)
            }
        })
    }
}
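
// TestAllocation_LastEventTime asserts that the most recent task FinishedAt
// wins, falling back to the allocation's modify time when none is set.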
func TestAllocation_LastEventTime(t *testing.T) {
    type testCase struct {
        desc                  string
        taskState             map[string]*TaskState
        expectedLastEventTime time.Time
    }

    t1 := time.Now().UTC()

    testCases := []testCase{
        {
            desc:                  "nil task state",
            expectedLastEventTime: t1,
        },
        {
            desc:                  "empty task state",
            taskState:             make(map[string]*TaskState),
            expectedLastEventTime: t1,
        },
        {
            desc: "Finished At not set",
            taskState: map[string]*TaskState{"foo": {
                State:     "start",
                StartedAt: t1.Add(-2 * time.Hour),
            }},
            expectedLastEventTime: t1,
        },
        {
            desc: "One finished",
            taskState: map[string]*TaskState{"foo": {
                State:      "start",
                StartedAt:  t1.Add(-2 * time.Hour),
                FinishedAt: t1.Add(-1 * time.Hour),
            }},
            expectedLastEventTime: t1.Add(-1 * time.Hour),
        },
        {
            desc: "Multiple tasks",
            taskState: map[string]*TaskState{
                "foo": {
                    State:      "start",
                    StartedAt:  t1.Add(-2 * time.Hour),
                    FinishedAt: t1.Add(-1 * time.Hour),
                },
                "bar": {
                    State:      "start",
                    StartedAt:  t1.Add(-2 * time.Hour),
                    FinishedAt: t1.Add(-40 * time.Minute),
                },
            },
            expectedLastEventTime: t1.Add(-40 * time.Minute),
        },
        {
            desc: "No finishedAt set, one task event, should use modify time",
            taskState: map[string]*TaskState{"foo": {
                State:     "run",
                StartedAt: t1.Add(-2 * time.Hour),
                Events: []*TaskEvent{
                    {Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()},
                },
            }},
            expectedLastEventTime: t1,
        },
    }
    for _, tc := range testCases {
        t.Run(tc.desc, func(t *testing.T) {
            alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()}
            alloc.TaskStates = tc.taskState
            require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime())
        })
    }
}
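
// TestAllocation_NextDelay asserts the reschedule wait produced by the
// constant, exponential, and fibonacci delay functions, including the
// MaxDelay ceiling and the delay reset after a long-running attempt.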
func TestAllocation_NextDelay(t *testing.T) {
    type testCase struct {
        desc                       string
        reschedulePolicy           *ReschedulePolicy
        alloc                      *Allocation
        expectedRescheduleTime     time.Time
        expectedRescheduleEligible bool
    }
    now := time.Now()
    testCases := []testCase{
        {
            desc: "Allocation hasn't failed yet",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "constant",
                Delay:         5 * time.Second,
            },
            alloc:                      &Allocation{},
            expectedRescheduleTime:     time.Time{},
            expectedRescheduleEligible: false,
        },
        {
            desc:                       "Allocation has no reschedule policy",
            alloc:                      &Allocation{},
            expectedRescheduleTime:     time.Time{},
            expectedRescheduleEligible: false,
        },
        {
            desc: "Allocation lacks task state",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "constant",
                Delay:         5 * time.Second,
                Unlimited:     true,
            },
            alloc:                      &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()},
            expectedRescheduleTime:     now.UTC().Add(5 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "linear delay, unlimited restarts, no reschedule tracker",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "constant",
                Delay:         5 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "dead",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-2 * time.Second),
                }},
            },
            expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "linear delay with reschedule tracker",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "constant",
                Delay:         5 * time.Second,
                Interval:      10 * time.Minute,
                Attempts:      2,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-2 * time.Second),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{{
                        RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
                        Delay:          5 * time.Second,
                    }},
                },
            },
            expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "linear delay with reschedule tracker, attempts exhausted",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "constant",
                Delay:         5 * time.Second,
                Interval:      10 * time.Minute,
                Attempts:      2,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-2 * time.Second),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{
                        {
                            RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                    },
                },
            },
            expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
            expectedRescheduleEligible: false,
        },
        {
            desc: "exponential delay - no reschedule tracker",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "exponential",
                Delay:         5 * time.Second,
                MaxDelay:      90 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-2 * time.Second),
                }},
            },
            expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "exponential delay with reschedule tracker",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "exponential",
                Delay:         5 * time.Second,
                MaxDelay:      90 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-2 * time.Second),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{
                        {
                            RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          10 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          20 * time.Second,
                        },
                    },
                },
            },
            expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "exponential delay with delay ceiling reached",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "exponential",
                Delay:         5 * time.Second,
                MaxDelay:      90 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-15 * time.Second),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{
                        {
                            RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          10 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          20 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          40 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
                            Delay:          80 * time.Second,
                        },
                    },
                },
            },
            expectedRescheduleTime:     now.Add(-15 * time.Second).Add(90 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            // Test case where most recent reschedule ran longer than delay ceiling
            desc: "exponential delay, delay ceiling reset condition met",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "exponential",
                Delay:         5 * time.Second,
                MaxDelay:      90 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-15 * time.Minute),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{
                        {
                            RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          10 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          20 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          40 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          80 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          90 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          90 * time.Second,
                        },
                    },
                },
            },
            expectedRescheduleTime:     now.Add(-15 * time.Minute).Add(5 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "fibonacci delay - no reschedule tracker",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "fibonacci",
                Delay:         5 * time.Second,
                MaxDelay:      90 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-2 * time.Second),
                }},
            },
            expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "fibonacci delay with reschedule tracker",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "fibonacci",
                Delay:         5 * time.Second,
                MaxDelay:      90 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-2 * time.Second),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{
                        {
                            RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                    },
                },
            },
            expectedRescheduleTime:     now.Add(-2 * time.Second).Add(10 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "fibonacci delay with more events",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "fibonacci",
                Delay:         5 * time.Second,
                MaxDelay:      90 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-2 * time.Second),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{
                        {
                            RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          10 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          15 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          25 * time.Second,
                        },
                    },
                },
            },
            expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "fibonacci delay with delay ceiling reached",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "fibonacci",
                Delay:         5 * time.Second,
                MaxDelay:      50 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-15 * time.Second),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{
                        {
                            RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          10 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          15 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          25 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
                            Delay:          40 * time.Second,
                        },
                    },
                },
            },
            expectedRescheduleTime:     now.Add(-15 * time.Second).Add(50 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "fibonacci delay with delay reset condition met",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "fibonacci",
                Delay:         5 * time.Second,
                MaxDelay:      50 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-5 * time.Minute),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{
                        {
                            RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          10 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          15 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          25 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          40 * time.Second,
                        },
                    },
                },
            },
            expectedRescheduleTime:     now.Add(-5 * time.Minute).Add(5 * time.Second),
            expectedRescheduleEligible: true,
        },
        {
            desc: "fibonacci delay with the most recent event that reset delay value",
            reschedulePolicy: &ReschedulePolicy{
                DelayFunction: "fibonacci",
                Delay:         5 * time.Second,
                MaxDelay:      50 * time.Second,
                Unlimited:     true,
            },
            alloc: &Allocation{
                ClientStatus: AllocClientStatusFailed,
                TaskStates: map[string]*TaskState{"foo": {
                    State:      "start",
                    StartedAt:  now.Add(-1 * time.Hour),
                    FinishedAt: now.Add(-5 * time.Second),
                }},
                RescheduleTracker: &RescheduleTracker{
                    Events: []*RescheduleEvent{
                        {
                            RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          10 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          15 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          25 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          40 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
                            Delay:          50 * time.Second,
                        },
                        {
                            RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(),
                            Delay:          5 * time.Second,
                        },
                    },
                },
            },
            expectedRescheduleTime:     now.Add(-5 * time.Second).Add(5 * time.Second),
            expectedRescheduleEligible: true,
        },
    }
    for _, tc := range testCases {
        t.Run(tc.desc, func(t *testing.T) {
            require := require.New(t)
            j := testJob()
            if tc.reschedulePolicy != nil {
                j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy
            }
            tc.alloc.Job = j
            tc.alloc.TaskGroup = j.TaskGroups[0].Name
            reschedTime, allowed := tc.alloc.NextRescheduleTime()
            require.Equal(tc.expectedRescheduleEligible, allowed)
            require.Equal(tc.expectedRescheduleTime, reschedTime)
        })
    }
}

func TestAllocation_Canonicalize_Old(t *testing.T) {
    alloc := MockAlloc()
    alloc.AllocatedResources = nil
    alloc.TaskResources = map[string]*Resources{
        "web": {
            CPU:      500,
            MemoryMB: 256,
            Networks: []*NetworkResource{
                {
                    Device:        "eth0",
                    IP:            "192.168.0.100",
                    ReservedPorts: []Port{{Label: "admin", Value: 5000}},
                    MBits:         50,
                    DynamicPorts:  []Port{{Label: "http", Value: 9876}},
                },
            },
        },
    }
    alloc.SharedResources = &Resources{
        DiskMB: 150,
    }
    alloc.Canonicalize()

    expected := &AllocatedResources{
        Tasks: map[string]*AllocatedTaskResources{
            "web": {
                Cpu: AllocatedCpuResources{
                    CpuShares: 500,
                },
                Memory: AllocatedMemoryResources{
                    MemoryMB: 256,
                },
                Networks: []*NetworkResource{
                    {
                        Device:        "eth0",
                        IP:            "192.168.0.100",
                        ReservedPorts: []Port{{Label: "admin", Value: 5000}},
                        MBits:         50,
                        DynamicPorts:  []Port{{Label: "http", Value: 9876}},
                    },
                },
            },
        },
        Shared: AllocatedSharedResources{
            DiskMB: 150,
        },
    }

    require.Equal(t, expected, alloc.AllocatedResources)
}

// TestAllocation_Canonicalize_New asserts that an alloc with latest
// schema isn't modified with Canonicalize
func TestAllocation_Canonicalize_New(t *testing.T) {
    alloc := MockAlloc()
    copy := alloc.Copy()

    alloc.Canonicalize()
    require.Equal(t, copy, alloc)
}
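
// TestRescheduleTracker_Copy asserts that Copy deep-copies the tracker and
// preserves nil.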
func TestRescheduleTracker_Copy(t *testing.T) {
    type testCase struct {
        original *RescheduleTracker
        expected *RescheduleTracker
    }

    cases := []testCase{
        {nil, nil},
        {
            &RescheduleTracker{Events: []*RescheduleEvent{
                {
                    RescheduleTime: 2,
                    PrevAllocID:    "12",
                    PrevNodeID:     "12",
                    Delay:          30 * time.Second,
                },
            }},
            &RescheduleTracker{Events: []*RescheduleEvent{
                {
                    RescheduleTime: 2,
                    PrevAllocID:    "12",
                    PrevNodeID:     "12",
                    Delay:          30 * time.Second,
                },
            }},
        },
    }

    for _, tc := range cases {
        if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) {
            t.Fatalf("expected %v but got %v", *tc.expected, *got)
        }
    }
}

func TestVault_Validate(t *testing.T) {
    v := &Vault{
        Env:        true,
        ChangeMode: VaultChangeModeNoop,
    }

    if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") {
        t.Fatalf("Expected policy list empty error")
    }

    v.Policies = []string{"foo", "root"}
    v.ChangeMode = VaultChangeModeSignal

    err := v.Validate()
    if err == nil {
        t.Fatalf("Expected validation errors")
    }

    if !strings.Contains(err.Error(), "Signal must") {
        t.Fatalf("Expected signal empty error")
    }
    if !strings.Contains(err.Error(), "root") {
        t.Fatalf("Expected root error")
    }
}

func TestParameterizedJobConfig_Validate(t *testing.T) {
    d := &ParameterizedJobConfig{
        Payload: "foo",
    }

    if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") {
        t.Fatalf("Expected unknown payload requirement: %v", err)
    }

    d.Payload = DispatchPayloadOptional
    d.MetaOptional = []string{"foo", "bar"}
    d.MetaRequired = []string{"bar", "baz"}

    if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") {
        t.Fatalf("Expected meta not being disjoint error: %v", err)
    }
}

func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) {
    job := testJob()
    job.ParameterizedJob = &ParameterizedJobConfig{
        Payload: DispatchPayloadOptional,
    }
    job.Type = JobTypeSystem

    if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") {
        t.Fatalf("Expected bad scheduler type: %v", err)
    }
}

func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
    d := &ParameterizedJobConfig{}
    d.Canonicalize()
    if d.Payload != DispatchPayloadOptional {
        t.Fatalf("Canonicalize failed")
    }
}

func TestDispatchPayloadConfig_Validate(t *testing.T) {
    d := &DispatchPayloadConfig{
        File: "foo",
    }

    // task/local/haha
    if err := d.Validate(); err != nil {
        t.Fatalf("bad: %v", err)
    }

    // task/haha
    d.File = "../haha"
    if err := d.Validate(); err != nil {
        t.Fatalf("bad: %v", err)
    }

    // ../haha
    d.File = "../../../haha"
    if err := d.Validate(); err == nil {
        t.Fatalf("bad: %v", err)
    }
}

func TestIsRecoverable(t *testing.T) {
    if IsRecoverable(nil) {
        t.Errorf("nil should not be recoverable")
    }
    if IsRecoverable(NewRecoverableError(nil, true)) {
        t.Errorf("NewRecoverableError(nil, true) should not be recoverable")
    }
    if IsRecoverable(fmt.Errorf("i promise im recoverable")) {
        t.Errorf("Custom errors should not be recoverable")
    }
    if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) {
        t.Errorf("Explicitly unrecoverable errors should not be recoverable")
    }
    if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) {
        t.Errorf("Explicitly recoverable errors *should* be recoverable")
    }
}
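
// TestACLTokenValidate asserts token type, policy, and name-length
// validation rules.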
func TestACLTokenValidate(t *testing.T) {
    tk := &ACLToken{}

    // Missing a type
    err := tk.Validate()
    assert.NotNil(t, err)
    if !strings.Contains(err.Error(), "client or management") {
        t.Fatalf("bad: %v", err)
    }

    // Missing policies
    tk.Type = ACLClientToken
    err = tk.Validate()
    assert.NotNil(t, err)
    if !strings.Contains(err.Error(), "missing policies") {
        t.Fatalf("bad: %v", err)
    }

    // Invalid policies
    tk.Type = ACLManagementToken
    tk.Policies = []string{"foo"}
    err = tk.Validate()
    assert.NotNil(t, err)
    if !strings.Contains(err.Error(), "associated with policies") {
        t.Fatalf("bad: %v", err)
    }

    // Name too long
    tk.Name = ""
    for i := 0; i < 8; i++ {
        tk.Name += uuid.Generate()
    }
    tk.Policies = nil
    err = tk.Validate()
    assert.NotNil(t, err)
    if !strings.Contains(err.Error(), "too long") {
        t.Fatalf("bad: %v", err)
    }

    // Make it valid
    tk.Name = "foo"
    err = tk.Validate()
    assert.Nil(t, err)
}

func TestACLTokenPolicySubset(t *testing.T) {
    tk := &ACLToken{
        Type:     ACLClientToken,
        Policies: []string{"foo", "bar", "baz"},
    }

    assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
    assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
    assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
    assert.Equal(t, true, tk.PolicySubset([]string{}))
    assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"}))
    assert.Equal(t, false, tk.PolicySubset([]string{"new"}))

    tk = &ACLToken{
        Type: ACLManagementToken,
    }

    assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
    assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
    assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
    assert.Equal(t, true, tk.PolicySubset([]string{}))
    assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"}))
    assert.Equal(t, true, tk.PolicySubset([]string{"new"}))
}

func TestACLTokenSetHash(t *testing.T) {
    tk := &ACLToken{
        Name:     "foo",
        Type:     ACLClientToken,
        Policies: []string{"foo", "bar"},
        Global:   false,
    }
    out1 := tk.SetHash()
    assert.NotNil(t, out1)
    assert.NotNil(t, tk.Hash)
    assert.Equal(t, out1, tk.Hash)

    tk.Policies = []string{"foo"}
    out2 := tk.SetHash()
    assert.NotNil(t, out2)
    assert.NotNil(t, tk.Hash)
    assert.Equal(t, out2, tk.Hash)
    assert.NotEqual(t, out1, out2)
}

func TestACLPolicySetHash(t *testing.T) {
    ap := &ACLPolicy{
        Name:        "foo",
        Description: "great policy",
        Rules:       "node { policy = \"read\" }",
    }
    out1 := ap.SetHash()
    assert.NotNil(t, out1)
    assert.NotNil(t, ap.Hash)
    assert.Equal(t, out1, ap.Hash)

    ap.Rules = "node { policy = \"write\" }"
    out2 := ap.SetHash()
    assert.NotNil(t, out2)
    assert.NotNil(t, ap.Hash)
    assert.Equal(t, out2, ap.Hash)
    assert.NotEqual(t, out1, out2)
}
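
// TestTaskEventPopulate asserts PopulateEventDisplayMessage derives the
// expected display message for each event type and leaves pre-populated
// messages untouched.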
func TestTaskEventPopulate(t *testing.T) {
	prepopulatedEvent := NewTaskEvent(TaskSetup)
	prepopulatedEvent.DisplayMessage = "Hola"
	testcases := []struct {
		event       *TaskEvent
		expectedMsg string
	}{
		{nil, ""},
		{prepopulatedEvent, "Hola"},
		{NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"},
		{NewTaskEvent(TaskStarted), "Task started by client"},
		{NewTaskEvent(TaskReceived), "Task received by client"},
		{NewTaskEvent(TaskFailedValidation), "Validation of task failed"},
		{NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"},
		{NewTaskEvent(TaskSetupFailure), "Task setup failed"},
		{NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"},
		{NewTaskEvent(TaskDriverFailure), "Failed to start task"},
		{NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"},
		{NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"},
		{NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"},
		{NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"},
		{NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"},
		{NewTaskEvent(TaskKilling), "Sent interrupt"},
		{NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"},
		{NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"},
		{NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"},
		{NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""},
		{NewTaskEvent(TaskKilled), "Task successfully killed"},
		{NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"},
		{NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"},
		{NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"},
		{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
		{NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"},
		{NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"},
		{NewTaskEvent(TaskSignaling), "Task being sent a signal"},
		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"},
		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"},
		{NewTaskEvent(TaskRestartSignal), "Task signaled to restart"},
		{NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"},
		{NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"},
		{NewTaskEvent("Unknown Type, No message"), ""},
		{NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"},
	}

	// PopulateEventDisplayMessage must tolerate a nil receiver; the first
	// test case above relies on that.
	for _, tc := range testcases {
		tc.event.PopulateEventDisplayMessage()
		if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg {
			t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage)
		}
	}
}
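
// A small illustrative sketch, not in the original suite: the table above
// chains setters such as SetRestartDelay and SetTaskSignalReason, which
// suggests each setter returns the event itself. This check assumes that
// fluent style also holds when combining SetKillReason with SetKillTimeout,
// and only asserts that some display message gets populated.
func TestTaskEventPopulate_ChainingSketch(t *testing.T) {
	ev := NewTaskEvent(TaskKilling).
		SetKillReason("maintenance window").
		SetKillTimeout(5 * time.Second)
	ev.PopulateEventDisplayMessage()
	if ev.DisplayMessage == "" {
		t.Fatalf("expected a display message to be populated")
	}
}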
func TestNetworkResourcesEquals(t *testing.T) {
	require := require.New(t)
	var networkResourcesTest = []struct {
		input    []*NetworkResource
		expected bool
		errorMsg string
	}{
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80, 0}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80, 0}},
				},
			},
			true,
			"Equal network resources should return true",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.0",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80, 0}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80, 0}},
				},
			},
			false,
			"Different IP addresses should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         40,
					ReservedPorts: []Port{{"web", 80, 0}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80, 0}},
				},
			},
			false,
			"Different MBits values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80, 0}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80, 0}, {"web", 80, 0}},
				},
			},
			false,
			"Different ReservedPorts lengths should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80, 0}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{},
				},
			},
			false,
			"Empty and non-empty ReservedPorts values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80, 0}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"notweb", 80, 0}},
				},
			},
			false,
			"Differently valued ReservedPorts should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80, 0}},
				},
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80, 0}, {"web", 80, 0}},
				},
			},
			false,
			"Different DynamicPorts lengths should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80, 0}},
				},
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{},
				},
			},
			false,
			"Empty and non-empty DynamicPorts values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80, 0}},
				},
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"notweb", 80, 0}},
				},
			},
			false,
			"Differently valued DynamicPorts should return false",
		},
	}
	for _, testCase := range networkResourcesTest {
		first := testCase.input[0]
		second := testCase.input[1]
		require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg)
	}
}

func TestNode_Canonicalize(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Make sure the eligibility is set properly
	node := &Node{}
	node.Canonicalize()
	require.Equal(NodeSchedulingEligible, node.SchedulingEligibility)

	node = &Node{
		Drain: true,
	}
	node.Canonicalize()
	require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility)
}
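
// A compact table-driven restatement of the two cases above, illustrative
// only: draining nodes become ineligible, everything else defaults to
// eligible. It assumes the eligibility constants are plain strings, as
// their use with require.Equal above suggests.
func TestNode_Canonicalize_Table(t *testing.T) {
	cases := []struct {
		drain    bool
		expected string
	}{
		{false, NodeSchedulingEligible},
		{true, NodeSchedulingIneligible},
	}
	for _, c := range cases {
		n := &Node{Drain: c.drain}
		n.Canonicalize()
		require.Equal(t, c.expected, n.SchedulingEligibility)
	}
}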
func TestNode_Copy(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	node := &Node{
		ID:         uuid.Generate(),
		SecretID:   uuid.Generate(),
		Datacenter: "dc1",
		Name:       "foobar",
		Attributes: map[string]string{
			"kernel.name":        "linux",
			"arch":               "x86",
			"nomad.version":      "0.5.0",
			"driver.exec":        "1",
			"driver.mock_driver": "1",
		},
		Resources: &Resources{
			CPU:      4000,
			MemoryMB: 8192,
			DiskMB:   100 * 1024,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "192.168.0.100/32",
					MBits:  1000,
				},
			},
		},
		Reserved: &Resources{
			CPU:      100,
			MemoryMB: 256,
			DiskMB:   4 * 1024,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "192.168.0.100",
					ReservedPorts: []Port{{Label: "ssh", Value: 22}},
					MBits:         1,
				},
			},
		},
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 4000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 8192,
			},
			Disk: NodeDiskResources{
				DiskMB: 100 * 1024,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "192.168.0.100/32",
					MBits:  1000,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 100,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 256,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 4 * 1024,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "22",
			},
		},
		Links: map[string]string{
			"consul": "foobar.dc1",
		},
		Meta: map[string]string{
			"pci-dss":  "true",
			"database": "mysql",
			"version":  "5.6",
		},
		NodeClass:             "linux-medium-pci",
		Status:                NodeStatusReady,
		SchedulingEligibility: NodeSchedulingEligible,
		Drivers: map[string]*DriverInfo{
			"mock_driver": {
				Attributes:        map[string]string{"running": "1"},
				Detected:          true,
				Healthy:           true,
				HealthDescription: "Currently active",
				UpdateTime:        time.Now(),
			},
		},
	}
	node.ComputeClass()

	node2 := node.Copy()

	require.Equal(node.Attributes, node2.Attributes)
	require.Equal(node.Resources, node2.Resources)
	require.Equal(node.Reserved, node2.Reserved)
	require.Equal(node.Links, node2.Links)
	require.Equal(node.Meta, node2.Meta)
	require.Equal(node.Events, node2.Events)
	require.Equal(node.DrainStrategy, node2.DrainStrategy)
	require.Equal(node.Drivers, node2.Drivers)
}
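
// A hedged aliasing check, not in the original suite: assuming Node.Copy
// deep-copies map-valued fields such as Meta, mutating the copy must not be
// visible through the original node. The test name and field values are
// illustrative.
func TestNode_Copy_NoAliasing(t *testing.T) {
	node := &Node{
		Meta: map[string]string{"database": "mysql"},
	}
	other := node.Copy()
	other.Meta["database"] = "postgres"
	require.Equal(t, "mysql", node.Meta["database"])
}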
func TestSpread_Validate(t *testing.T) {
	type tc struct {
		spread *Spread
		err    error
		name   string
	}

	testCases := []tc{
		{
			spread: &Spread{},
			err:    fmt.Errorf("Missing spread attribute"),
			name:   "empty spread",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    -1,
			},
			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
			name: "Invalid negative weight",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    110,
			},
			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
			name: "Invalid weight greater than 100",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    50,
				SpreadTarget: []*SpreadTarget{
					{
						Value:   "dc1",
						Percent: 25,
					},
					{
						Value:   "dc2",
						Percent: 150,
					},
				},
			},
			err:  fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"),
			name: "Invalid target percentage",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    50,
				SpreadTarget: []*SpreadTarget{
					{
						Value:   "dc1",
						Percent: 75,
					},
					{
						Value:   "dc2",
						Percent: 75,
					},
				},
			},
			err:  fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150),
			name: "Percentages sum greater than 100",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    50,
				SpreadTarget: []*SpreadTarget{
					{
						Value:   "dc1",
						Percent: 25,
					},
					{
						Value:   "dc1",
						Percent: 50,
					},
				},
			},
			err:  fmt.Errorf("Spread target value \"dc1\" already defined"),
			name: "Duplicate spread target",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    50,
				SpreadTarget: []*SpreadTarget{
					{
						Value:   "dc1",
						Percent: 25,
					},
					{
						Value:   "dc2",
						Percent: 50,
					},
				},
			},
			err:  nil,
			name: "Valid spread",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.spread.Validate()
			if tc.err != nil {
				require.NotNil(t, err)
				require.Contains(t, err.Error(), tc.err.Error())
			} else {
				require.Nil(t, err)
			}
		})
	}
}

func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) {
	require := require.New(t)
	cases := []struct {
		Input  string
		Parsed []uint64
		Err    bool
	}{
		{
			"1,2,3",
			[]uint64{1, 2, 3},
			false,
		},
		{
			"3,1,2,1,2,3,1-3",
			[]uint64{1, 2, 3},
			false,
		},
		{
			"3-1",
			nil,
			true,
		},
		{
			"1-3,2-4",
			[]uint64{1, 2, 3, 4},
			false,
		},
		{
			"1-3,4,5-5,6,7,8-10",
			[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
			false,
		},
	}

	for i, tc := range cases {
		r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input}
		out, err := r.ParseReservedHostPorts()
		if (err != nil) != tc.Err {
			t.Fatalf("test case %d: %v", i, err)
		}

		require.Equal(tc.Parsed, out)
	}
}
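
// A usage sketch, illustrative only: ParseReservedHostPorts accepts a mix of
// single ports and inclusive ranges and, as the "3,1,2,1,2,3,1-3" case above
// shows, returns a sorted, de-duplicated slice. The port values here are
// arbitrary.
func TestNodeReservedNetworkResources_ParseReserved_Sketch(t *testing.T) {
	r := &NodeReservedNetworkResources{ReservedHostPorts: "22,80,8000-8002"}
	out, err := r.ParseReservedHostPorts()
	require.NoError(t, err)
	require.Equal(t, []uint64{22, 80, 8000, 8001, 8002}, out)
}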