github.com/iqoqo/nomad@v0.11.3-0.20200911112621-d7021c74d101/nomad/structs/structs_test.go (about) 1 package structs 2 3 import ( 4 "fmt" 5 "os" 6 "reflect" 7 "strings" 8 "testing" 9 "time" 10 11 "github.com/hashicorp/consul/api" 12 "github.com/hashicorp/go-multierror" 13 "github.com/hashicorp/nomad/helper/uuid" 14 15 "github.com/kr/pretty" 16 "github.com/stretchr/testify/assert" 17 "github.com/stretchr/testify/require" 18 ) 19 20 func TestJob_Validate(t *testing.T) { 21 j := &Job{} 22 err := j.Validate() 23 mErr := err.(*multierror.Error) 24 if !strings.Contains(mErr.Errors[0].Error(), "job region") { 25 t.Fatalf("err: %s", err) 26 } 27 if !strings.Contains(mErr.Errors[1].Error(), "job ID") { 28 t.Fatalf("err: %s", err) 29 } 30 if !strings.Contains(mErr.Errors[2].Error(), "job name") { 31 t.Fatalf("err: %s", err) 32 } 33 if !strings.Contains(mErr.Errors[3].Error(), "namespace") { 34 t.Fatalf("err: %s", err) 35 } 36 if !strings.Contains(mErr.Errors[4].Error(), "job type") { 37 t.Fatalf("err: %s", err) 38 } 39 if !strings.Contains(mErr.Errors[5].Error(), "priority") { 40 t.Fatalf("err: %s", err) 41 } 42 if !strings.Contains(mErr.Errors[6].Error(), "datacenters") { 43 t.Fatalf("err: %s", err) 44 } 45 if !strings.Contains(mErr.Errors[7].Error(), "task groups") { 46 t.Fatalf("err: %s", err) 47 } 48 49 j = &Job{ 50 Type: "invalid-job-type", 51 } 52 err = j.Validate() 53 if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) { 54 t.Errorf("expected %s but found: %v", expected, err) 55 } 56 57 j = &Job{ 58 Type: JobTypeService, 59 Periodic: &PeriodicConfig{ 60 Enabled: true, 61 }, 62 } 63 err = j.Validate() 64 mErr = err.(*multierror.Error) 65 if !strings.Contains(mErr.Error(), "Periodic") { 66 t.Fatalf("err: %s", err) 67 } 68 69 j = &Job{ 70 Region: "global", 71 ID: uuid.Generate(), 72 Namespace: "test", 73 Name: "my-job", 74 Type: JobTypeService, 75 Priority: 50, 76 Datacenters: []string{"dc1"}, 77 TaskGroups: []*TaskGroup{ 78 { 79 Name: "web", 80 RestartPolicy: &RestartPolicy{ 81 Interval: 5 * time.Minute, 82 Delay: 10 * time.Second, 83 Attempts: 10, 84 }, 85 }, 86 { 87 Name: "web", 88 RestartPolicy: &RestartPolicy{ 89 Interval: 5 * time.Minute, 90 Delay: 10 * time.Second, 91 Attempts: 10, 92 }, 93 }, 94 { 95 RestartPolicy: &RestartPolicy{ 96 Interval: 5 * time.Minute, 97 Delay: 10 * time.Second, 98 Attempts: 10, 99 }, 100 }, 101 }, 102 } 103 err = j.Validate() 104 mErr = err.(*multierror.Error) 105 if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") { 106 t.Fatalf("err: %s", err) 107 } 108 if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") { 109 t.Fatalf("err: %s", err) 110 } 111 if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") { 112 t.Fatalf("err: %s", err) 113 } 114 115 // test for empty datacenters 116 j = &Job{ 117 Datacenters: []string{""}, 118 } 119 err = j.Validate() 120 mErr = err.(*multierror.Error) 121 if !strings.Contains(mErr.Error(), "datacenter must be non-empty string") { 122 t.Fatalf("err: %s", err) 123 } 124 } 125 126 func TestJob_ValidateScaling(t *testing.T) { 127 require := require.New(t) 128 129 p := &ScalingPolicy{ 130 Policy: nil, // allowed to be nil 131 Min: 5, 132 Max: 5, 133 Enabled: true, 134 } 135 job := testJob() 136 job.TaskGroups[0].Scaling = p 137 job.TaskGroups[0].Count = 5 138 139 require.NoError(job.Validate()) 140 141 // min <= max 142 p.Max = 0 143 p.Min = 10 144 err := job.Validate() 145 require.Error(err) 146 mErr := err.(*multierror.Error) 147 require.Len(mErr.Errors, 1) 148 require.Contains(mErr.Errors[0].Error(), "maximum count must not be less than minimum count") 149 require.Contains(mErr.Errors[0].Error(), "task group count must not be less than minimum count in scaling policy") 150 require.Contains(mErr.Errors[0].Error(), "task group count must not be greater than maximum count in scaling policy") 151 152 // count <= max 153 p.Max = 0 154 p.Min = 5 155 job.TaskGroups[0].Count = 5 156 err = job.Validate() 157 require.Error(err) 158 mErr = err.(*multierror.Error) 159 require.Len(mErr.Errors, 1) 160 require.Contains(mErr.Errors[0].Error(), "maximum count must not be less than minimum count") 161 require.Contains(mErr.Errors[0].Error(), "task group count must not be greater than maximum count in scaling policy") 162 163 // min <= count 164 job.TaskGroups[0].Count = 0 165 p.Min = 5 166 p.Max = 5 167 err = job.Validate() 168 require.Error(err) 169 mErr = err.(*multierror.Error) 170 require.Len(mErr.Errors, 1) 171 require.Contains(mErr.Errors[0].Error(), "task group count must not be less than minimum count in scaling policy") 172 } 173 174 func TestJob_Warnings(t *testing.T) { 175 cases := []struct { 176 Name string 177 Job *Job 178 Expected []string 179 }{ 180 { 181 Name: "Higher counts for update stanza", 182 Expected: []string{"max parallel count is greater"}, 183 Job: &Job{ 184 Type: JobTypeService, 185 TaskGroups: []*TaskGroup{ 186 { 187 Name: "foo", 188 Count: 2, 189 Update: &UpdateStrategy{ 190 MaxParallel: 10, 191 }, 192 }, 193 }, 194 }, 195 }, 196 { 197 Name: "AutoPromote mixed TaskGroups", 198 Expected: []string{"auto_promote must be true for all groups"}, 199 Job: &Job{ 200 Type: JobTypeService, 201 TaskGroups: []*TaskGroup{ 202 { 203 Update: &UpdateStrategy{ 204 AutoPromote: true, 205 }, 206 }, 207 { 208 Update: &UpdateStrategy{ 209 AutoPromote: false, 210 }, 211 }, 212 }, 213 }, 214 }, 215 { 216 Name: "Template.VaultGrace Deprecated", 217 Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."}, 218 Job: &Job{ 219 Type: JobTypeService, 220 TaskGroups: []*TaskGroup{ 221 { 222 Tasks: []*Task{ 223 { 224 Templates: []*Template{ 225 { 226 VaultGrace: 1, 227 }, 228 }, 229 }, 230 }, 231 }, 232 }, 233 }, 234 }, 235 } 236 237 for _, c := range cases { 238 t.Run(c.Name, func(t *testing.T) { 239 warnings := c.Job.Warnings() 240 if warnings == nil { 241 if len(c.Expected) == 0 { 242 return 243 } else { 244 t.Fatal("Got no warnings when they were expected") 245 } 246 } 247 248 a := warnings.Error() 249 for _, e := range c.Expected { 250 if !strings.Contains(a, e) { 251 t.Fatalf("Got warnings %q; didn't contain %q", a, e) 252 } 253 } 254 }) 255 } 256 } 257 258 func TestJob_SpecChanged(t *testing.T) { 259 // Get a base test job 260 base := testJob() 261 262 // Only modify the indexes/mutable state of the job 263 mutatedBase := base.Copy() 264 mutatedBase.Status = "foo" 265 mutatedBase.ModifyIndex = base.ModifyIndex + 100 266 267 // changed contains a spec change that should be detected 268 change := base.Copy() 269 change.Priority = 99 270 271 cases := []struct { 272 Name string 273 Original *Job 274 New *Job 275 Changed bool 276 }{ 277 { 278 Name: "Same job except mutable indexes", 279 Changed: false, 280 Original: base, 281 New: mutatedBase, 282 }, 283 { 284 Name: "Different", 285 Changed: true, 286 Original: base, 287 New: change, 288 }, 289 } 290 291 for _, c := range cases { 292 t.Run(c.Name, func(t *testing.T) { 293 if actual := c.Original.SpecChanged(c.New); actual != c.Changed { 294 t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed) 295 } 296 }) 297 } 298 } 299 300 func testJob() *Job { 301 return &Job{ 302 Region: "global", 303 ID: uuid.Generate(), 304 Namespace: "test", 305 Name: "my-job", 306 Type: JobTypeService, 307 Priority: 50, 308 AllAtOnce: false, 309 Datacenters: []string{"dc1"}, 310 Constraints: []*Constraint{ 311 { 312 LTarget: "$attr.kernel.name", 313 RTarget: "linux", 314 Operand: "=", 315 }, 316 }, 317 Periodic: &PeriodicConfig{ 318 Enabled: false, 319 }, 320 TaskGroups: []*TaskGroup{ 321 { 322 Name: "web", 323 Count: 10, 324 EphemeralDisk: DefaultEphemeralDisk(), 325 RestartPolicy: &RestartPolicy{ 326 Mode: RestartPolicyModeFail, 327 Attempts: 3, 328 Interval: 10 * time.Minute, 329 Delay: 1 * time.Minute, 330 }, 331 ReschedulePolicy: &ReschedulePolicy{ 332 Interval: 5 * time.Minute, 333 Attempts: 10, 334 Delay: 5 * time.Second, 335 DelayFunction: "constant", 336 }, 337 Tasks: []*Task{ 338 { 339 Name: "web", 340 Driver: "exec", 341 Config: map[string]interface{}{ 342 "command": "/bin/date", 343 }, 344 Env: map[string]string{ 345 "FOO": "bar", 346 }, 347 Artifacts: []*TaskArtifact{ 348 { 349 GetterSource: "http://foo.com", 350 }, 351 }, 352 Services: []*Service{ 353 { 354 Name: "${TASK}-frontend", 355 PortLabel: "http", 356 }, 357 }, 358 Resources: &Resources{ 359 CPU: 500, 360 MemoryMB: 256, 361 Networks: []*NetworkResource{ 362 { 363 MBits: 50, 364 DynamicPorts: []Port{{Label: "http"}}, 365 }, 366 }, 367 }, 368 LogConfig: &LogConfig{ 369 MaxFiles: 10, 370 MaxFileSizeMB: 1, 371 }, 372 }, 373 }, 374 Meta: map[string]string{ 375 "elb_check_type": "http", 376 "elb_check_interval": "30s", 377 "elb_check_min": "3", 378 }, 379 }, 380 }, 381 Meta: map[string]string{ 382 "owner": "armon", 383 }, 384 } 385 } 386 387 func TestJob_Copy(t *testing.T) { 388 j := testJob() 389 c := j.Copy() 390 if !reflect.DeepEqual(j, c) { 391 t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j) 392 } 393 } 394 395 func TestJob_IsPeriodic(t *testing.T) { 396 j := &Job{ 397 Type: JobTypeService, 398 Periodic: &PeriodicConfig{ 399 Enabled: true, 400 }, 401 } 402 if !j.IsPeriodic() { 403 t.Fatalf("IsPeriodic() returned false on periodic job") 404 } 405 406 j = &Job{ 407 Type: JobTypeService, 408 } 409 if j.IsPeriodic() { 410 t.Fatalf("IsPeriodic() returned true on non-periodic job") 411 } 412 } 413 414 func TestJob_IsPeriodicActive(t *testing.T) { 415 cases := []struct { 416 job *Job 417 active bool 418 }{ 419 { 420 job: &Job{ 421 Type: JobTypeService, 422 Periodic: &PeriodicConfig{ 423 Enabled: true, 424 }, 425 }, 426 active: true, 427 }, 428 { 429 job: &Job{ 430 Type: JobTypeService, 431 Periodic: &PeriodicConfig{ 432 Enabled: false, 433 }, 434 }, 435 active: false, 436 }, 437 { 438 job: &Job{ 439 Type: JobTypeService, 440 Periodic: &PeriodicConfig{ 441 Enabled: true, 442 }, 443 Stop: true, 444 }, 445 active: false, 446 }, 447 { 448 job: &Job{ 449 Type: JobTypeService, 450 Periodic: &PeriodicConfig{ 451 Enabled: false, 452 }, 453 ParameterizedJob: &ParameterizedJobConfig{}, 454 }, 455 active: false, 456 }, 457 } 458 459 for i, c := range cases { 460 if act := c.job.IsPeriodicActive(); act != c.active { 461 t.Fatalf("case %d failed: got %v; want %v", i, act, c.active) 462 } 463 } 464 } 465 466 func TestJob_SystemJob_Validate(t *testing.T) { 467 j := testJob() 468 j.Type = JobTypeSystem 469 j.TaskGroups[0].ReschedulePolicy = nil 470 j.Canonicalize() 471 472 err := j.Validate() 473 if err == nil || !strings.Contains(err.Error(), "exceed") { 474 t.Fatalf("expect error due to count") 475 } 476 477 j.TaskGroups[0].Count = 0 478 if err := j.Validate(); err != nil { 479 t.Fatalf("unexpected err: %v", err) 480 } 481 482 j.TaskGroups[0].Count = 1 483 if err := j.Validate(); err != nil { 484 t.Fatalf("unexpected err: %v", err) 485 } 486 487 // Add affinities at job, task group and task level, that should fail validation 488 489 j.Affinities = []*Affinity{{ 490 Operand: "=", 491 LTarget: "${node.datacenter}", 492 RTarget: "dc1", 493 }} 494 j.TaskGroups[0].Affinities = []*Affinity{{ 495 Operand: "=", 496 LTarget: "${meta.rack}", 497 RTarget: "r1", 498 }} 499 j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{ 500 Operand: "=", 501 LTarget: "${meta.rack}", 502 RTarget: "r1", 503 }} 504 err = j.Validate() 505 require.NotNil(t, err) 506 require.Contains(t, err.Error(), "System jobs may not have an affinity stanza") 507 508 // Add spread at job and task group level, that should fail validation 509 j.Spreads = []*Spread{{ 510 Attribute: "${node.datacenter}", 511 Weight: 100, 512 }} 513 j.TaskGroups[0].Spreads = []*Spread{{ 514 Attribute: "${node.datacenter}", 515 Weight: 100, 516 }} 517 518 err = j.Validate() 519 require.NotNil(t, err) 520 require.Contains(t, err.Error(), "System jobs may not have a spread stanza") 521 522 } 523 524 func TestJob_VaultPolicies(t *testing.T) { 525 j0 := &Job{} 526 e0 := make(map[string]map[string]*Vault, 0) 527 528 vj1 := &Vault{ 529 Policies: []string{ 530 "p1", 531 "p2", 532 }, 533 } 534 vj2 := &Vault{ 535 Policies: []string{ 536 "p3", 537 "p4", 538 }, 539 } 540 vj3 := &Vault{ 541 Policies: []string{ 542 "p5", 543 }, 544 } 545 j1 := &Job{ 546 TaskGroups: []*TaskGroup{ 547 { 548 Name: "foo", 549 Tasks: []*Task{ 550 { 551 Name: "t1", 552 }, 553 { 554 Name: "t2", 555 Vault: vj1, 556 }, 557 }, 558 }, 559 { 560 Name: "bar", 561 Tasks: []*Task{ 562 { 563 Name: "t3", 564 Vault: vj2, 565 }, 566 { 567 Name: "t4", 568 Vault: vj3, 569 }, 570 }, 571 }, 572 }, 573 } 574 575 e1 := map[string]map[string]*Vault{ 576 "foo": { 577 "t2": vj1, 578 }, 579 "bar": { 580 "t3": vj2, 581 "t4": vj3, 582 }, 583 } 584 585 cases := []struct { 586 Job *Job 587 Expected map[string]map[string]*Vault 588 }{ 589 { 590 Job: j0, 591 Expected: e0, 592 }, 593 { 594 Job: j1, 595 Expected: e1, 596 }, 597 } 598 599 for i, c := range cases { 600 got := c.Job.VaultPolicies() 601 if !reflect.DeepEqual(got, c.Expected) { 602 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 603 } 604 } 605 } 606 607 func TestJob_ConnectTasks(t *testing.T) { 608 t.Parallel() 609 r := require.New(t) 610 611 // todo(shoenig): this will need some updates when we support connect native 612 // tasks, which will have a different Kind format, probably. 613 614 j0 := &Job{ 615 TaskGroups: []*TaskGroup{{ 616 Name: "tg1", 617 Tasks: []*Task{{ 618 Name: "connect-proxy-task1", 619 Kind: "connect-proxy:task1", 620 }, { 621 Name: "task2", 622 Kind: "task2", 623 }, { 624 Name: "connect-proxy-task3", 625 Kind: "connect-proxy:task3", 626 }}, 627 }, { 628 Name: "tg2", 629 Tasks: []*Task{{ 630 Name: "task1", 631 Kind: "task1", 632 }, { 633 Name: "connect-proxy-task2", 634 Kind: "connect-proxy:task2", 635 }}, 636 }}, 637 } 638 639 connectTasks := j0.ConnectTasks() 640 641 exp := map[string][]string{ 642 "tg1": {"connect-proxy-task1", "connect-proxy-task3"}, 643 "tg2": {"connect-proxy-task2"}, 644 } 645 r.Equal(exp, connectTasks) 646 } 647 648 func TestJob_RequiredSignals(t *testing.T) { 649 j0 := &Job{} 650 e0 := make(map[string]map[string][]string, 0) 651 652 vj1 := &Vault{ 653 Policies: []string{"p1"}, 654 ChangeMode: VaultChangeModeNoop, 655 } 656 vj2 := &Vault{ 657 Policies: []string{"p1"}, 658 ChangeMode: VaultChangeModeSignal, 659 ChangeSignal: "SIGUSR1", 660 } 661 tj1 := &Template{ 662 SourcePath: "foo", 663 DestPath: "bar", 664 ChangeMode: TemplateChangeModeNoop, 665 } 666 tj2 := &Template{ 667 SourcePath: "foo", 668 DestPath: "bar", 669 ChangeMode: TemplateChangeModeSignal, 670 ChangeSignal: "SIGUSR2", 671 } 672 j1 := &Job{ 673 TaskGroups: []*TaskGroup{ 674 { 675 Name: "foo", 676 Tasks: []*Task{ 677 { 678 Name: "t1", 679 }, 680 { 681 Name: "t2", 682 Vault: vj2, 683 Templates: []*Template{tj2}, 684 }, 685 }, 686 }, 687 { 688 Name: "bar", 689 Tasks: []*Task{ 690 { 691 Name: "t3", 692 Vault: vj1, 693 Templates: []*Template{tj1}, 694 }, 695 { 696 Name: "t4", 697 Vault: vj2, 698 }, 699 }, 700 }, 701 }, 702 } 703 704 e1 := map[string]map[string][]string{ 705 "foo": { 706 "t2": {"SIGUSR1", "SIGUSR2"}, 707 }, 708 "bar": { 709 "t4": {"SIGUSR1"}, 710 }, 711 } 712 713 j2 := &Job{ 714 TaskGroups: []*TaskGroup{ 715 { 716 Name: "foo", 717 Tasks: []*Task{ 718 { 719 Name: "t1", 720 KillSignal: "SIGQUIT", 721 }, 722 }, 723 }, 724 }, 725 } 726 727 e2 := map[string]map[string][]string{ 728 "foo": { 729 "t1": {"SIGQUIT"}, 730 }, 731 } 732 733 cases := []struct { 734 Job *Job 735 Expected map[string]map[string][]string 736 }{ 737 { 738 Job: j0, 739 Expected: e0, 740 }, 741 { 742 Job: j1, 743 Expected: e1, 744 }, 745 { 746 Job: j2, 747 Expected: e2, 748 }, 749 } 750 751 for i, c := range cases { 752 got := c.Job.RequiredSignals() 753 if !reflect.DeepEqual(got, c.Expected) { 754 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 755 } 756 } 757 } 758 759 // test new Equal comparisons for components of Jobs 760 func TestJob_PartEqual(t *testing.T) { 761 ns := &Networks{} 762 require.True(t, ns.Equals(&Networks{})) 763 764 ns = &Networks{ 765 &NetworkResource{Device: "eth0"}, 766 } 767 require.True(t, ns.Equals(&Networks{ 768 &NetworkResource{Device: "eth0"}, 769 })) 770 771 ns = &Networks{ 772 &NetworkResource{Device: "eth0"}, 773 &NetworkResource{Device: "eth1"}, 774 &NetworkResource{Device: "eth2"}, 775 } 776 require.True(t, ns.Equals(&Networks{ 777 &NetworkResource{Device: "eth2"}, 778 &NetworkResource{Device: "eth0"}, 779 &NetworkResource{Device: "eth1"}, 780 })) 781 782 cs := &Constraints{ 783 &Constraint{"left0", "right0", "=", ""}, 784 &Constraint{"left1", "right1", "=", ""}, 785 &Constraint{"left2", "right2", "=", ""}, 786 } 787 require.True(t, cs.Equals(&Constraints{ 788 &Constraint{"left0", "right0", "=", ""}, 789 &Constraint{"left2", "right2", "=", ""}, 790 &Constraint{"left1", "right1", "=", ""}, 791 })) 792 793 as := &Affinities{ 794 &Affinity{"left0", "right0", "=", 0, ""}, 795 &Affinity{"left1", "right1", "=", 0, ""}, 796 &Affinity{"left2", "right2", "=", 0, ""}, 797 } 798 require.True(t, as.Equals(&Affinities{ 799 &Affinity{"left0", "right0", "=", 0, ""}, 800 &Affinity{"left2", "right2", "=", 0, ""}, 801 &Affinity{"left1", "right1", "=", 0, ""}, 802 })) 803 } 804 805 func TestTask_UsesConnect(t *testing.T) { 806 t.Parallel() 807 808 t.Run("normal task", func(t *testing.T) { 809 task := testJob().TaskGroups[0].Tasks[0] 810 usesConnect := task.UsesConnect() 811 require.False(t, usesConnect) 812 }) 813 814 t.Run("sidecar proxy", func(t *testing.T) { 815 task := &Task{ 816 Name: "connect-proxy-task1", 817 Kind: "connect-proxy:task1", 818 } 819 usesConnect := task.UsesConnect() 820 require.True(t, usesConnect) 821 }) 822 823 // todo(shoenig): add native case 824 } 825 826 func TestTaskGroup_UsesConnect(t *testing.T) { 827 t.Parallel() 828 829 try := func(t *testing.T, tg *TaskGroup, exp bool) { 830 result := tg.UsesConnect() 831 require.Equal(t, exp, result) 832 } 833 834 t.Run("tg uses native", func(t *testing.T) { 835 try(t, &TaskGroup{ 836 Services: []*Service{ 837 {Connect: nil}, 838 {Connect: &ConsulConnect{Native: true}}, 839 }, 840 }, true) 841 }) 842 843 t.Run("tg uses sidecar", func(t *testing.T) { 844 try(t, &TaskGroup{ 845 Services: []*Service{{ 846 Connect: &ConsulConnect{ 847 SidecarService: &ConsulSidecarService{ 848 Port: "9090", 849 }, 850 }, 851 }}, 852 }, true) 853 }) 854 855 t.Run("tg does not use connect", func(t *testing.T) { 856 try(t, &TaskGroup{ 857 Services: []*Service{ 858 {Connect: nil}, 859 }, 860 }, false) 861 }) 862 } 863 864 func TestTaskGroup_Validate(t *testing.T) { 865 j := testJob() 866 tg := &TaskGroup{ 867 Count: -1, 868 RestartPolicy: &RestartPolicy{ 869 Interval: 5 * time.Minute, 870 Delay: 10 * time.Second, 871 Attempts: 10, 872 Mode: RestartPolicyModeDelay, 873 }, 874 ReschedulePolicy: &ReschedulePolicy{ 875 Interval: 5 * time.Minute, 876 Attempts: 5, 877 Delay: 5 * time.Second, 878 }, 879 } 880 err := tg.Validate(j) 881 mErr := err.(*multierror.Error) 882 if !strings.Contains(mErr.Errors[0].Error(), "group name") { 883 t.Fatalf("err: %s", err) 884 } 885 if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") { 886 t.Fatalf("err: %s", err) 887 } 888 if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") { 889 t.Fatalf("err: %s", err) 890 } 891 892 tg = &TaskGroup{ 893 Tasks: []*Task{ 894 { 895 Name: "task-a", 896 Resources: &Resources{ 897 Networks: []*NetworkResource{ 898 { 899 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 900 }, 901 }, 902 }, 903 }, 904 { 905 Name: "task-b", 906 Resources: &Resources{ 907 Networks: []*NetworkResource{ 908 { 909 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 910 }, 911 }, 912 }, 913 }, 914 }, 915 } 916 err = tg.Validate(&Job{}) 917 expected := `Static port 123 already reserved by task-a:foo` 918 if !strings.Contains(err.Error(), expected) { 919 t.Errorf("expected %s but found: %v", expected, err) 920 } 921 922 tg = &TaskGroup{ 923 Tasks: []*Task{ 924 { 925 Name: "task-a", 926 Resources: &Resources{ 927 Networks: []*NetworkResource{ 928 { 929 ReservedPorts: []Port{ 930 {Label: "foo", Value: 123}, 931 {Label: "bar", Value: 123}, 932 }, 933 }, 934 }, 935 }, 936 }, 937 }, 938 } 939 err = tg.Validate(&Job{}) 940 expected = `Static port 123 already reserved by task-a:foo` 941 if !strings.Contains(err.Error(), expected) { 942 t.Errorf("expected %s but found: %v", expected, err) 943 } 944 945 tg = &TaskGroup{ 946 Name: "web", 947 Count: 1, 948 Tasks: []*Task{ 949 {Name: "web", Leader: true}, 950 {Name: "web", Leader: true}, 951 {}, 952 }, 953 RestartPolicy: &RestartPolicy{ 954 Interval: 5 * time.Minute, 955 Delay: 10 * time.Second, 956 Attempts: 10, 957 Mode: RestartPolicyModeDelay, 958 }, 959 ReschedulePolicy: &ReschedulePolicy{ 960 Interval: 5 * time.Minute, 961 Attempts: 10, 962 Delay: 5 * time.Second, 963 DelayFunction: "constant", 964 }, 965 } 966 967 err = tg.Validate(j) 968 mErr = err.(*multierror.Error) 969 if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") { 970 t.Fatalf("err: %s", err) 971 } 972 if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") { 973 t.Fatalf("err: %s", err) 974 } 975 if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") { 976 t.Fatalf("err: %s", err) 977 } 978 if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") { 979 t.Fatalf("err: %s", err) 980 } 981 if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") { 982 t.Fatalf("err: %s", err) 983 } 984 985 tg = &TaskGroup{ 986 Name: "web", 987 Count: 1, 988 Tasks: []*Task{ 989 {Name: "web", Leader: true}, 990 }, 991 Update: DefaultUpdateStrategy.Copy(), 992 } 993 j.Type = JobTypeBatch 994 err = tg.Validate(j) 995 if !strings.Contains(err.Error(), "does not allow update block") { 996 t.Fatalf("err: %s", err) 997 } 998 999 tg = &TaskGroup{ 1000 Count: -1, 1001 RestartPolicy: &RestartPolicy{ 1002 Interval: 5 * time.Minute, 1003 Delay: 10 * time.Second, 1004 Attempts: 10, 1005 Mode: RestartPolicyModeDelay, 1006 }, 1007 ReschedulePolicy: &ReschedulePolicy{ 1008 Interval: 5 * time.Minute, 1009 Attempts: 5, 1010 Delay: 5 * time.Second, 1011 }, 1012 } 1013 j.Type = JobTypeSystem 1014 err = tg.Validate(j) 1015 if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") { 1016 t.Fatalf("err: %s", err) 1017 } 1018 1019 tg = &TaskGroup{ 1020 Networks: []*NetworkResource{ 1021 { 1022 DynamicPorts: []Port{{"http", 0, 80}}, 1023 }, 1024 }, 1025 Tasks: []*Task{ 1026 { 1027 Resources: &Resources{ 1028 Networks: []*NetworkResource{ 1029 { 1030 DynamicPorts: []Port{{"http", 0, 80}}, 1031 }, 1032 }, 1033 }, 1034 }, 1035 }, 1036 } 1037 err = tg.Validate(j) 1038 require.Contains(t, err.Error(), "Port label http already in use") 1039 require.Contains(t, err.Error(), "Port mapped to 80 already in use") 1040 1041 tg = &TaskGroup{ 1042 Volumes: map[string]*VolumeRequest{ 1043 "foo": { 1044 Type: "nothost", 1045 Source: "foo", 1046 }, 1047 }, 1048 Tasks: []*Task{ 1049 { 1050 Name: "task-a", 1051 Resources: &Resources{}, 1052 }, 1053 }, 1054 } 1055 err = tg.Validate(&Job{}) 1056 require.Contains(t, err.Error(), `Volume foo has unrecognised type nothost`) 1057 1058 tg = &TaskGroup{ 1059 Volumes: map[string]*VolumeRequest{ 1060 "foo": { 1061 Type: "host", 1062 }, 1063 }, 1064 Tasks: []*Task{ 1065 { 1066 Name: "task-a", 1067 Resources: &Resources{}, 1068 }, 1069 }, 1070 } 1071 err = tg.Validate(&Job{}) 1072 require.Contains(t, err.Error(), `Volume foo has an empty source`) 1073 1074 tg = &TaskGroup{ 1075 Volumes: map[string]*VolumeRequest{ 1076 "foo": { 1077 Type: "host", 1078 }, 1079 }, 1080 Tasks: []*Task{ 1081 { 1082 Name: "task-a", 1083 Resources: &Resources{}, 1084 VolumeMounts: []*VolumeMount{ 1085 { 1086 Volume: "", 1087 }, 1088 }, 1089 }, 1090 { 1091 Name: "task-b", 1092 Resources: &Resources{}, 1093 VolumeMounts: []*VolumeMount{ 1094 { 1095 Volume: "foob", 1096 }, 1097 }, 1098 }, 1099 }, 1100 } 1101 err = tg.Validate(&Job{}) 1102 expected = `Task task-a has a volume mount (0) referencing an empty volume` 1103 require.Contains(t, err.Error(), expected) 1104 1105 expected = `Task task-b has a volume mount (0) referencing undefined volume foob` 1106 require.Contains(t, err.Error(), expected) 1107 1108 taskA := &Task{Name: "task-a"} 1109 tg = &TaskGroup{ 1110 Name: "group-a", 1111 Services: []*Service{ 1112 { 1113 Name: "service-a", 1114 Checks: []*ServiceCheck{ 1115 { 1116 Name: "check-a", 1117 Type: "tcp", 1118 TaskName: "task-b", 1119 PortLabel: "http", 1120 Interval: time.Duration(1 * time.Second), 1121 Timeout: time.Duration(1 * time.Second), 1122 }, 1123 }, 1124 }, 1125 }, 1126 Tasks: []*Task{taskA}, 1127 } 1128 err = tg.Validate(&Job{}) 1129 expected = `Check check-a invalid: refers to non-existent task task-b` 1130 require.Contains(t, err.Error(), expected) 1131 1132 expected = `Check check-a invalid: only script and gRPC checks should have tasks` 1133 require.Contains(t, err.Error(), expected) 1134 1135 } 1136 1137 func TestTask_Validate(t *testing.T) { 1138 task := &Task{} 1139 ephemeralDisk := DefaultEphemeralDisk() 1140 err := task.Validate(ephemeralDisk, JobTypeBatch, nil) 1141 mErr := err.(*multierror.Error) 1142 if !strings.Contains(mErr.Errors[0].Error(), "task name") { 1143 t.Fatalf("err: %s", err) 1144 } 1145 if !strings.Contains(mErr.Errors[1].Error(), "task driver") { 1146 t.Fatalf("err: %s", err) 1147 } 1148 if !strings.Contains(mErr.Errors[2].Error(), "task resources") { 1149 t.Fatalf("err: %s", err) 1150 } 1151 1152 task = &Task{Name: "web/foo"} 1153 err = task.Validate(ephemeralDisk, JobTypeBatch, nil) 1154 mErr = err.(*multierror.Error) 1155 if !strings.Contains(mErr.Errors[0].Error(), "slashes") { 1156 t.Fatalf("err: %s", err) 1157 } 1158 1159 task = &Task{ 1160 Name: "web", 1161 Driver: "docker", 1162 Resources: &Resources{ 1163 CPU: 100, 1164 MemoryMB: 100, 1165 }, 1166 LogConfig: DefaultLogConfig(), 1167 } 1168 ephemeralDisk.SizeMB = 200 1169 err = task.Validate(ephemeralDisk, JobTypeBatch, nil) 1170 if err != nil { 1171 t.Fatalf("err: %s", err) 1172 } 1173 1174 task.Constraints = append(task.Constraints, 1175 &Constraint{ 1176 Operand: ConstraintDistinctHosts, 1177 }, 1178 &Constraint{ 1179 Operand: ConstraintDistinctProperty, 1180 LTarget: "${meta.rack}", 1181 }) 1182 1183 err = task.Validate(ephemeralDisk, JobTypeBatch, nil) 1184 mErr = err.(*multierror.Error) 1185 if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") { 1186 t.Fatalf("err: %s", err) 1187 } 1188 if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") { 1189 t.Fatalf("err: %s", err) 1190 } 1191 } 1192 1193 func TestTask_Validate_Services(t *testing.T) { 1194 s1 := &Service{ 1195 Name: "service-name", 1196 PortLabel: "bar", 1197 Checks: []*ServiceCheck{ 1198 { 1199 Name: "check-name", 1200 Type: ServiceCheckTCP, 1201 Interval: 0 * time.Second, 1202 }, 1203 { 1204 Name: "check-name", 1205 Type: ServiceCheckTCP, 1206 Timeout: 2 * time.Second, 1207 }, 1208 { 1209 Name: "check-name", 1210 Type: ServiceCheckTCP, 1211 Interval: 1 * time.Second, 1212 }, 1213 }, 1214 } 1215 1216 s2 := &Service{ 1217 Name: "service-name", 1218 PortLabel: "bar", 1219 } 1220 1221 s3 := &Service{ 1222 Name: "service-A", 1223 PortLabel: "a", 1224 } 1225 s4 := &Service{ 1226 Name: "service-A", 1227 PortLabel: "b", 1228 } 1229 1230 ephemeralDisk := DefaultEphemeralDisk() 1231 ephemeralDisk.SizeMB = 200 1232 task := &Task{ 1233 Name: "web", 1234 Driver: "docker", 1235 Resources: &Resources{ 1236 CPU: 100, 1237 MemoryMB: 100, 1238 }, 1239 Services: []*Service{s1, s2}, 1240 } 1241 1242 task1 := &Task{ 1243 Name: "web", 1244 Driver: "docker", 1245 Resources: DefaultResources(), 1246 Services: []*Service{s3, s4}, 1247 LogConfig: DefaultLogConfig(), 1248 } 1249 task1.Resources.Networks = []*NetworkResource{ 1250 { 1251 MBits: 10, 1252 DynamicPorts: []Port{ 1253 { 1254 Label: "a", 1255 Value: 1000, 1256 }, 1257 { 1258 Label: "b", 1259 Value: 2000, 1260 }, 1261 }, 1262 }, 1263 } 1264 1265 err := task.Validate(ephemeralDisk, JobTypeService, nil) 1266 if err == nil { 1267 t.Fatal("expected an error") 1268 } 1269 1270 if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") { 1271 t.Fatalf("err: %v", err) 1272 } 1273 1274 if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") { 1275 t.Fatalf("err: %v", err) 1276 } 1277 1278 if !strings.Contains(err.Error(), "missing required value interval") { 1279 t.Fatalf("err: %v", err) 1280 } 1281 1282 if !strings.Contains(err.Error(), "cannot be less than") { 1283 t.Fatalf("err: %v", err) 1284 } 1285 1286 if err = task1.Validate(ephemeralDisk, JobTypeService, nil); err != nil { 1287 t.Fatalf("err : %v", err) 1288 } 1289 } 1290 1291 func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { 1292 ephemeralDisk := DefaultEphemeralDisk() 1293 getTask := func(s *Service) *Task { 1294 task := &Task{ 1295 Name: "web", 1296 Driver: "docker", 1297 Resources: DefaultResources(), 1298 Services: []*Service{s}, 1299 LogConfig: DefaultLogConfig(), 1300 } 1301 task.Resources.Networks = []*NetworkResource{ 1302 { 1303 MBits: 10, 1304 DynamicPorts: []Port{ 1305 { 1306 Label: "http", 1307 Value: 80, 1308 }, 1309 }, 1310 }, 1311 } 1312 return task 1313 } 1314 1315 cases := []*Service{ 1316 { 1317 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1318 Name: "DriverModeWithLabel", 1319 PortLabel: "http", 1320 AddressMode: AddressModeDriver, 1321 }, 1322 { 1323 Name: "DriverModeWithPort", 1324 PortLabel: "80", 1325 AddressMode: AddressModeDriver, 1326 }, 1327 { 1328 Name: "HostModeWithLabel", 1329 PortLabel: "http", 1330 AddressMode: AddressModeHost, 1331 }, 1332 { 1333 Name: "HostModeWithoutLabel", 1334 AddressMode: AddressModeHost, 1335 }, 1336 { 1337 Name: "DriverModeWithoutLabel", 1338 AddressMode: AddressModeDriver, 1339 }, 1340 } 1341 1342 for _, service := range cases { 1343 task := getTask(service) 1344 t.Run(service.Name, func(t *testing.T) { 1345 if err := task.Validate(ephemeralDisk, JobTypeService, nil); err != nil { 1346 t.Fatalf("unexpected err: %v", err) 1347 } 1348 }) 1349 } 1350 } 1351 1352 func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { 1353 ephemeralDisk := DefaultEphemeralDisk() 1354 getTask := func(s *Service) *Task { 1355 task := &Task{ 1356 Name: "web", 1357 Driver: "docker", 1358 Resources: DefaultResources(), 1359 Services: []*Service{s}, 1360 LogConfig: DefaultLogConfig(), 1361 } 1362 task.Resources.Networks = []*NetworkResource{ 1363 { 1364 MBits: 10, 1365 DynamicPorts: []Port{ 1366 { 1367 Label: "http", 1368 Value: 80, 1369 }, 1370 }, 1371 }, 1372 } 1373 return task 1374 } 1375 1376 cases := []*Service{ 1377 { 1378 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1379 Name: "DriverModeWithLabel", 1380 PortLabel: "asdf", 1381 AddressMode: AddressModeDriver, 1382 }, 1383 { 1384 Name: "HostModeWithLabel", 1385 PortLabel: "asdf", 1386 AddressMode: AddressModeHost, 1387 }, 1388 { 1389 Name: "HostModeWithPort", 1390 PortLabel: "80", 1391 AddressMode: AddressModeHost, 1392 }, 1393 } 1394 1395 for _, service := range cases { 1396 task := getTask(service) 1397 t.Run(service.Name, func(t *testing.T) { 1398 err := task.Validate(ephemeralDisk, JobTypeService, nil) 1399 if err == nil { 1400 t.Fatalf("expected an error") 1401 } 1402 //t.Logf("err: %v", err) 1403 }) 1404 } 1405 } 1406 1407 func TestTask_Validate_Service_Check(t *testing.T) { 1408 1409 invalidCheck := ServiceCheck{ 1410 Name: "check-name", 1411 Command: "/bin/true", 1412 Type: ServiceCheckScript, 1413 Interval: 10 * time.Second, 1414 } 1415 1416 err := invalidCheck.validate() 1417 if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") { 1418 t.Fatalf("expected a timeout validation error but received: %q", err) 1419 } 1420 1421 check1 := ServiceCheck{ 1422 Name: "check-name", 1423 Type: ServiceCheckTCP, 1424 Interval: 10 * time.Second, 1425 Timeout: 2 * time.Second, 1426 } 1427 1428 if err := check1.validate(); err != nil { 1429 t.Fatalf("err: %v", err) 1430 } 1431 1432 check1.InitialStatus = "foo" 1433 err = check1.validate() 1434 if err == nil { 1435 t.Fatal("Expected an error") 1436 } 1437 1438 if !strings.Contains(err.Error(), "invalid initial check state (foo)") { 1439 t.Fatalf("err: %v", err) 1440 } 1441 1442 check1.InitialStatus = api.HealthCritical 1443 err = check1.validate() 1444 if err != nil { 1445 t.Fatalf("err: %v", err) 1446 } 1447 1448 check1.InitialStatus = api.HealthPassing 1449 err = check1.validate() 1450 if err != nil { 1451 t.Fatalf("err: %v", err) 1452 } 1453 1454 check1.InitialStatus = "" 1455 err = check1.validate() 1456 if err != nil { 1457 t.Fatalf("err: %v", err) 1458 } 1459 1460 check2 := ServiceCheck{ 1461 Name: "check-name-2", 1462 Type: ServiceCheckHTTP, 1463 Interval: 10 * time.Second, 1464 Timeout: 2 * time.Second, 1465 Path: "/foo/bar", 1466 } 1467 1468 err = check2.validate() 1469 if err != nil { 1470 t.Fatalf("err: %v", err) 1471 } 1472 1473 check2.Path = "" 1474 err = check2.validate() 1475 if err == nil { 1476 t.Fatal("Expected an error") 1477 } 1478 if !strings.Contains(err.Error(), "valid http path") { 1479 t.Fatalf("err: %v", err) 1480 } 1481 1482 check2.Path = "http://www.example.com" 1483 err = check2.validate() 1484 if err == nil { 1485 t.Fatal("Expected an error") 1486 } 1487 if !strings.Contains(err.Error(), "relative http path") { 1488 t.Fatalf("err: %v", err) 1489 } 1490 1491 t.Run("check expose", func(t *testing.T) { 1492 t.Run("type http", func(t *testing.T) { 1493 require.NoError(t, (&ServiceCheck{ 1494 Type: ServiceCheckHTTP, 1495 Interval: 1 * time.Second, 1496 Timeout: 1 * time.Second, 1497 Path: "/health", 1498 Expose: true, 1499 }).validate()) 1500 }) 1501 t.Run("type tcp", func(t *testing.T) { 1502 require.EqualError(t, (&ServiceCheck{ 1503 Type: ServiceCheckTCP, 1504 Interval: 1 * time.Second, 1505 Timeout: 1 * time.Second, 1506 Expose: true, 1507 }).validate(), "expose may only be set on HTTP or gRPC checks") 1508 }) 1509 }) 1510 } 1511 1512 // TestTask_Validate_Service_Check_AddressMode asserts that checks do not 1513 // inherit address mode but do inherit ports. 1514 func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { 1515 getTask := func(s *Service) *Task { 1516 return &Task{ 1517 Resources: &Resources{ 1518 Networks: []*NetworkResource{ 1519 { 1520 DynamicPorts: []Port{ 1521 { 1522 Label: "http", 1523 Value: 9999, 1524 }, 1525 }, 1526 }, 1527 }, 1528 }, 1529 Services: []*Service{s}, 1530 } 1531 } 1532 1533 cases := []struct { 1534 Service *Service 1535 ErrContains string 1536 }{ 1537 { 1538 Service: &Service{ 1539 Name: "invalid-driver", 1540 PortLabel: "80", 1541 AddressMode: "host", 1542 }, 1543 ErrContains: `port label "80" referenced`, 1544 }, 1545 { 1546 Service: &Service{ 1547 Name: "http-driver-fail-1", 1548 PortLabel: "80", 1549 AddressMode: "driver", 1550 Checks: []*ServiceCheck{ 1551 { 1552 Name: "invalid-check-1", 1553 Type: "tcp", 1554 Interval: time.Second, 1555 Timeout: time.Second, 1556 }, 1557 }, 1558 }, 1559 ErrContains: `check "invalid-check-1" cannot use a numeric port`, 1560 }, 1561 { 1562 Service: &Service{ 1563 Name: "http-driver-fail-2", 1564 PortLabel: "80", 1565 AddressMode: "driver", 1566 Checks: []*ServiceCheck{ 1567 { 1568 Name: "invalid-check-2", 1569 Type: "tcp", 1570 PortLabel: "80", 1571 Interval: time.Second, 1572 Timeout: time.Second, 1573 }, 1574 }, 1575 }, 1576 ErrContains: `check "invalid-check-2" cannot use a numeric port`, 1577 }, 1578 { 1579 Service: &Service{ 1580 Name: "http-driver-fail-3", 1581 PortLabel: "80", 1582 AddressMode: "driver", 1583 Checks: []*ServiceCheck{ 1584 { 1585 Name: "invalid-check-3", 1586 Type: "tcp", 1587 PortLabel: "missing-port-label", 1588 Interval: time.Second, 1589 Timeout: time.Second, 1590 }, 1591 }, 1592 }, 1593 ErrContains: `port label "missing-port-label" referenced`, 1594 }, 1595 { 1596 Service: &Service{ 1597 Name: "http-driver-passes", 1598 PortLabel: "80", 1599 AddressMode: "driver", 1600 Checks: []*ServiceCheck{ 1601 { 1602 Name: "valid-script-check", 1603 Type: "script", 1604 Command: "ok", 1605 Interval: time.Second, 1606 Timeout: time.Second, 1607 }, 1608 { 1609 Name: "valid-host-check", 1610 Type: "tcp", 1611 PortLabel: "http", 1612 Interval: time.Second, 1613 Timeout: time.Second, 1614 }, 1615 { 1616 Name: "valid-driver-check", 1617 Type: "tcp", 1618 AddressMode: "driver", 1619 Interval: time.Second, 1620 Timeout: time.Second, 1621 }, 1622 }, 1623 }, 1624 }, 1625 { 1626 Service: &Service{ 1627 Name: "empty-address-3673-passes-1", 1628 Checks: []*ServiceCheck{ 1629 { 1630 Name: "valid-port-label", 1631 Type: "tcp", 1632 PortLabel: "http", 1633 Interval: time.Second, 1634 Timeout: time.Second, 1635 }, 1636 { 1637 Name: "empty-is-ok", 1638 Type: "script", 1639 Command: "ok", 1640 Interval: time.Second, 1641 Timeout: time.Second, 1642 }, 1643 }, 1644 }, 1645 }, 1646 { 1647 Service: &Service{ 1648 Name: "empty-address-3673-passes-2", 1649 }, 1650 }, 1651 { 1652 Service: &Service{ 1653 Name: "empty-address-3673-fails", 1654 Checks: []*ServiceCheck{ 1655 { 1656 Name: "empty-is-not-ok", 1657 Type: "tcp", 1658 Interval: time.Second, 1659 Timeout: time.Second, 1660 }, 1661 }, 1662 }, 1663 ErrContains: `invalid: check requires a port but neither check nor service`, 1664 }, 1665 } 1666 1667 for _, tc := range cases { 1668 tc := tc 1669 task := getTask(tc.Service) 1670 t.Run(tc.Service.Name, func(t *testing.T) { 1671 err := validateServices(task) 1672 if err == nil && tc.ErrContains == "" { 1673 // Ok! 1674 return 1675 } 1676 if err == nil { 1677 t.Fatalf("no error returned. expected: %s", tc.ErrContains) 1678 } 1679 if !strings.Contains(err.Error(), tc.ErrContains) { 1680 t.Fatalf("expected %q but found: %v", tc.ErrContains, err) 1681 } 1682 }) 1683 } 1684 } 1685 1686 func TestTask_Validate_Service_Check_GRPC(t *testing.T) { 1687 t.Parallel() 1688 // Bad (no port) 1689 invalidGRPC := &ServiceCheck{ 1690 Type: ServiceCheckGRPC, 1691 Interval: time.Second, 1692 Timeout: time.Second, 1693 } 1694 service := &Service{ 1695 Name: "test", 1696 Checks: []*ServiceCheck{invalidGRPC}, 1697 } 1698 1699 assert.Error(t, service.Validate()) 1700 1701 // Good 1702 service.Checks[0] = &ServiceCheck{ 1703 Type: ServiceCheckGRPC, 1704 Interval: time.Second, 1705 Timeout: time.Second, 1706 PortLabel: "some-port-label", 1707 } 1708 1709 assert.NoError(t, service.Validate()) 1710 } 1711 1712 func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { 1713 t.Parallel() 1714 invalidCheckRestart := &CheckRestart{ 1715 Limit: -1, 1716 Grace: -1, 1717 } 1718 1719 err := invalidCheckRestart.Validate() 1720 assert.NotNil(t, err, "invalidateCheckRestart.Validate()") 1721 assert.Len(t, err.(*multierror.Error).Errors, 2) 1722 1723 validCheckRestart := &CheckRestart{} 1724 assert.Nil(t, validCheckRestart.Validate()) 1725 1726 validCheckRestart.Limit = 1 1727 validCheckRestart.Grace = 1 1728 assert.Nil(t, validCheckRestart.Validate()) 1729 } 1730 1731 func TestTask_Validate_ConnectProxyKind(t *testing.T) { 1732 ephemeralDisk := DefaultEphemeralDisk() 1733 getTask := func(kind TaskKind, leader bool) *Task { 1734 task := &Task{ 1735 Name: "web", 1736 Driver: "docker", 1737 Resources: DefaultResources(), 1738 LogConfig: DefaultLogConfig(), 1739 Kind: kind, 1740 Leader: leader, 1741 } 1742 task.Resources.Networks = []*NetworkResource{ 1743 { 1744 MBits: 10, 1745 DynamicPorts: []Port{ 1746 { 1747 Label: "http", 1748 Value: 80, 1749 }, 1750 }, 1751 }, 1752 } 1753 return task 1754 } 1755 1756 cases := []struct { 1757 Desc string 1758 Kind TaskKind 1759 Leader bool 1760 Service *Service 1761 TgService []*Service 1762 ErrContains string 1763 }{ 1764 { 1765 Desc: "Not connect", 1766 Kind: "test", 1767 }, 1768 { 1769 Desc: "Invalid because of service in task definition", 1770 Kind: "connect-proxy:redis", 1771 Service: &Service{ 1772 Name: "redis", 1773 }, 1774 ErrContains: "Connect proxy task must not have a service stanza", 1775 }, 1776 { 1777 Desc: "Leader should not be set", 1778 Kind: "connect-proxy:redis", 1779 Leader: true, 1780 Service: &Service{ 1781 Name: "redis", 1782 }, 1783 ErrContains: "Connect proxy task must not have leader set", 1784 }, 1785 { 1786 Desc: "Service name invalid", 1787 Kind: "connect-proxy:redis:test", 1788 Service: &Service{ 1789 Name: "redis", 1790 }, 1791 ErrContains: `No Connect services in task group with Connect proxy ("redis:test")`, 1792 }, 1793 { 1794 Desc: "Service name not found in group", 1795 Kind: "connect-proxy:redis", 1796 ErrContains: `No Connect services in task group with Connect proxy ("redis")`, 1797 }, 1798 { 1799 Desc: "Connect stanza not configured in group", 1800 Kind: "connect-proxy:redis", 1801 TgService: []*Service{{ 1802 Name: "redis", 1803 }}, 1804 ErrContains: `No Connect services in task group with Connect proxy ("redis")`, 1805 }, 1806 { 1807 Desc: "Valid connect proxy kind", 1808 Kind: "connect-proxy:redis", 1809 TgService: []*Service{{ 1810 Name: "redis", 1811 Connect: &ConsulConnect{ 1812 SidecarService: &ConsulSidecarService{ 1813 Port: "db", 1814 }, 1815 }, 1816 }}, 1817 }, 1818 } 1819 1820 for _, tc := range cases { 1821 tc := tc 1822 task := getTask(tc.Kind, tc.Leader) 1823 if tc.Service != nil { 1824 task.Services = []*Service{tc.Service} 1825 } 1826 t.Run(tc.Desc, func(t *testing.T) { 1827 err := task.Validate(ephemeralDisk, "service", tc.TgService) 1828 if err == nil && tc.ErrContains == "" { 1829 // Ok! 1830 return 1831 } 1832 require.Errorf(t, err, "no error returned. expected: %s", tc.ErrContains) 1833 require.Containsf(t, err.Error(), tc.ErrContains, "expected %q but found: %v", tc.ErrContains, err) 1834 }) 1835 } 1836 1837 } 1838 func TestTask_Validate_LogConfig(t *testing.T) { 1839 task := &Task{ 1840 LogConfig: DefaultLogConfig(), 1841 } 1842 ephemeralDisk := &EphemeralDisk{ 1843 SizeMB: 1, 1844 } 1845 1846 err := task.Validate(ephemeralDisk, JobTypeService, nil) 1847 mErr := err.(*multierror.Error) 1848 if !strings.Contains(mErr.Errors[3].Error(), "log storage") { 1849 t.Fatalf("err: %s", err) 1850 } 1851 } 1852 1853 func TestTask_Validate_CSIPluginConfig(t *testing.T) { 1854 table := []struct { 1855 name string 1856 pc *TaskCSIPluginConfig 1857 expectedErr string 1858 }{ 1859 { 1860 name: "no errors when not specified", 1861 pc: nil, 1862 }, 1863 { 1864 name: "requires non-empty plugin id", 1865 pc: &TaskCSIPluginConfig{}, 1866 expectedErr: "CSIPluginConfig must have a non-empty PluginID", 1867 }, 1868 { 1869 name: "requires valid plugin type", 1870 pc: &TaskCSIPluginConfig{ 1871 ID: "com.hashicorp.csi", 1872 Type: "nonsense", 1873 }, 1874 expectedErr: "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"", 1875 }, 1876 } 1877 1878 for _, tt := range table { 1879 t.Run(tt.name, func(t *testing.T) { 1880 task := &Task{ 1881 CSIPluginConfig: tt.pc, 1882 } 1883 ephemeralDisk := &EphemeralDisk{ 1884 SizeMB: 1, 1885 } 1886 1887 err := task.Validate(ephemeralDisk, JobTypeService, nil) 1888 mErr := err.(*multierror.Error) 1889 if tt.expectedErr != "" { 1890 if !strings.Contains(mErr.Errors[4].Error(), tt.expectedErr) { 1891 t.Fatalf("err: %s", err) 1892 } 1893 } else { 1894 if len(mErr.Errors) != 4 { 1895 t.Fatalf("unexpected err: %s", mErr.Errors[4]) 1896 } 1897 } 1898 }) 1899 } 1900 } 1901 1902 func TestTask_Validate_Template(t *testing.T) { 1903 1904 bad := &Template{} 1905 task := &Task{ 1906 Templates: []*Template{bad}, 1907 } 1908 ephemeralDisk := &EphemeralDisk{ 1909 SizeMB: 1, 1910 } 1911 1912 err := task.Validate(ephemeralDisk, JobTypeService, nil) 1913 if !strings.Contains(err.Error(), "Template 1 validation failed") { 1914 t.Fatalf("err: %s", err) 1915 } 1916 1917 // Have two templates that share the same destination 1918 good := &Template{ 1919 SourcePath: "foo", 1920 DestPath: "local/foo", 1921 ChangeMode: "noop", 1922 } 1923 1924 task.Templates = []*Template{good, good} 1925 err = task.Validate(ephemeralDisk, JobTypeService, nil) 1926 if !strings.Contains(err.Error(), "same destination as") { 1927 t.Fatalf("err: %s", err) 1928 } 1929 1930 // Env templates can't use signals 1931 task.Templates = []*Template{ 1932 { 1933 Envvars: true, 1934 ChangeMode: "signal", 1935 }, 1936 } 1937 1938 err = task.Validate(ephemeralDisk, JobTypeService, nil) 1939 if err == nil { 1940 t.Fatalf("expected error from Template.Validate") 1941 } 1942 if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) { 1943 t.Errorf("expected to find %q but found %v", expected, err) 1944 } 1945 } 1946 1947 func TestTemplate_Validate(t *testing.T) { 1948 cases := []struct { 1949 Tmpl *Template 1950 Fail bool 1951 ContainsErrs []string 1952 }{ 1953 { 1954 Tmpl: &Template{}, 1955 Fail: true, 1956 ContainsErrs: []string{ 1957 "specify a source path", 1958 "specify a destination", 1959 TemplateChangeModeInvalidError.Error(), 1960 }, 1961 }, 1962 { 1963 Tmpl: &Template{ 1964 Splay: -100, 1965 }, 1966 Fail: true, 1967 ContainsErrs: []string{ 1968 "positive splay", 1969 }, 1970 }, 1971 { 1972 Tmpl: &Template{ 1973 ChangeMode: "foo", 1974 }, 1975 Fail: true, 1976 ContainsErrs: []string{ 1977 TemplateChangeModeInvalidError.Error(), 1978 }, 1979 }, 1980 { 1981 Tmpl: &Template{ 1982 ChangeMode: "signal", 1983 }, 1984 Fail: true, 1985 ContainsErrs: []string{ 1986 "specify signal value", 1987 }, 1988 }, 1989 { 1990 Tmpl: &Template{ 1991 SourcePath: "foo", 1992 DestPath: "../../root", 1993 ChangeMode: "noop", 1994 }, 1995 Fail: true, 1996 ContainsErrs: []string{ 1997 "destination escapes", 1998 }, 1999 }, 2000 { 2001 Tmpl: &Template{ 2002 SourcePath: "foo", 2003 DestPath: "local/foo", 2004 ChangeMode: "noop", 2005 }, 2006 Fail: false, 2007 }, 2008 { 2009 Tmpl: &Template{ 2010 SourcePath: "foo", 2011 DestPath: "local/foo", 2012 ChangeMode: "noop", 2013 Perms: "0444", 2014 }, 2015 Fail: false, 2016 }, 2017 { 2018 Tmpl: &Template{ 2019 SourcePath: "foo", 2020 DestPath: "local/foo", 2021 ChangeMode: "noop", 2022 Perms: "zza", 2023 }, 2024 Fail: true, 2025 ContainsErrs: []string{ 2026 "as octal", 2027 }, 2028 }, 2029 } 2030 2031 for i, c := range cases { 2032 err := c.Tmpl.Validate() 2033 if err != nil { 2034 if !c.Fail { 2035 t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err) 2036 } 2037 2038 e := err.Error() 2039 for _, exp := range c.ContainsErrs { 2040 if !strings.Contains(e, exp) { 2041 t.Fatalf("Cased %d: should have contained error %q: %q", i+1, exp, e) 2042 } 2043 } 2044 } else if c.Fail { 2045 t.Fatalf("Case %d: should have failed: %v", i+1, err) 2046 } 2047 } 2048 } 2049 2050 func TestConstraint_Validate(t *testing.T) { 2051 c := &Constraint{} 2052 err := c.Validate() 2053 mErr := err.(*multierror.Error) 2054 if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") { 2055 t.Fatalf("err: %s", err) 2056 } 2057 2058 c = &Constraint{ 2059 LTarget: "$attr.kernel.name", 2060 RTarget: "linux", 2061 Operand: "=", 2062 } 2063 err = c.Validate() 2064 require.NoError(t, err) 2065 2066 // Perform additional regexp validation 2067 c.Operand = ConstraintRegex 2068 c.RTarget = "(foo" 2069 err = c.Validate() 2070 mErr = err.(*multierror.Error) 2071 if !strings.Contains(mErr.Errors[0].Error(), "missing closing") { 2072 t.Fatalf("err: %s", err) 2073 } 2074 2075 // Perform version validation 2076 c.Operand = ConstraintVersion 2077 c.RTarget = "~> foo" 2078 err = c.Validate() 2079 mErr = err.(*multierror.Error) 2080 if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") { 2081 t.Fatalf("err: %s", err) 2082 } 2083 2084 // Perform semver validation 2085 c.Operand = ConstraintSemver 2086 err = c.Validate() 2087 require.Error(t, err) 2088 require.Contains(t, err.Error(), "Malformed constraint") 2089 2090 c.RTarget = ">= 0.6.1" 2091 require.NoError(t, c.Validate()) 2092 2093 // Perform distinct_property validation 2094 c.Operand = ConstraintDistinctProperty 2095 c.RTarget = "0" 2096 err = c.Validate() 2097 mErr = err.(*multierror.Error) 2098 if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") { 2099 t.Fatalf("err: %s", err) 2100 } 2101 2102 c.RTarget = "-1" 2103 err = c.Validate() 2104 mErr = err.(*multierror.Error) 2105 if !strings.Contains(mErr.Errors[0].Error(), "to uint64") { 2106 t.Fatalf("err: %s", err) 2107 } 2108 2109 // Perform distinct_hosts validation 2110 c.Operand = ConstraintDistinctHosts 2111 c.LTarget = "" 2112 c.RTarget = "" 2113 if err := c.Validate(); err != nil { 2114 t.Fatalf("expected valid constraint: %v", err) 2115 } 2116 2117 // Perform set_contains* validation 2118 c.RTarget = "" 2119 for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} { 2120 c.Operand = o 2121 err = c.Validate() 2122 mErr = err.(*multierror.Error) 2123 if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") { 2124 t.Fatalf("err: %s", err) 2125 } 2126 } 2127 2128 // Perform LTarget validation 2129 c.Operand = ConstraintRegex 2130 c.RTarget = "foo" 2131 c.LTarget = "" 2132 err = c.Validate() 2133 mErr = err.(*multierror.Error) 2134 if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") { 2135 t.Fatalf("err: %s", err) 2136 } 2137 2138 // Perform constraint type validation 2139 c.Operand = "foo" 2140 err = c.Validate() 2141 mErr = err.(*multierror.Error) 2142 if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") { 2143 t.Fatalf("err: %s", err) 2144 } 2145 } 2146 2147 func TestAffinity_Validate(t *testing.T) { 2148 2149 type tc struct { 2150 affinity *Affinity 2151 err error 2152 name string 2153 } 2154 2155 testCases := []tc{ 2156 { 2157 affinity: &Affinity{}, 2158 err: fmt.Errorf("Missing affinity operand"), 2159 }, 2160 { 2161 affinity: &Affinity{ 2162 Operand: "foo", 2163 LTarget: "${meta.node_class}", 2164 Weight: 10, 2165 }, 2166 err: fmt.Errorf("Unknown affinity operator \"foo\""), 2167 }, 2168 { 2169 affinity: &Affinity{ 2170 Operand: "=", 2171 LTarget: "${meta.node_class}", 2172 Weight: 10, 2173 }, 2174 err: fmt.Errorf("Operator \"=\" requires an RTarget"), 2175 }, 2176 { 2177 affinity: &Affinity{ 2178 Operand: "=", 2179 LTarget: "${meta.node_class}", 2180 RTarget: "c4", 2181 Weight: 0, 2182 }, 2183 err: fmt.Errorf("Affinity weight cannot be zero"), 2184 }, 2185 { 2186 affinity: &Affinity{ 2187 Operand: "=", 2188 LTarget: "${meta.node_class}", 2189 RTarget: "c4", 2190 Weight: 110, 2191 }, 2192 err: fmt.Errorf("Affinity weight must be within the range [-100,100]"), 2193 }, 2194 { 2195 affinity: &Affinity{ 2196 Operand: "=", 2197 LTarget: "${node.class}", 2198 Weight: 10, 2199 }, 2200 err: fmt.Errorf("Operator \"=\" requires an RTarget"), 2201 }, 2202 { 2203 affinity: &Affinity{ 2204 Operand: "version", 2205 LTarget: "${meta.os}", 2206 RTarget: ">>2.0", 2207 Weight: 110, 2208 }, 2209 err: fmt.Errorf("Version affinity is invalid"), 2210 }, 2211 { 2212 affinity: &Affinity{ 2213 Operand: "regexp", 2214 LTarget: "${meta.os}", 2215 RTarget: "\\K2.0", 2216 Weight: 100, 2217 }, 2218 err: fmt.Errorf("Regular expression failed to compile"), 2219 }, 2220 } 2221 2222 for _, tc := range testCases { 2223 t.Run(tc.name, func(t *testing.T) { 2224 err := tc.affinity.Validate() 2225 if tc.err != nil { 2226 require.NotNil(t, err) 2227 require.Contains(t, err.Error(), tc.err.Error()) 2228 } else { 2229 require.Nil(t, err) 2230 } 2231 }) 2232 } 2233 } 2234 2235 func TestUpdateStrategy_Validate(t *testing.T) { 2236 u := &UpdateStrategy{ 2237 MaxParallel: -1, 2238 HealthCheck: "foo", 2239 MinHealthyTime: -10, 2240 HealthyDeadline: -15, 2241 ProgressDeadline: -25, 2242 AutoRevert: false, 2243 Canary: -1, 2244 } 2245 2246 err := u.Validate() 2247 mErr := err.(*multierror.Error) 2248 if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") { 2249 t.Fatalf("err: %s", err) 2250 } 2251 if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than zero") { 2252 t.Fatalf("err: %s", err) 2253 } 2254 if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") { 2255 t.Fatalf("err: %s", err) 2256 } 2257 if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") { 2258 t.Fatalf("err: %s", err) 2259 } 2260 if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") { 2261 t.Fatalf("err: %s", err) 2262 } 2263 if !strings.Contains(mErr.Errors[5].Error(), "Progress deadline must be zero or greater") { 2264 t.Fatalf("err: %s", err) 2265 } 2266 if !strings.Contains(mErr.Errors[6].Error(), "Minimum healthy time must be less than healthy deadline") { 2267 t.Fatalf("err: %s", err) 2268 } 2269 if !strings.Contains(mErr.Errors[7].Error(), "Healthy deadline must be less than progress deadline") { 2270 t.Fatalf("err: %s", err) 2271 } 2272 } 2273 2274 func TestResource_NetIndex(t *testing.T) { 2275 r := &Resources{ 2276 Networks: []*NetworkResource{ 2277 {Device: "eth0"}, 2278 {Device: "lo0"}, 2279 {Device: ""}, 2280 }, 2281 } 2282 if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 { 2283 t.Fatalf("Bad: %d", idx) 2284 } 2285 if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 { 2286 t.Fatalf("Bad: %d", idx) 2287 } 2288 if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 { 2289 t.Fatalf("Bad: %d", idx) 2290 } 2291 } 2292 2293 func TestResource_Superset(t *testing.T) { 2294 r1 := &Resources{ 2295 CPU: 2000, 2296 MemoryMB: 2048, 2297 DiskMB: 10000, 2298 } 2299 r2 := &Resources{ 2300 CPU: 2000, 2301 MemoryMB: 1024, 2302 DiskMB: 5000, 2303 } 2304 2305 if s, _ := r1.Superset(r1); !s { 2306 t.Fatalf("bad") 2307 } 2308 if s, _ := r1.Superset(r2); !s { 2309 t.Fatalf("bad") 2310 } 2311 if s, _ := r2.Superset(r1); s { 2312 t.Fatalf("bad") 2313 } 2314 if s, _ := r2.Superset(r2); !s { 2315 t.Fatalf("bad") 2316 } 2317 } 2318 2319 func TestResource_Add(t *testing.T) { 2320 r1 := &Resources{ 2321 CPU: 2000, 2322 MemoryMB: 2048, 2323 DiskMB: 10000, 2324 Networks: []*NetworkResource{ 2325 { 2326 CIDR: "10.0.0.0/8", 2327 MBits: 100, 2328 ReservedPorts: []Port{{"ssh", 22, 0}}, 2329 }, 2330 }, 2331 } 2332 r2 := &Resources{ 2333 CPU: 2000, 2334 MemoryMB: 1024, 2335 DiskMB: 5000, 2336 Networks: []*NetworkResource{ 2337 { 2338 IP: "10.0.0.1", 2339 MBits: 50, 2340 ReservedPorts: []Port{{"web", 80, 0}}, 2341 }, 2342 }, 2343 } 2344 2345 err := r1.Add(r2) 2346 if err != nil { 2347 t.Fatalf("Err: %v", err) 2348 } 2349 2350 expect := &Resources{ 2351 CPU: 3000, 2352 MemoryMB: 3072, 2353 DiskMB: 15000, 2354 Networks: []*NetworkResource{ 2355 { 2356 CIDR: "10.0.0.0/8", 2357 MBits: 150, 2358 ReservedPorts: []Port{{"ssh", 22, 0}, {"web", 80, 0}}, 2359 }, 2360 }, 2361 } 2362 2363 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 2364 t.Fatalf("bad: %#v %#v", expect, r1) 2365 } 2366 } 2367 2368 func TestResource_Add_Network(t *testing.T) { 2369 r1 := &Resources{} 2370 r2 := &Resources{ 2371 Networks: []*NetworkResource{ 2372 { 2373 MBits: 50, 2374 DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}}, 2375 }, 2376 }, 2377 } 2378 r3 := &Resources{ 2379 Networks: []*NetworkResource{ 2380 { 2381 MBits: 25, 2382 DynamicPorts: []Port{{"admin", 0, 8080}}, 2383 }, 2384 }, 2385 } 2386 2387 err := r1.Add(r2) 2388 if err != nil { 2389 t.Fatalf("Err: %v", err) 2390 } 2391 err = r1.Add(r3) 2392 if err != nil { 2393 t.Fatalf("Err: %v", err) 2394 } 2395 2396 expect := &Resources{ 2397 Networks: []*NetworkResource{ 2398 { 2399 MBits: 75, 2400 DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}, {"admin", 0, 8080}}, 2401 }, 2402 }, 2403 } 2404 2405 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 2406 t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0]) 2407 } 2408 } 2409 2410 func TestComparableResources_Subtract(t *testing.T) { 2411 r1 := &ComparableResources{ 2412 Flattened: AllocatedTaskResources{ 2413 Cpu: AllocatedCpuResources{ 2414 CpuShares: 2000, 2415 }, 2416 Memory: AllocatedMemoryResources{ 2417 MemoryMB: 2048, 2418 }, 2419 Networks: []*NetworkResource{ 2420 { 2421 CIDR: "10.0.0.0/8", 2422 MBits: 100, 2423 ReservedPorts: []Port{{"ssh", 22, 0}}, 2424 }, 2425 }, 2426 }, 2427 Shared: AllocatedSharedResources{ 2428 DiskMB: 10000, 2429 }, 2430 } 2431 2432 r2 := &ComparableResources{ 2433 Flattened: AllocatedTaskResources{ 2434 Cpu: AllocatedCpuResources{ 2435 CpuShares: 1000, 2436 }, 2437 Memory: AllocatedMemoryResources{ 2438 MemoryMB: 1024, 2439 }, 2440 Networks: []*NetworkResource{ 2441 { 2442 CIDR: "10.0.0.0/8", 2443 MBits: 20, 2444 ReservedPorts: []Port{{"ssh", 22, 0}}, 2445 }, 2446 }, 2447 }, 2448 Shared: AllocatedSharedResources{ 2449 DiskMB: 5000, 2450 }, 2451 } 2452 r1.Subtract(r2) 2453 2454 expect := &ComparableResources{ 2455 Flattened: AllocatedTaskResources{ 2456 Cpu: AllocatedCpuResources{ 2457 CpuShares: 1000, 2458 }, 2459 Memory: AllocatedMemoryResources{ 2460 MemoryMB: 1024, 2461 }, 2462 Networks: []*NetworkResource{ 2463 { 2464 CIDR: "10.0.0.0/8", 2465 MBits: 100, 2466 ReservedPorts: []Port{{"ssh", 22, 0}}, 2467 }, 2468 }, 2469 }, 2470 Shared: AllocatedSharedResources{ 2471 DiskMB: 5000, 2472 }, 2473 } 2474 2475 require := require.New(t) 2476 require.Equal(expect, r1) 2477 } 2478 2479 func TestEncodeDecode(t *testing.T) { 2480 type FooRequest struct { 2481 Foo string 2482 Bar int 2483 Baz bool 2484 } 2485 arg := &FooRequest{ 2486 Foo: "test", 2487 Bar: 42, 2488 Baz: true, 2489 } 2490 buf, err := Encode(1, arg) 2491 if err != nil { 2492 t.Fatalf("err: %v", err) 2493 } 2494 2495 var out FooRequest 2496 err = Decode(buf[1:], &out) 2497 if err != nil { 2498 t.Fatalf("err: %v", err) 2499 } 2500 2501 if !reflect.DeepEqual(arg, &out) { 2502 t.Fatalf("bad: %#v %#v", arg, out) 2503 } 2504 } 2505 2506 func BenchmarkEncodeDecode(b *testing.B) { 2507 job := testJob() 2508 2509 for i := 0; i < b.N; i++ { 2510 buf, err := Encode(1, job) 2511 if err != nil { 2512 b.Fatalf("err: %v", err) 2513 } 2514 2515 var out Job 2516 err = Decode(buf[1:], &out) 2517 if err != nil { 2518 b.Fatalf("err: %v", err) 2519 } 2520 } 2521 } 2522 2523 func TestInvalidServiceCheck(t *testing.T) { 2524 s := Service{ 2525 Name: "service-name", 2526 PortLabel: "bar", 2527 Checks: []*ServiceCheck{ 2528 { 2529 Name: "check-name", 2530 Type: "lol", 2531 }, 2532 }, 2533 } 2534 if err := s.Validate(); err == nil { 2535 t.Fatalf("Service should be invalid (invalid type)") 2536 } 2537 2538 s = Service{ 2539 Name: "service.name", 2540 PortLabel: "bar", 2541 } 2542 if err := s.ValidateName(s.Name); err == nil { 2543 t.Fatalf("Service should be invalid (contains a dot): %v", err) 2544 } 2545 2546 s = Service{ 2547 Name: "-my-service", 2548 PortLabel: "bar", 2549 } 2550 if err := s.Validate(); err == nil { 2551 t.Fatalf("Service should be invalid (begins with a hyphen): %v", err) 2552 } 2553 2554 s = Service{ 2555 Name: "my-service-${NOMAD_META_FOO}", 2556 PortLabel: "bar", 2557 } 2558 if err := s.Validate(); err != nil { 2559 t.Fatalf("Service should be valid: %v", err) 2560 } 2561 2562 s = Service{ 2563 Name: "my_service-${NOMAD_META_FOO}", 2564 PortLabel: "bar", 2565 } 2566 if err := s.Validate(); err == nil { 2567 t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err) 2568 } 2569 2570 s = Service{ 2571 Name: "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456", 2572 PortLabel: "bar", 2573 } 2574 if err := s.ValidateName(s.Name); err == nil { 2575 t.Fatalf("Service should be invalid (too long): %v", err) 2576 } 2577 2578 s = Service{ 2579 Name: "service-name", 2580 Checks: []*ServiceCheck{ 2581 { 2582 Name: "check-tcp", 2583 Type: ServiceCheckTCP, 2584 Interval: 5 * time.Second, 2585 Timeout: 2 * time.Second, 2586 }, 2587 { 2588 Name: "check-http", 2589 Type: ServiceCheckHTTP, 2590 Path: "/foo", 2591 Interval: 5 * time.Second, 2592 Timeout: 2 * time.Second, 2593 }, 2594 }, 2595 } 2596 if err := s.Validate(); err == nil { 2597 t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err) 2598 } 2599 2600 s = Service{ 2601 Name: "service-name", 2602 Checks: []*ServiceCheck{ 2603 { 2604 Name: "check-script", 2605 Type: ServiceCheckScript, 2606 Command: "/bin/date", 2607 Interval: 5 * time.Second, 2608 Timeout: 2 * time.Second, 2609 }, 2610 }, 2611 } 2612 if err := s.Validate(); err != nil { 2613 t.Fatalf("un-expected error: %v", err) 2614 } 2615 2616 s = Service{ 2617 Name: "service-name", 2618 Checks: []*ServiceCheck{ 2619 { 2620 Name: "tcp-check", 2621 Type: ServiceCheckTCP, 2622 Interval: 5 * time.Second, 2623 Timeout: 2 * time.Second, 2624 }, 2625 }, 2626 Connect: &ConsulConnect{ 2627 SidecarService: &ConsulSidecarService{}, 2628 }, 2629 } 2630 require.Error(t, s.Validate()) 2631 } 2632 2633 func TestDistinctCheckID(t *testing.T) { 2634 c1 := ServiceCheck{ 2635 Name: "web-health", 2636 Type: "http", 2637 Path: "/health", 2638 Interval: 2 * time.Second, 2639 Timeout: 3 * time.Second, 2640 } 2641 c2 := ServiceCheck{ 2642 Name: "web-health", 2643 Type: "http", 2644 Path: "/health1", 2645 Interval: 2 * time.Second, 2646 Timeout: 3 * time.Second, 2647 } 2648 2649 c3 := ServiceCheck{ 2650 Name: "web-health", 2651 Type: "http", 2652 Path: "/health", 2653 Interval: 4 * time.Second, 2654 Timeout: 3 * time.Second, 2655 } 2656 serviceID := "123" 2657 c1Hash := c1.Hash(serviceID) 2658 c2Hash := c2.Hash(serviceID) 2659 c3Hash := c3.Hash(serviceID) 2660 2661 if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash { 2662 t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash) 2663 } 2664 2665 } 2666 2667 func TestService_Canonicalize(t *testing.T) { 2668 job := "example" 2669 taskGroup := "cache" 2670 task := "redis" 2671 2672 s := Service{ 2673 Name: "${TASK}-db", 2674 } 2675 2676 s.Canonicalize(job, taskGroup, task) 2677 if s.Name != "redis-db" { 2678 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2679 } 2680 2681 s.Name = "db" 2682 s.Canonicalize(job, taskGroup, task) 2683 if s.Name != "db" { 2684 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2685 } 2686 2687 s.Name = "${JOB}-${TASKGROUP}-${TASK}-db" 2688 s.Canonicalize(job, taskGroup, task) 2689 if s.Name != "example-cache-redis-db" { 2690 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2691 } 2692 2693 s.Name = "${BASE}-db" 2694 s.Canonicalize(job, taskGroup, task) 2695 if s.Name != "example-cache-redis-db" { 2696 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2697 } 2698 2699 } 2700 2701 func TestService_Validate(t *testing.T) { 2702 s := Service{ 2703 Name: "testservice", 2704 } 2705 2706 s.Canonicalize("testjob", "testgroup", "testtask") 2707 2708 // Base service should be valid 2709 require.NoError(t, s.Validate()) 2710 2711 // Native Connect should be valid 2712 s.Connect = &ConsulConnect{ 2713 Native: true, 2714 } 2715 require.NoError(t, s.Validate()) 2716 2717 // Native Connect + Sidecar should be invalid 2718 s.Connect.SidecarService = &ConsulSidecarService{} 2719 require.Error(t, s.Validate()) 2720 } 2721 2722 func TestService_Equals(t *testing.T) { 2723 s := Service{ 2724 Name: "testservice", 2725 } 2726 2727 s.Canonicalize("testjob", "testgroup", "testtask") 2728 2729 o := s.Copy() 2730 2731 // Base service should be equal to copy of itself 2732 require.True(t, s.Equals(o)) 2733 2734 // create a helper to assert a diff and reset the struct 2735 assertDiff := func() { 2736 require.False(t, s.Equals(o)) 2737 o = s.Copy() 2738 require.True(t, s.Equals(o), "bug in copy") 2739 } 2740 2741 // Changing any field should cause inequality 2742 o.Name = "diff" 2743 assertDiff() 2744 2745 o.PortLabel = "diff" 2746 assertDiff() 2747 2748 o.AddressMode = AddressModeDriver 2749 assertDiff() 2750 2751 o.Tags = []string{"diff"} 2752 assertDiff() 2753 2754 o.CanaryTags = []string{"diff"} 2755 assertDiff() 2756 2757 o.Checks = []*ServiceCheck{{Name: "diff"}} 2758 assertDiff() 2759 2760 o.Connect = &ConsulConnect{Native: true} 2761 assertDiff() 2762 2763 o.EnableTagOverride = true 2764 assertDiff() 2765 } 2766 2767 func TestJob_ExpandServiceNames(t *testing.T) { 2768 j := &Job{ 2769 Name: "my-job", 2770 TaskGroups: []*TaskGroup{ 2771 { 2772 Name: "web", 2773 Tasks: []*Task{ 2774 { 2775 Name: "frontend", 2776 Services: []*Service{ 2777 { 2778 Name: "${BASE}-default", 2779 }, 2780 { 2781 Name: "jmx", 2782 }, 2783 }, 2784 }, 2785 }, 2786 }, 2787 { 2788 Name: "admin", 2789 Tasks: []*Task{ 2790 { 2791 Name: "admin-web", 2792 }, 2793 }, 2794 }, 2795 }, 2796 } 2797 2798 j.Canonicalize() 2799 2800 service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name 2801 if service1Name != "my-job-web-frontend-default" { 2802 t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name) 2803 } 2804 2805 service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name 2806 if service2Name != "jmx" { 2807 t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name) 2808 } 2809 2810 } 2811 2812 func TestJob_CombinedTaskMeta(t *testing.T) { 2813 j := &Job{ 2814 Meta: map[string]string{ 2815 "job_test": "job", 2816 "group_test": "job", 2817 "task_test": "job", 2818 }, 2819 TaskGroups: []*TaskGroup{ 2820 { 2821 Name: "group", 2822 Meta: map[string]string{ 2823 "group_test": "group", 2824 "task_test": "group", 2825 }, 2826 Tasks: []*Task{ 2827 { 2828 Name: "task", 2829 Meta: map[string]string{ 2830 "task_test": "task", 2831 }, 2832 }, 2833 }, 2834 }, 2835 }, 2836 } 2837 2838 require := require.New(t) 2839 require.EqualValues(map[string]string{ 2840 "job_test": "job", 2841 "group_test": "group", 2842 "task_test": "task", 2843 }, j.CombinedTaskMeta("group", "task")) 2844 require.EqualValues(map[string]string{ 2845 "job_test": "job", 2846 "group_test": "group", 2847 "task_test": "group", 2848 }, j.CombinedTaskMeta("group", "")) 2849 require.EqualValues(map[string]string{ 2850 "job_test": "job", 2851 "group_test": "job", 2852 "task_test": "job", 2853 }, j.CombinedTaskMeta("", "task")) 2854 2855 } 2856 2857 func TestPeriodicConfig_EnabledInvalid(t *testing.T) { 2858 // Create a config that is enabled but with no interval specified. 2859 p := &PeriodicConfig{Enabled: true} 2860 if err := p.Validate(); err == nil { 2861 t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid") 2862 } 2863 2864 // Create a config that is enabled, with a spec but no type specified. 2865 p = &PeriodicConfig{Enabled: true, Spec: "foo"} 2866 if err := p.Validate(); err == nil { 2867 t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid") 2868 } 2869 2870 // Create a config that is enabled, with a spec type but no spec specified. 2871 p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron} 2872 if err := p.Validate(); err == nil { 2873 t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid") 2874 } 2875 2876 // Create a config that is enabled, with a bad time zone. 2877 p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"} 2878 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") { 2879 t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err) 2880 } 2881 } 2882 2883 func TestPeriodicConfig_InvalidCron(t *testing.T) { 2884 specs := []string{"foo", "* *", "@foo"} 2885 for _, spec := range specs { 2886 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2887 p.Canonicalize() 2888 if err := p.Validate(); err == nil { 2889 t.Fatal("Invalid cron spec") 2890 } 2891 } 2892 } 2893 2894 func TestPeriodicConfig_ValidCron(t *testing.T) { 2895 specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} 2896 for _, spec := range specs { 2897 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2898 p.Canonicalize() 2899 if err := p.Validate(); err != nil { 2900 t.Fatal("Passed valid cron") 2901 } 2902 } 2903 } 2904 2905 func TestPeriodicConfig_NextCron(t *testing.T) { 2906 from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) 2907 2908 cases := []struct { 2909 spec string 2910 nextTime time.Time 2911 errorMsg string 2912 }{ 2913 { 2914 spec: "0 0 29 2 * 1980", 2915 nextTime: time.Time{}, 2916 }, 2917 { 2918 spec: "*/5 * * * *", 2919 nextTime: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC), 2920 }, 2921 { 2922 spec: "1 15-0 *", 2923 nextTime: time.Time{}, 2924 errorMsg: "failed parsing cron expression", 2925 }, 2926 } 2927 2928 for i, c := range cases { 2929 t.Run(fmt.Sprintf("case: %d: %s", i, c.spec), func(t *testing.T) { 2930 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: c.spec} 2931 p.Canonicalize() 2932 n, err := p.Next(from) 2933 2934 require.Equal(t, c.nextTime, n) 2935 if c.errorMsg == "" { 2936 require.NoError(t, err) 2937 } else { 2938 require.Error(t, err) 2939 require.Contains(t, err.Error(), c.errorMsg) 2940 } 2941 }) 2942 } 2943 } 2944 2945 func TestPeriodicConfig_ValidTimeZone(t *testing.T) { 2946 zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} 2947 for _, zone := range zones { 2948 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} 2949 p.Canonicalize() 2950 if err := p.Validate(); err != nil { 2951 t.Fatalf("Valid tz errored: %v", err) 2952 } 2953 } 2954 } 2955 2956 func TestPeriodicConfig_DST(t *testing.T) { 2957 require := require.New(t) 2958 2959 // On Sun, Mar 12, 2:00 am 2017: +1 hour UTC 2960 p := &PeriodicConfig{ 2961 Enabled: true, 2962 SpecType: PeriodicSpecCron, 2963 Spec: "0 2 11-13 3 * 2017", 2964 TimeZone: "America/Los_Angeles", 2965 } 2966 p.Canonicalize() 2967 2968 t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location) 2969 t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location) 2970 2971 // E1 is an 8 hour adjustment, E2 is a 7 hour adjustment 2972 e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC) 2973 e2 := time.Date(2017, time.March, 13, 9, 0, 0, 0, time.UTC) 2974 2975 n1, err := p.Next(t1) 2976 require.Nil(err) 2977 2978 n2, err := p.Next(t2) 2979 require.Nil(err) 2980 2981 require.Equal(e1, n1.UTC()) 2982 require.Equal(e2, n2.UTC()) 2983 } 2984 2985 func TestTaskLifecycleConfig_Validate(t *testing.T) { 2986 testCases := []struct { 2987 name string 2988 tlc *TaskLifecycleConfig 2989 err error 2990 }{ 2991 { 2992 name: "prestart completed", 2993 tlc: &TaskLifecycleConfig{ 2994 Hook: "prestart", 2995 Sidecar: false, 2996 }, 2997 err: nil, 2998 }, 2999 { 3000 name: "prestart running", 3001 tlc: &TaskLifecycleConfig{ 3002 Hook: "prestart", 3003 Sidecar: true, 3004 }, 3005 err: nil, 3006 }, 3007 { 3008 name: "no hook", 3009 tlc: &TaskLifecycleConfig{ 3010 Sidecar: true, 3011 }, 3012 err: fmt.Errorf("no lifecycle hook provided"), 3013 }, 3014 } 3015 3016 for _, tc := range testCases { 3017 t.Run(tc.name, func(t *testing.T) { 3018 err := tc.tlc.Validate() 3019 if tc.err != nil { 3020 require.Error(t, err) 3021 require.Contains(t, err.Error(), tc.err.Error()) 3022 } else { 3023 require.Nil(t, err) 3024 } 3025 }) 3026 3027 } 3028 } 3029 3030 func TestRestartPolicy_Validate(t *testing.T) { 3031 // Policy with acceptable restart options passes 3032 p := &RestartPolicy{ 3033 Mode: RestartPolicyModeFail, 3034 Attempts: 0, 3035 Interval: 5 * time.Second, 3036 } 3037 if err := p.Validate(); err != nil { 3038 t.Fatalf("err: %v", err) 3039 } 3040 3041 // Policy with ambiguous restart options fails 3042 p = &RestartPolicy{ 3043 Mode: RestartPolicyModeDelay, 3044 Attempts: 0, 3045 Interval: 5 * time.Second, 3046 } 3047 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") { 3048 t.Fatalf("expect ambiguity error, got: %v", err) 3049 } 3050 3051 // Bad policy mode fails 3052 p = &RestartPolicy{ 3053 Mode: "nope", 3054 Attempts: 1, 3055 Interval: 5 * time.Second, 3056 } 3057 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") { 3058 t.Fatalf("expect mode error, got: %v", err) 3059 } 3060 3061 // Fails when attempts*delay does not fit inside interval 3062 p = &RestartPolicy{ 3063 Mode: RestartPolicyModeDelay, 3064 Attempts: 3, 3065 Delay: 5 * time.Second, 3066 Interval: 5 * time.Second, 3067 } 3068 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") { 3069 t.Fatalf("expect restart interval error, got: %v", err) 3070 } 3071 3072 // Fails when interval is to small 3073 p = &RestartPolicy{ 3074 Mode: RestartPolicyModeDelay, 3075 Attempts: 3, 3076 Delay: 5 * time.Second, 3077 Interval: 2 * time.Second, 3078 } 3079 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") { 3080 t.Fatalf("expect interval too small error, got: %v", err) 3081 } 3082 } 3083 3084 func TestReschedulePolicy_Validate(t *testing.T) { 3085 type testCase struct { 3086 desc string 3087 ReschedulePolicy *ReschedulePolicy 3088 errors []error 3089 } 3090 3091 testCases := []testCase{ 3092 { 3093 desc: "Nil", 3094 }, 3095 { 3096 desc: "Disabled", 3097 ReschedulePolicy: &ReschedulePolicy{ 3098 Attempts: 0, 3099 Interval: 0 * time.Second}, 3100 }, 3101 { 3102 desc: "Disabled", 3103 ReschedulePolicy: &ReschedulePolicy{ 3104 Attempts: -1, 3105 Interval: 5 * time.Minute}, 3106 }, 3107 { 3108 desc: "Valid Linear Delay", 3109 ReschedulePolicy: &ReschedulePolicy{ 3110 Attempts: 1, 3111 Interval: 5 * time.Minute, 3112 Delay: 10 * time.Second, 3113 DelayFunction: "constant"}, 3114 }, 3115 { 3116 desc: "Valid Exponential Delay", 3117 ReschedulePolicy: &ReschedulePolicy{ 3118 Attempts: 5, 3119 Interval: 1 * time.Hour, 3120 Delay: 30 * time.Second, 3121 MaxDelay: 5 * time.Minute, 3122 DelayFunction: "exponential"}, 3123 }, 3124 { 3125 desc: "Valid Fibonacci Delay", 3126 ReschedulePolicy: &ReschedulePolicy{ 3127 Attempts: 5, 3128 Interval: 15 * time.Minute, 3129 Delay: 10 * time.Second, 3130 MaxDelay: 5 * time.Minute, 3131 DelayFunction: "fibonacci"}, 3132 }, 3133 { 3134 desc: "Invalid delay function", 3135 ReschedulePolicy: &ReschedulePolicy{ 3136 Attempts: 1, 3137 Interval: 1 * time.Second, 3138 DelayFunction: "blah"}, 3139 errors: []error{ 3140 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 3141 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3142 fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions), 3143 }, 3144 }, 3145 { 3146 desc: "Invalid delay ceiling", 3147 ReschedulePolicy: &ReschedulePolicy{ 3148 Attempts: 1, 3149 Interval: 8 * time.Second, 3150 DelayFunction: "exponential", 3151 Delay: 15 * time.Second, 3152 MaxDelay: 5 * time.Second}, 3153 errors: []error{ 3154 fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", 3155 15*time.Second, 5*time.Second), 3156 }, 3157 }, 3158 { 3159 desc: "Invalid delay and interval", 3160 ReschedulePolicy: &ReschedulePolicy{ 3161 Attempts: 1, 3162 Interval: 1 * time.Second, 3163 DelayFunction: "constant"}, 3164 errors: []error{ 3165 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 3166 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3167 }, 3168 }, { 3169 // Should suggest 2h40m as the interval 3170 desc: "Invalid Attempts - linear delay", 3171 ReschedulePolicy: &ReschedulePolicy{ 3172 Attempts: 10, 3173 Interval: 1 * time.Hour, 3174 Delay: 20 * time.Minute, 3175 DelayFunction: "constant", 3176 }, 3177 errors: []error{ 3178 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+ 3179 " delay function %q", 3, time.Hour, 20*time.Minute, "constant"), 3180 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 3181 200*time.Minute, 10), 3182 }, 3183 }, 3184 { 3185 // Should suggest 4h40m as the interval 3186 // Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40} 3187 desc: "Invalid Attempts - exponential delay", 3188 ReschedulePolicy: &ReschedulePolicy{ 3189 Attempts: 10, 3190 Interval: 30 * time.Minute, 3191 Delay: 5 * time.Minute, 3192 MaxDelay: 40 * time.Minute, 3193 DelayFunction: "exponential", 3194 }, 3195 errors: []error{ 3196 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 3197 "delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute, 3198 "exponential", 40*time.Minute), 3199 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 3200 280*time.Minute, 10), 3201 }, 3202 }, 3203 { 3204 // Should suggest 8h as the interval 3205 // Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80} 3206 desc: "Invalid Attempts - fibonacci delay", 3207 ReschedulePolicy: &ReschedulePolicy{ 3208 Attempts: 10, 3209 Interval: 1 * time.Hour, 3210 Delay: 20 * time.Minute, 3211 MaxDelay: 80 * time.Minute, 3212 DelayFunction: "fibonacci", 3213 }, 3214 errors: []error{ 3215 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 3216 "delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute, 3217 "fibonacci", 80*time.Minute), 3218 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 3219 480*time.Minute, 10), 3220 }, 3221 }, 3222 { 3223 desc: "Ambiguous Unlimited config, has both attempts and unlimited set", 3224 ReschedulePolicy: &ReschedulePolicy{ 3225 Attempts: 1, 3226 Unlimited: true, 3227 DelayFunction: "exponential", 3228 Delay: 5 * time.Minute, 3229 MaxDelay: 1 * time.Hour, 3230 }, 3231 errors: []error{ 3232 fmt.Errorf("Interval must be a non zero value if Attempts > 0"), 3233 fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true), 3234 }, 3235 }, 3236 { 3237 desc: "Invalid Unlimited config", 3238 ReschedulePolicy: &ReschedulePolicy{ 3239 Attempts: 1, 3240 Interval: 1 * time.Second, 3241 Unlimited: true, 3242 DelayFunction: "exponential", 3243 }, 3244 errors: []error{ 3245 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3246 fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3247 }, 3248 }, 3249 { 3250 desc: "Valid Unlimited config", 3251 ReschedulePolicy: &ReschedulePolicy{ 3252 Unlimited: true, 3253 DelayFunction: "exponential", 3254 Delay: 5 * time.Second, 3255 MaxDelay: 1 * time.Hour, 3256 }, 3257 }, 3258 } 3259 3260 for _, tc := range testCases { 3261 t.Run(tc.desc, func(t *testing.T) { 3262 require := require.New(t) 3263 gotErr := tc.ReschedulePolicy.Validate() 3264 if tc.errors != nil { 3265 // Validate all errors 3266 for _, err := range tc.errors { 3267 require.Contains(gotErr.Error(), err.Error()) 3268 } 3269 } else { 3270 require.Nil(gotErr) 3271 } 3272 }) 3273 } 3274 } 3275 3276 func TestAllocation_Index(t *testing.T) { 3277 a1 := Allocation{ 3278 Name: "example.cache[1]", 3279 TaskGroup: "cache", 3280 JobID: "example", 3281 Job: &Job{ 3282 ID: "example", 3283 TaskGroups: []*TaskGroup{{Name: "cache"}}}, 3284 } 3285 e1 := uint(1) 3286 a2 := a1.Copy() 3287 a2.Name = "example.cache[713127]" 3288 e2 := uint(713127) 3289 3290 if a1.Index() != e1 || a2.Index() != e2 { 3291 t.Fatalf("Got %d and %d", a1.Index(), a2.Index()) 3292 } 3293 } 3294 3295 func TestTaskArtifact_Validate_Source(t *testing.T) { 3296 valid := &TaskArtifact{GetterSource: "google.com"} 3297 if err := valid.Validate(); err != nil { 3298 t.Fatalf("unexpected error: %v", err) 3299 } 3300 } 3301 3302 func TestTaskArtifact_Validate_Dest(t *testing.T) { 3303 valid := &TaskArtifact{GetterSource: "google.com"} 3304 if err := valid.Validate(); err != nil { 3305 t.Fatalf("unexpected error: %v", err) 3306 } 3307 3308 valid.RelativeDest = "local/" 3309 if err := valid.Validate(); err != nil { 3310 t.Fatalf("unexpected error: %v", err) 3311 } 3312 3313 valid.RelativeDest = "local/.." 3314 if err := valid.Validate(); err != nil { 3315 t.Fatalf("unexpected error: %v", err) 3316 } 3317 3318 valid.RelativeDest = "local/../../.." 3319 if err := valid.Validate(); err == nil { 3320 t.Fatalf("expected error: %v", err) 3321 } 3322 } 3323 3324 // TestTaskArtifact_Hash asserts an artifact's hash changes when any of the 3325 // fields change. 3326 func TestTaskArtifact_Hash(t *testing.T) { 3327 t.Parallel() 3328 3329 cases := []TaskArtifact{ 3330 {}, 3331 { 3332 GetterSource: "a", 3333 }, 3334 { 3335 GetterSource: "b", 3336 }, 3337 { 3338 GetterSource: "b", 3339 GetterOptions: map[string]string{"c": "c"}, 3340 }, 3341 { 3342 GetterSource: "b", 3343 GetterOptions: map[string]string{ 3344 "c": "c", 3345 "d": "d", 3346 }, 3347 }, 3348 { 3349 GetterSource: "b", 3350 GetterOptions: map[string]string{ 3351 "c": "c", 3352 "d": "e", 3353 }, 3354 }, 3355 { 3356 GetterSource: "b", 3357 GetterOptions: map[string]string{ 3358 "c": "c", 3359 "d": "e", 3360 }, 3361 GetterMode: "f", 3362 }, 3363 { 3364 GetterSource: "b", 3365 GetterOptions: map[string]string{ 3366 "c": "c", 3367 "d": "e", 3368 }, 3369 GetterMode: "g", 3370 }, 3371 { 3372 GetterSource: "b", 3373 GetterOptions: map[string]string{ 3374 "c": "c", 3375 "d": "e", 3376 }, 3377 GetterMode: "g", 3378 RelativeDest: "h", 3379 }, 3380 { 3381 GetterSource: "b", 3382 GetterOptions: map[string]string{ 3383 "c": "c", 3384 "d": "e", 3385 }, 3386 GetterMode: "g", 3387 RelativeDest: "i", 3388 }, 3389 } 3390 3391 // Map of hash to source 3392 hashes := make(map[string]TaskArtifact, len(cases)) 3393 for _, tc := range cases { 3394 h := tc.Hash() 3395 3396 // Hash should be deterministic 3397 require.Equal(t, h, tc.Hash()) 3398 3399 // Hash should be unique 3400 if orig, ok := hashes[h]; ok { 3401 require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n", 3402 pretty.Sprint(tc), pretty.Sprint(orig), 3403 ) 3404 } 3405 hashes[h] = tc 3406 } 3407 3408 require.Len(t, hashes, len(cases)) 3409 } 3410 3411 func TestAllocation_ShouldMigrate(t *testing.T) { 3412 alloc := Allocation{ 3413 PreviousAllocation: "123", 3414 TaskGroup: "foo", 3415 Job: &Job{ 3416 TaskGroups: []*TaskGroup{ 3417 { 3418 Name: "foo", 3419 EphemeralDisk: &EphemeralDisk{ 3420 Migrate: true, 3421 Sticky: true, 3422 }, 3423 }, 3424 }, 3425 }, 3426 } 3427 3428 if !alloc.ShouldMigrate() { 3429 t.Fatalf("bad: %v", alloc) 3430 } 3431 3432 alloc1 := Allocation{ 3433 PreviousAllocation: "123", 3434 TaskGroup: "foo", 3435 Job: &Job{ 3436 TaskGroups: []*TaskGroup{ 3437 { 3438 Name: "foo", 3439 EphemeralDisk: &EphemeralDisk{}, 3440 }, 3441 }, 3442 }, 3443 } 3444 3445 if alloc1.ShouldMigrate() { 3446 t.Fatalf("bad: %v", alloc) 3447 } 3448 3449 alloc2 := Allocation{ 3450 PreviousAllocation: "123", 3451 TaskGroup: "foo", 3452 Job: &Job{ 3453 TaskGroups: []*TaskGroup{ 3454 { 3455 Name: "foo", 3456 EphemeralDisk: &EphemeralDisk{ 3457 Sticky: false, 3458 Migrate: true, 3459 }, 3460 }, 3461 }, 3462 }, 3463 } 3464 3465 if alloc2.ShouldMigrate() { 3466 t.Fatalf("bad: %v", alloc) 3467 } 3468 3469 alloc3 := Allocation{ 3470 PreviousAllocation: "123", 3471 TaskGroup: "foo", 3472 Job: &Job{ 3473 TaskGroups: []*TaskGroup{ 3474 { 3475 Name: "foo", 3476 }, 3477 }, 3478 }, 3479 } 3480 3481 if alloc3.ShouldMigrate() { 3482 t.Fatalf("bad: %v", alloc) 3483 } 3484 3485 // No previous 3486 alloc4 := Allocation{ 3487 TaskGroup: "foo", 3488 Job: &Job{ 3489 TaskGroups: []*TaskGroup{ 3490 { 3491 Name: "foo", 3492 EphemeralDisk: &EphemeralDisk{ 3493 Migrate: true, 3494 Sticky: true, 3495 }, 3496 }, 3497 }, 3498 }, 3499 } 3500 3501 if alloc4.ShouldMigrate() { 3502 t.Fatalf("bad: %v", alloc4) 3503 } 3504 } 3505 3506 func TestTaskArtifact_Validate_Checksum(t *testing.T) { 3507 cases := []struct { 3508 Input *TaskArtifact 3509 Err bool 3510 }{ 3511 { 3512 &TaskArtifact{ 3513 GetterSource: "foo.com", 3514 GetterOptions: map[string]string{ 3515 "checksum": "no-type", 3516 }, 3517 }, 3518 true, 3519 }, 3520 { 3521 &TaskArtifact{ 3522 GetterSource: "foo.com", 3523 GetterOptions: map[string]string{ 3524 "checksum": "md5:toosmall", 3525 }, 3526 }, 3527 true, 3528 }, 3529 { 3530 &TaskArtifact{ 3531 GetterSource: "foo.com", 3532 GetterOptions: map[string]string{ 3533 "checksum": "invalid:type", 3534 }, 3535 }, 3536 true, 3537 }, 3538 { 3539 &TaskArtifact{ 3540 GetterSource: "foo.com", 3541 GetterOptions: map[string]string{ 3542 "checksum": "md5:${ARTIFACT_CHECKSUM}", 3543 }, 3544 }, 3545 false, 3546 }, 3547 } 3548 3549 for i, tc := range cases { 3550 err := tc.Input.Validate() 3551 if (err != nil) != tc.Err { 3552 t.Fatalf("case %d: %v", i, err) 3553 continue 3554 } 3555 } 3556 } 3557 3558 func TestPlan_NormalizeAllocations(t *testing.T) { 3559 t.Parallel() 3560 plan := &Plan{ 3561 NodeUpdate: make(map[string][]*Allocation), 3562 NodePreemptions: make(map[string][]*Allocation), 3563 } 3564 stoppedAlloc := MockAlloc() 3565 desiredDesc := "Desired desc" 3566 plan.AppendStoppedAlloc(stoppedAlloc, desiredDesc, AllocClientStatusLost) 3567 preemptedAlloc := MockAlloc() 3568 preemptingAllocID := uuid.Generate() 3569 plan.AppendPreemptedAlloc(preemptedAlloc, preemptingAllocID) 3570 3571 plan.NormalizeAllocations() 3572 3573 actualStoppedAlloc := plan.NodeUpdate[stoppedAlloc.NodeID][0] 3574 expectedStoppedAlloc := &Allocation{ 3575 ID: stoppedAlloc.ID, 3576 DesiredDescription: desiredDesc, 3577 ClientStatus: AllocClientStatusLost, 3578 } 3579 assert.Equal(t, expectedStoppedAlloc, actualStoppedAlloc) 3580 actualPreemptedAlloc := plan.NodePreemptions[preemptedAlloc.NodeID][0] 3581 expectedPreemptedAlloc := &Allocation{ 3582 ID: preemptedAlloc.ID, 3583 PreemptedByAllocation: preemptingAllocID, 3584 } 3585 assert.Equal(t, expectedPreemptedAlloc, actualPreemptedAlloc) 3586 } 3587 3588 func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 3589 t.Parallel() 3590 plan := &Plan{ 3591 NodeUpdate: make(map[string][]*Allocation), 3592 } 3593 alloc := MockAlloc() 3594 desiredDesc := "Desired desc" 3595 3596 plan.AppendStoppedAlloc(alloc, desiredDesc, AllocClientStatusLost) 3597 3598 expectedAlloc := new(Allocation) 3599 *expectedAlloc = *alloc 3600 expectedAlloc.DesiredDescription = desiredDesc 3601 expectedAlloc.DesiredStatus = AllocDesiredStatusStop 3602 expectedAlloc.ClientStatus = AllocClientStatusLost 3603 expectedAlloc.Job = nil 3604 expectedAlloc.AllocStates = []*AllocState{{ 3605 Field: AllocStateFieldClientStatus, 3606 Value: "lost", 3607 }} 3608 3609 // This value is set to time.Now() in AppendStoppedAlloc, so clear it 3610 appendedAlloc := plan.NodeUpdate[alloc.NodeID][0] 3611 appendedAlloc.AllocStates[0].Time = time.Time{} 3612 3613 assert.Equal(t, expectedAlloc, appendedAlloc) 3614 assert.Equal(t, alloc.Job, plan.Job) 3615 } 3616 3617 func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 3618 t.Parallel() 3619 plan := &Plan{ 3620 NodePreemptions: make(map[string][]*Allocation), 3621 } 3622 alloc := MockAlloc() 3623 preemptingAllocID := uuid.Generate() 3624 3625 plan.AppendPreemptedAlloc(alloc, preemptingAllocID) 3626 3627 appendedAlloc := plan.NodePreemptions[alloc.NodeID][0] 3628 expectedAlloc := &Allocation{ 3629 ID: alloc.ID, 3630 PreemptedByAllocation: preemptingAllocID, 3631 JobID: alloc.JobID, 3632 Namespace: alloc.Namespace, 3633 DesiredStatus: AllocDesiredStatusEvict, 3634 DesiredDescription: fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID), 3635 AllocatedResources: alloc.AllocatedResources, 3636 TaskResources: alloc.TaskResources, 3637 SharedResources: alloc.SharedResources, 3638 } 3639 assert.Equal(t, expectedAlloc, appendedAlloc) 3640 } 3641 3642 func TestAllocation_MsgPackTags(t *testing.T) { 3643 t.Parallel() 3644 planType := reflect.TypeOf(Allocation{}) 3645 3646 msgPackTags, _ := planType.FieldByName("_struct") 3647 3648 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3649 } 3650 3651 func TestEvaluation_MsgPackTags(t *testing.T) { 3652 t.Parallel() 3653 planType := reflect.TypeOf(Evaluation{}) 3654 3655 msgPackTags, _ := planType.FieldByName("_struct") 3656 3657 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3658 } 3659 3660 func TestAllocation_Terminated(t *testing.T) { 3661 type desiredState struct { 3662 ClientStatus string 3663 DesiredStatus string 3664 Terminated bool 3665 } 3666 3667 harness := []desiredState{ 3668 { 3669 ClientStatus: AllocClientStatusPending, 3670 DesiredStatus: AllocDesiredStatusStop, 3671 Terminated: false, 3672 }, 3673 { 3674 ClientStatus: AllocClientStatusRunning, 3675 DesiredStatus: AllocDesiredStatusStop, 3676 Terminated: false, 3677 }, 3678 { 3679 ClientStatus: AllocClientStatusFailed, 3680 DesiredStatus: AllocDesiredStatusStop, 3681 Terminated: true, 3682 }, 3683 { 3684 ClientStatus: AllocClientStatusFailed, 3685 DesiredStatus: AllocDesiredStatusRun, 3686 Terminated: true, 3687 }, 3688 } 3689 3690 for _, state := range harness { 3691 alloc := Allocation{} 3692 alloc.DesiredStatus = state.DesiredStatus 3693 alloc.ClientStatus = state.ClientStatus 3694 if alloc.Terminated() != state.Terminated { 3695 t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated()) 3696 } 3697 } 3698 } 3699 3700 func TestAllocation_ShouldReschedule(t *testing.T) { 3701 type testCase struct { 3702 Desc string 3703 FailTime time.Time 3704 ClientStatus string 3705 DesiredStatus string 3706 ReschedulePolicy *ReschedulePolicy 3707 RescheduleTrackers []*RescheduleEvent 3708 ShouldReschedule bool 3709 } 3710 3711 fail := time.Now() 3712 3713 harness := []testCase{ 3714 { 3715 Desc: "Reschedule when desired state is stop", 3716 ClientStatus: AllocClientStatusPending, 3717 DesiredStatus: AllocDesiredStatusStop, 3718 FailTime: fail, 3719 ReschedulePolicy: nil, 3720 ShouldReschedule: false, 3721 }, 3722 { 3723 Desc: "Disabled rescheduling", 3724 ClientStatus: AllocClientStatusFailed, 3725 DesiredStatus: AllocDesiredStatusRun, 3726 FailTime: fail, 3727 ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute}, 3728 ShouldReschedule: false, 3729 }, 3730 { 3731 Desc: "Reschedule when client status is complete", 3732 ClientStatus: AllocClientStatusComplete, 3733 DesiredStatus: AllocDesiredStatusRun, 3734 FailTime: fail, 3735 ReschedulePolicy: nil, 3736 ShouldReschedule: false, 3737 }, 3738 { 3739 Desc: "Reschedule with nil reschedule policy", 3740 ClientStatus: AllocClientStatusFailed, 3741 DesiredStatus: AllocDesiredStatusRun, 3742 FailTime: fail, 3743 ReschedulePolicy: nil, 3744 ShouldReschedule: false, 3745 }, 3746 { 3747 Desc: "Reschedule with unlimited and attempts >0", 3748 ClientStatus: AllocClientStatusFailed, 3749 DesiredStatus: AllocDesiredStatusRun, 3750 FailTime: fail, 3751 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true}, 3752 ShouldReschedule: true, 3753 }, 3754 { 3755 Desc: "Reschedule when client status is complete", 3756 ClientStatus: AllocClientStatusComplete, 3757 DesiredStatus: AllocDesiredStatusRun, 3758 FailTime: fail, 3759 ReschedulePolicy: nil, 3760 ShouldReschedule: false, 3761 }, 3762 { 3763 Desc: "Reschedule with policy when client status complete", 3764 ClientStatus: AllocClientStatusComplete, 3765 DesiredStatus: AllocDesiredStatusRun, 3766 FailTime: fail, 3767 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 3768 ShouldReschedule: false, 3769 }, 3770 { 3771 Desc: "Reschedule with no previous attempts", 3772 ClientStatus: AllocClientStatusFailed, 3773 DesiredStatus: AllocDesiredStatusRun, 3774 FailTime: fail, 3775 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 3776 ShouldReschedule: true, 3777 }, 3778 { 3779 Desc: "Reschedule with leftover attempts", 3780 ClientStatus: AllocClientStatusFailed, 3781 DesiredStatus: AllocDesiredStatusRun, 3782 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 3783 FailTime: fail, 3784 RescheduleTrackers: []*RescheduleEvent{ 3785 { 3786 RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(), 3787 }, 3788 }, 3789 ShouldReschedule: true, 3790 }, 3791 { 3792 Desc: "Reschedule with too old previous attempts", 3793 ClientStatus: AllocClientStatusFailed, 3794 DesiredStatus: AllocDesiredStatusRun, 3795 FailTime: fail, 3796 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute}, 3797 RescheduleTrackers: []*RescheduleEvent{ 3798 { 3799 RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(), 3800 }, 3801 }, 3802 ShouldReschedule: true, 3803 }, 3804 { 3805 Desc: "Reschedule with no leftover attempts", 3806 ClientStatus: AllocClientStatusFailed, 3807 DesiredStatus: AllocDesiredStatusRun, 3808 FailTime: fail, 3809 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 3810 RescheduleTrackers: []*RescheduleEvent{ 3811 { 3812 RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), 3813 }, 3814 { 3815 RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(), 3816 }, 3817 }, 3818 ShouldReschedule: false, 3819 }, 3820 } 3821 3822 for _, state := range harness { 3823 alloc := Allocation{} 3824 alloc.DesiredStatus = state.DesiredStatus 3825 alloc.ClientStatus = state.ClientStatus 3826 alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers} 3827 3828 t.Run(state.Desc, func(t *testing.T) { 3829 if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule { 3830 t.Fatalf("expected %v but got %v", state.ShouldReschedule, got) 3831 } 3832 }) 3833 3834 } 3835 } 3836 3837 func TestAllocation_LastEventTime(t *testing.T) { 3838 type testCase struct { 3839 desc string 3840 taskState map[string]*TaskState 3841 expectedLastEventTime time.Time 3842 } 3843 3844 t1 := time.Now().UTC() 3845 3846 testCases := []testCase{ 3847 { 3848 desc: "nil task state", 3849 expectedLastEventTime: t1, 3850 }, 3851 { 3852 desc: "empty task state", 3853 taskState: make(map[string]*TaskState), 3854 expectedLastEventTime: t1, 3855 }, 3856 { 3857 desc: "Finished At not set", 3858 taskState: map[string]*TaskState{"foo": {State: "start", 3859 StartedAt: t1.Add(-2 * time.Hour)}}, 3860 expectedLastEventTime: t1, 3861 }, 3862 { 3863 desc: "One finished ", 3864 taskState: map[string]*TaskState{"foo": {State: "start", 3865 StartedAt: t1.Add(-2 * time.Hour), 3866 FinishedAt: t1.Add(-1 * time.Hour)}}, 3867 expectedLastEventTime: t1.Add(-1 * time.Hour), 3868 }, 3869 { 3870 desc: "Multiple task groups", 3871 taskState: map[string]*TaskState{"foo": {State: "start", 3872 StartedAt: t1.Add(-2 * time.Hour), 3873 FinishedAt: t1.Add(-1 * time.Hour)}, 3874 "bar": {State: "start", 3875 StartedAt: t1.Add(-2 * time.Hour), 3876 FinishedAt: t1.Add(-40 * time.Minute)}}, 3877 expectedLastEventTime: t1.Add(-40 * time.Minute), 3878 }, 3879 { 3880 desc: "No finishedAt set, one task event, should use modify time", 3881 taskState: map[string]*TaskState{"foo": { 3882 State: "run", 3883 StartedAt: t1.Add(-2 * time.Hour), 3884 Events: []*TaskEvent{ 3885 {Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()}, 3886 }}, 3887 }, 3888 expectedLastEventTime: t1, 3889 }, 3890 } 3891 for _, tc := range testCases { 3892 t.Run(tc.desc, func(t *testing.T) { 3893 alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()} 3894 alloc.TaskStates = tc.taskState 3895 require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime()) 3896 }) 3897 } 3898 } 3899 3900 func TestAllocation_NextDelay(t *testing.T) { 3901 type testCase struct { 3902 desc string 3903 reschedulePolicy *ReschedulePolicy 3904 alloc *Allocation 3905 expectedRescheduleTime time.Time 3906 expectedRescheduleEligible bool 3907 } 3908 now := time.Now() 3909 testCases := []testCase{ 3910 { 3911 desc: "Allocation hasn't failed yet", 3912 reschedulePolicy: &ReschedulePolicy{ 3913 DelayFunction: "constant", 3914 Delay: 5 * time.Second, 3915 }, 3916 alloc: &Allocation{}, 3917 expectedRescheduleTime: time.Time{}, 3918 expectedRescheduleEligible: false, 3919 }, 3920 { 3921 desc: "Allocation has no reschedule policy", 3922 alloc: &Allocation{}, 3923 expectedRescheduleTime: time.Time{}, 3924 expectedRescheduleEligible: false, 3925 }, 3926 { 3927 desc: "Allocation lacks task state", 3928 reschedulePolicy: &ReschedulePolicy{ 3929 DelayFunction: "constant", 3930 Delay: 5 * time.Second, 3931 Unlimited: true, 3932 }, 3933 alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()}, 3934 expectedRescheduleTime: now.UTC().Add(5 * time.Second), 3935 expectedRescheduleEligible: true, 3936 }, 3937 { 3938 desc: "linear delay, unlimited restarts, no reschedule tracker", 3939 reschedulePolicy: &ReschedulePolicy{ 3940 DelayFunction: "constant", 3941 Delay: 5 * time.Second, 3942 Unlimited: true, 3943 }, 3944 alloc: &Allocation{ 3945 ClientStatus: AllocClientStatusFailed, 3946 TaskStates: map[string]*TaskState{"foo": {State: "dead", 3947 StartedAt: now.Add(-1 * time.Hour), 3948 FinishedAt: now.Add(-2 * time.Second)}}, 3949 }, 3950 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3951 expectedRescheduleEligible: true, 3952 }, 3953 { 3954 desc: "linear delay with reschedule tracker", 3955 reschedulePolicy: &ReschedulePolicy{ 3956 DelayFunction: "constant", 3957 Delay: 5 * time.Second, 3958 Interval: 10 * time.Minute, 3959 Attempts: 2, 3960 }, 3961 alloc: &Allocation{ 3962 ClientStatus: AllocClientStatusFailed, 3963 TaskStates: map[string]*TaskState{"foo": {State: "start", 3964 StartedAt: now.Add(-1 * time.Hour), 3965 FinishedAt: now.Add(-2 * time.Second)}}, 3966 RescheduleTracker: &RescheduleTracker{ 3967 Events: []*RescheduleEvent{{ 3968 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 3969 Delay: 5 * time.Second, 3970 }}, 3971 }}, 3972 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3973 expectedRescheduleEligible: true, 3974 }, 3975 { 3976 desc: "linear delay with reschedule tracker, attempts exhausted", 3977 reschedulePolicy: &ReschedulePolicy{ 3978 DelayFunction: "constant", 3979 Delay: 5 * time.Second, 3980 Interval: 10 * time.Minute, 3981 Attempts: 2, 3982 }, 3983 alloc: &Allocation{ 3984 ClientStatus: AllocClientStatusFailed, 3985 TaskStates: map[string]*TaskState{"foo": {State: "start", 3986 StartedAt: now.Add(-1 * time.Hour), 3987 FinishedAt: now.Add(-2 * time.Second)}}, 3988 RescheduleTracker: &RescheduleTracker{ 3989 Events: []*RescheduleEvent{ 3990 { 3991 RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), 3992 Delay: 5 * time.Second, 3993 }, 3994 { 3995 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 3996 Delay: 5 * time.Second, 3997 }, 3998 }, 3999 }}, 4000 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4001 expectedRescheduleEligible: false, 4002 }, 4003 { 4004 desc: "exponential delay - no reschedule tracker", 4005 reschedulePolicy: &ReschedulePolicy{ 4006 DelayFunction: "exponential", 4007 Delay: 5 * time.Second, 4008 MaxDelay: 90 * time.Second, 4009 Unlimited: true, 4010 }, 4011 alloc: &Allocation{ 4012 ClientStatus: AllocClientStatusFailed, 4013 TaskStates: map[string]*TaskState{"foo": {State: "start", 4014 StartedAt: now.Add(-1 * time.Hour), 4015 FinishedAt: now.Add(-2 * time.Second)}}, 4016 }, 4017 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4018 expectedRescheduleEligible: true, 4019 }, 4020 { 4021 desc: "exponential delay with reschedule tracker", 4022 reschedulePolicy: &ReschedulePolicy{ 4023 DelayFunction: "exponential", 4024 Delay: 5 * time.Second, 4025 MaxDelay: 90 * time.Second, 4026 Unlimited: true, 4027 }, 4028 alloc: &Allocation{ 4029 ClientStatus: AllocClientStatusFailed, 4030 TaskStates: map[string]*TaskState{"foo": {State: "start", 4031 StartedAt: now.Add(-1 * time.Hour), 4032 FinishedAt: now.Add(-2 * time.Second)}}, 4033 RescheduleTracker: &RescheduleTracker{ 4034 Events: []*RescheduleEvent{ 4035 { 4036 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4037 Delay: 5 * time.Second, 4038 }, 4039 { 4040 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4041 Delay: 10 * time.Second, 4042 }, 4043 { 4044 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4045 Delay: 20 * time.Second, 4046 }, 4047 }, 4048 }}, 4049 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 4050 expectedRescheduleEligible: true, 4051 }, 4052 { 4053 desc: "exponential delay with delay ceiling reached", 4054 reschedulePolicy: &ReschedulePolicy{ 4055 DelayFunction: "exponential", 4056 Delay: 5 * time.Second, 4057 MaxDelay: 90 * time.Second, 4058 Unlimited: true, 4059 }, 4060 alloc: &Allocation{ 4061 ClientStatus: AllocClientStatusFailed, 4062 TaskStates: map[string]*TaskState{"foo": {State: "start", 4063 StartedAt: now.Add(-1 * time.Hour), 4064 FinishedAt: now.Add(-15 * time.Second)}}, 4065 RescheduleTracker: &RescheduleTracker{ 4066 Events: []*RescheduleEvent{ 4067 { 4068 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4069 Delay: 5 * time.Second, 4070 }, 4071 { 4072 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4073 Delay: 10 * time.Second, 4074 }, 4075 { 4076 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4077 Delay: 20 * time.Second, 4078 }, 4079 { 4080 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4081 Delay: 40 * time.Second, 4082 }, 4083 { 4084 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 4085 Delay: 80 * time.Second, 4086 }, 4087 }, 4088 }}, 4089 expectedRescheduleTime: now.Add(-15 * time.Second).Add(90 * time.Second), 4090 expectedRescheduleEligible: true, 4091 }, 4092 { 4093 // Test case where most recent reschedule ran longer than delay ceiling 4094 desc: "exponential delay, delay ceiling reset condition met", 4095 reschedulePolicy: &ReschedulePolicy{ 4096 DelayFunction: "exponential", 4097 Delay: 5 * time.Second, 4098 MaxDelay: 90 * time.Second, 4099 Unlimited: true, 4100 }, 4101 alloc: &Allocation{ 4102 ClientStatus: AllocClientStatusFailed, 4103 TaskStates: map[string]*TaskState{"foo": {State: "start", 4104 StartedAt: now.Add(-1 * time.Hour), 4105 FinishedAt: now.Add(-15 * time.Minute)}}, 4106 RescheduleTracker: &RescheduleTracker{ 4107 Events: []*RescheduleEvent{ 4108 { 4109 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4110 Delay: 5 * time.Second, 4111 }, 4112 { 4113 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4114 Delay: 10 * time.Second, 4115 }, 4116 { 4117 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4118 Delay: 20 * time.Second, 4119 }, 4120 { 4121 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4122 Delay: 40 * time.Second, 4123 }, 4124 { 4125 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4126 Delay: 80 * time.Second, 4127 }, 4128 { 4129 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4130 Delay: 90 * time.Second, 4131 }, 4132 { 4133 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4134 Delay: 90 * time.Second, 4135 }, 4136 }, 4137 }}, 4138 expectedRescheduleTime: now.Add(-15 * time.Minute).Add(5 * time.Second), 4139 expectedRescheduleEligible: true, 4140 }, 4141 { 4142 desc: "fibonacci delay - no reschedule tracker", 4143 reschedulePolicy: &ReschedulePolicy{ 4144 DelayFunction: "fibonacci", 4145 Delay: 5 * time.Second, 4146 MaxDelay: 90 * time.Second, 4147 Unlimited: true, 4148 }, 4149 alloc: &Allocation{ 4150 ClientStatus: AllocClientStatusFailed, 4151 TaskStates: map[string]*TaskState{"foo": {State: "start", 4152 StartedAt: now.Add(-1 * time.Hour), 4153 FinishedAt: now.Add(-2 * time.Second)}}}, 4154 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4155 expectedRescheduleEligible: true, 4156 }, 4157 { 4158 desc: "fibonacci delay with reschedule tracker", 4159 reschedulePolicy: &ReschedulePolicy{ 4160 DelayFunction: "fibonacci", 4161 Delay: 5 * time.Second, 4162 MaxDelay: 90 * time.Second, 4163 Unlimited: true, 4164 }, 4165 alloc: &Allocation{ 4166 ClientStatus: AllocClientStatusFailed, 4167 TaskStates: map[string]*TaskState{"foo": {State: "start", 4168 StartedAt: now.Add(-1 * time.Hour), 4169 FinishedAt: now.Add(-2 * time.Second)}}, 4170 RescheduleTracker: &RescheduleTracker{ 4171 Events: []*RescheduleEvent{ 4172 { 4173 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4174 Delay: 5 * time.Second, 4175 }, 4176 { 4177 RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(), 4178 Delay: 5 * time.Second, 4179 }, 4180 }, 4181 }}, 4182 expectedRescheduleTime: now.Add(-2 * time.Second).Add(10 * time.Second), 4183 expectedRescheduleEligible: true, 4184 }, 4185 { 4186 desc: "fibonacci delay with more events", 4187 reschedulePolicy: &ReschedulePolicy{ 4188 DelayFunction: "fibonacci", 4189 Delay: 5 * time.Second, 4190 MaxDelay: 90 * time.Second, 4191 Unlimited: true, 4192 }, 4193 alloc: &Allocation{ 4194 ClientStatus: AllocClientStatusFailed, 4195 TaskStates: map[string]*TaskState{"foo": {State: "start", 4196 StartedAt: now.Add(-1 * time.Hour), 4197 FinishedAt: now.Add(-2 * time.Second)}}, 4198 RescheduleTracker: &RescheduleTracker{ 4199 Events: []*RescheduleEvent{ 4200 { 4201 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4202 Delay: 5 * time.Second, 4203 }, 4204 { 4205 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4206 Delay: 5 * time.Second, 4207 }, 4208 { 4209 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4210 Delay: 10 * time.Second, 4211 }, 4212 { 4213 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4214 Delay: 15 * time.Second, 4215 }, 4216 { 4217 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4218 Delay: 25 * time.Second, 4219 }, 4220 }, 4221 }}, 4222 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 4223 expectedRescheduleEligible: true, 4224 }, 4225 { 4226 desc: "fibonacci delay with delay ceiling reached", 4227 reschedulePolicy: &ReschedulePolicy{ 4228 DelayFunction: "fibonacci", 4229 Delay: 5 * time.Second, 4230 MaxDelay: 50 * time.Second, 4231 Unlimited: true, 4232 }, 4233 alloc: &Allocation{ 4234 ClientStatus: AllocClientStatusFailed, 4235 TaskStates: map[string]*TaskState{"foo": {State: "start", 4236 StartedAt: now.Add(-1 * time.Hour), 4237 FinishedAt: now.Add(-15 * time.Second)}}, 4238 RescheduleTracker: &RescheduleTracker{ 4239 Events: []*RescheduleEvent{ 4240 { 4241 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4242 Delay: 5 * time.Second, 4243 }, 4244 { 4245 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4246 Delay: 5 * time.Second, 4247 }, 4248 { 4249 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4250 Delay: 10 * time.Second, 4251 }, 4252 { 4253 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4254 Delay: 15 * time.Second, 4255 }, 4256 { 4257 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4258 Delay: 25 * time.Second, 4259 }, 4260 { 4261 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 4262 Delay: 40 * time.Second, 4263 }, 4264 }, 4265 }}, 4266 expectedRescheduleTime: now.Add(-15 * time.Second).Add(50 * time.Second), 4267 expectedRescheduleEligible: true, 4268 }, 4269 { 4270 desc: "fibonacci delay with delay reset condition met", 4271 reschedulePolicy: &ReschedulePolicy{ 4272 DelayFunction: "fibonacci", 4273 Delay: 5 * time.Second, 4274 MaxDelay: 50 * time.Second, 4275 Unlimited: true, 4276 }, 4277 alloc: &Allocation{ 4278 ClientStatus: AllocClientStatusFailed, 4279 TaskStates: map[string]*TaskState{"foo": {State: "start", 4280 StartedAt: now.Add(-1 * time.Hour), 4281 FinishedAt: now.Add(-5 * time.Minute)}}, 4282 RescheduleTracker: &RescheduleTracker{ 4283 Events: []*RescheduleEvent{ 4284 { 4285 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4286 Delay: 5 * time.Second, 4287 }, 4288 { 4289 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4290 Delay: 5 * time.Second, 4291 }, 4292 { 4293 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4294 Delay: 10 * time.Second, 4295 }, 4296 { 4297 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4298 Delay: 15 * time.Second, 4299 }, 4300 { 4301 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4302 Delay: 25 * time.Second, 4303 }, 4304 { 4305 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4306 Delay: 40 * time.Second, 4307 }, 4308 }, 4309 }}, 4310 expectedRescheduleTime: now.Add(-5 * time.Minute).Add(5 * time.Second), 4311 expectedRescheduleEligible: true, 4312 }, 4313 { 4314 desc: "fibonacci delay with the most recent event that reset delay value", 4315 reschedulePolicy: &ReschedulePolicy{ 4316 DelayFunction: "fibonacci", 4317 Delay: 5 * time.Second, 4318 MaxDelay: 50 * time.Second, 4319 Unlimited: true, 4320 }, 4321 alloc: &Allocation{ 4322 ClientStatus: AllocClientStatusFailed, 4323 TaskStates: map[string]*TaskState{"foo": {State: "start", 4324 StartedAt: now.Add(-1 * time.Hour), 4325 FinishedAt: now.Add(-5 * time.Second)}}, 4326 RescheduleTracker: &RescheduleTracker{ 4327 Events: []*RescheduleEvent{ 4328 { 4329 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4330 Delay: 5 * time.Second, 4331 }, 4332 { 4333 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4334 Delay: 5 * time.Second, 4335 }, 4336 { 4337 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4338 Delay: 10 * time.Second, 4339 }, 4340 { 4341 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4342 Delay: 15 * time.Second, 4343 }, 4344 { 4345 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4346 Delay: 25 * time.Second, 4347 }, 4348 { 4349 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4350 Delay: 40 * time.Second, 4351 }, 4352 { 4353 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4354 Delay: 50 * time.Second, 4355 }, 4356 { 4357 RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(), 4358 Delay: 5 * time.Second, 4359 }, 4360 }, 4361 }}, 4362 expectedRescheduleTime: now.Add(-5 * time.Second).Add(5 * time.Second), 4363 expectedRescheduleEligible: true, 4364 }, 4365 } 4366 for _, tc := range testCases { 4367 t.Run(tc.desc, func(t *testing.T) { 4368 require := require.New(t) 4369 j := testJob() 4370 if tc.reschedulePolicy != nil { 4371 j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy 4372 } 4373 tc.alloc.Job = j 4374 tc.alloc.TaskGroup = j.TaskGroups[0].Name 4375 reschedTime, allowed := tc.alloc.NextRescheduleTime() 4376 require.Equal(tc.expectedRescheduleEligible, allowed) 4377 require.Equal(tc.expectedRescheduleTime, reschedTime) 4378 }) 4379 } 4380 4381 } 4382 4383 func TestAllocation_WaitClientStop(t *testing.T) { 4384 type testCase struct { 4385 desc string 4386 stop time.Duration 4387 status string 4388 expectedShould bool 4389 expectedRescheduleTime time.Time 4390 } 4391 now := time.Now().UTC() 4392 testCases := []testCase{ 4393 { 4394 desc: "running", 4395 stop: 2 * time.Second, 4396 status: AllocClientStatusRunning, 4397 expectedShould: true, 4398 }, 4399 { 4400 desc: "no stop_after_client_disconnect", 4401 status: AllocClientStatusLost, 4402 expectedShould: false, 4403 }, 4404 { 4405 desc: "stop", 4406 status: AllocClientStatusLost, 4407 stop: 2 * time.Second, 4408 expectedShould: true, 4409 expectedRescheduleTime: now.Add((2 + 5) * time.Second), 4410 }, 4411 } 4412 for _, tc := range testCases { 4413 t.Run(tc.desc, func(t *testing.T) { 4414 j := testJob() 4415 a := &Allocation{ 4416 ClientStatus: tc.status, 4417 Job: j, 4418 TaskStates: map[string]*TaskState{}, 4419 } 4420 4421 if tc.status == AllocClientStatusLost { 4422 a.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost) 4423 } 4424 4425 j.TaskGroups[0].StopAfterClientDisconnect = &tc.stop 4426 a.TaskGroup = j.TaskGroups[0].Name 4427 4428 require.Equal(t, tc.expectedShould, a.ShouldClientStop()) 4429 4430 if !tc.expectedShould || tc.status != AllocClientStatusLost { 4431 return 4432 } 4433 4434 // the reschedTime is close to the expectedRescheduleTime 4435 reschedTime := a.WaitClientStop() 4436 e := reschedTime.Unix() - tc.expectedRescheduleTime.Unix() 4437 require.Less(t, e, int64(2)) 4438 }) 4439 } 4440 } 4441 4442 func TestAllocation_Canonicalize_Old(t *testing.T) { 4443 alloc := MockAlloc() 4444 alloc.AllocatedResources = nil 4445 alloc.TaskResources = map[string]*Resources{ 4446 "web": { 4447 CPU: 500, 4448 MemoryMB: 256, 4449 Networks: []*NetworkResource{ 4450 { 4451 Device: "eth0", 4452 IP: "192.168.0.100", 4453 ReservedPorts: []Port{{Label: "admin", Value: 5000}}, 4454 MBits: 50, 4455 DynamicPorts: []Port{{Label: "http", Value: 9876}}, 4456 }, 4457 }, 4458 }, 4459 } 4460 alloc.SharedResources = &Resources{ 4461 DiskMB: 150, 4462 } 4463 alloc.Canonicalize() 4464 4465 expected := &AllocatedResources{ 4466 Tasks: map[string]*AllocatedTaskResources{ 4467 "web": { 4468 Cpu: AllocatedCpuResources{ 4469 CpuShares: 500, 4470 }, 4471 Memory: AllocatedMemoryResources{ 4472 MemoryMB: 256, 4473 }, 4474 Networks: []*NetworkResource{ 4475 { 4476 Device: "eth0", 4477 IP: "192.168.0.100", 4478 ReservedPorts: []Port{{Label: "admin", Value: 5000}}, 4479 MBits: 50, 4480 DynamicPorts: []Port{{Label: "http", Value: 9876}}, 4481 }, 4482 }, 4483 }, 4484 }, 4485 Shared: AllocatedSharedResources{ 4486 DiskMB: 150, 4487 }, 4488 } 4489 4490 require.Equal(t, expected, alloc.AllocatedResources) 4491 } 4492 4493 // TestAllocation_Canonicalize_New asserts that an alloc with latest 4494 // schema isn't modified with Canonicalize 4495 func TestAllocation_Canonicalize_New(t *testing.T) { 4496 alloc := MockAlloc() 4497 copy := alloc.Copy() 4498 4499 alloc.Canonicalize() 4500 require.Equal(t, copy, alloc) 4501 } 4502 4503 func TestRescheduleTracker_Copy(t *testing.T) { 4504 type testCase struct { 4505 original *RescheduleTracker 4506 expected *RescheduleTracker 4507 } 4508 4509 cases := []testCase{ 4510 {nil, nil}, 4511 {&RescheduleTracker{Events: []*RescheduleEvent{ 4512 {RescheduleTime: 2, 4513 PrevAllocID: "12", 4514 PrevNodeID: "12", 4515 Delay: 30 * time.Second}, 4516 }}, &RescheduleTracker{Events: []*RescheduleEvent{ 4517 {RescheduleTime: 2, 4518 PrevAllocID: "12", 4519 PrevNodeID: "12", 4520 Delay: 30 * time.Second}, 4521 }}}, 4522 } 4523 4524 for _, tc := range cases { 4525 if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) { 4526 t.Fatalf("expected %v but got %v", *tc.expected, *got) 4527 } 4528 } 4529 } 4530 4531 func TestVault_Validate(t *testing.T) { 4532 v := &Vault{ 4533 Env: true, 4534 ChangeMode: VaultChangeModeNoop, 4535 } 4536 4537 if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") { 4538 t.Fatalf("Expected policy list empty error") 4539 } 4540 4541 v.Policies = []string{"foo", "root"} 4542 v.ChangeMode = VaultChangeModeSignal 4543 4544 err := v.Validate() 4545 if err == nil { 4546 t.Fatalf("Expected validation errors") 4547 } 4548 4549 if !strings.Contains(err.Error(), "Signal must") { 4550 t.Fatalf("Expected signal empty error") 4551 } 4552 if !strings.Contains(err.Error(), "root") { 4553 t.Fatalf("Expected root error") 4554 } 4555 } 4556 4557 func TestParameterizedJobConfig_Validate(t *testing.T) { 4558 d := &ParameterizedJobConfig{ 4559 Payload: "foo", 4560 } 4561 4562 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") { 4563 t.Fatalf("Expected unknown payload requirement: %v", err) 4564 } 4565 4566 d.Payload = DispatchPayloadOptional 4567 d.MetaOptional = []string{"foo", "bar"} 4568 d.MetaRequired = []string{"bar", "baz"} 4569 4570 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") { 4571 t.Fatalf("Expected meta not being disjoint error: %v", err) 4572 } 4573 } 4574 4575 func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { 4576 job := testJob() 4577 job.ParameterizedJob = &ParameterizedJobConfig{ 4578 Payload: DispatchPayloadOptional, 4579 } 4580 job.Type = JobTypeSystem 4581 4582 if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") { 4583 t.Fatalf("Expected bad scheduler tpye: %v", err) 4584 } 4585 } 4586 4587 func TestParameterizedJobConfig_Canonicalize(t *testing.T) { 4588 d := &ParameterizedJobConfig{} 4589 d.Canonicalize() 4590 if d.Payload != DispatchPayloadOptional { 4591 t.Fatalf("Canonicalize failed") 4592 } 4593 } 4594 4595 func TestDispatchPayloadConfig_Validate(t *testing.T) { 4596 d := &DispatchPayloadConfig{ 4597 File: "foo", 4598 } 4599 4600 // task/local/haha 4601 if err := d.Validate(); err != nil { 4602 t.Fatalf("bad: %v", err) 4603 } 4604 4605 // task/haha 4606 d.File = "../haha" 4607 if err := d.Validate(); err != nil { 4608 t.Fatalf("bad: %v", err) 4609 } 4610 4611 // ../haha 4612 d.File = "../../../haha" 4613 if err := d.Validate(); err == nil { 4614 t.Fatalf("bad: %v", err) 4615 } 4616 } 4617 4618 func TestIsRecoverable(t *testing.T) { 4619 if IsRecoverable(nil) { 4620 t.Errorf("nil should not be recoverable") 4621 } 4622 if IsRecoverable(NewRecoverableError(nil, true)) { 4623 t.Errorf("NewRecoverableError(nil, true) should not be recoverable") 4624 } 4625 if IsRecoverable(fmt.Errorf("i promise im recoverable")) { 4626 t.Errorf("Custom errors should not be recoverable") 4627 } 4628 if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) { 4629 t.Errorf("Explicitly unrecoverable errors should not be recoverable") 4630 } 4631 if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) { 4632 t.Errorf("Explicitly recoverable errors *should* be recoverable") 4633 } 4634 } 4635 4636 func TestACLTokenValidate(t *testing.T) { 4637 tk := &ACLToken{} 4638 4639 // Missing a type 4640 err := tk.Validate() 4641 assert.NotNil(t, err) 4642 if !strings.Contains(err.Error(), "client or management") { 4643 t.Fatalf("bad: %v", err) 4644 } 4645 4646 // Missing policies 4647 tk.Type = ACLClientToken 4648 err = tk.Validate() 4649 assert.NotNil(t, err) 4650 if !strings.Contains(err.Error(), "missing policies") { 4651 t.Fatalf("bad: %v", err) 4652 } 4653 4654 // Invalid policies 4655 tk.Type = ACLManagementToken 4656 tk.Policies = []string{"foo"} 4657 err = tk.Validate() 4658 assert.NotNil(t, err) 4659 if !strings.Contains(err.Error(), "associated with policies") { 4660 t.Fatalf("bad: %v", err) 4661 } 4662 4663 // Name too long policies 4664 tk.Name = "" 4665 for i := 0; i < 8; i++ { 4666 tk.Name += uuid.Generate() 4667 } 4668 tk.Policies = nil 4669 err = tk.Validate() 4670 assert.NotNil(t, err) 4671 if !strings.Contains(err.Error(), "too long") { 4672 t.Fatalf("bad: %v", err) 4673 } 4674 4675 // Make it valid 4676 tk.Name = "foo" 4677 err = tk.Validate() 4678 assert.Nil(t, err) 4679 } 4680 4681 func TestACLTokenPolicySubset(t *testing.T) { 4682 tk := &ACLToken{ 4683 Type: ACLClientToken, 4684 Policies: []string{"foo", "bar", "baz"}, 4685 } 4686 4687 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 4688 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 4689 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 4690 assert.Equal(t, true, tk.PolicySubset([]string{})) 4691 assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"})) 4692 assert.Equal(t, false, tk.PolicySubset([]string{"new"})) 4693 4694 tk = &ACLToken{ 4695 Type: ACLManagementToken, 4696 } 4697 4698 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 4699 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 4700 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 4701 assert.Equal(t, true, tk.PolicySubset([]string{})) 4702 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"})) 4703 assert.Equal(t, true, tk.PolicySubset([]string{"new"})) 4704 } 4705 4706 func TestACLTokenSetHash(t *testing.T) { 4707 tk := &ACLToken{ 4708 Name: "foo", 4709 Type: ACLClientToken, 4710 Policies: []string{"foo", "bar"}, 4711 Global: false, 4712 } 4713 out1 := tk.SetHash() 4714 assert.NotNil(t, out1) 4715 assert.NotNil(t, tk.Hash) 4716 assert.Equal(t, out1, tk.Hash) 4717 4718 tk.Policies = []string{"foo"} 4719 out2 := tk.SetHash() 4720 assert.NotNil(t, out2) 4721 assert.NotNil(t, tk.Hash) 4722 assert.Equal(t, out2, tk.Hash) 4723 assert.NotEqual(t, out1, out2) 4724 } 4725 4726 func TestACLPolicySetHash(t *testing.T) { 4727 ap := &ACLPolicy{ 4728 Name: "foo", 4729 Description: "great policy", 4730 Rules: "node { policy = \"read\" }", 4731 } 4732 out1 := ap.SetHash() 4733 assert.NotNil(t, out1) 4734 assert.NotNil(t, ap.Hash) 4735 assert.Equal(t, out1, ap.Hash) 4736 4737 ap.Rules = "node { policy = \"write\" }" 4738 out2 := ap.SetHash() 4739 assert.NotNil(t, out2) 4740 assert.NotNil(t, ap.Hash) 4741 assert.Equal(t, out2, ap.Hash) 4742 assert.NotEqual(t, out1, out2) 4743 } 4744 4745 func TestTaskEventPopulate(t *testing.T) { 4746 prepopulatedEvent := NewTaskEvent(TaskSetup) 4747 prepopulatedEvent.DisplayMessage = "Hola" 4748 testcases := []struct { 4749 event *TaskEvent 4750 expectedMsg string 4751 }{ 4752 {nil, ""}, 4753 {prepopulatedEvent, "Hola"}, 4754 {NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"}, 4755 {NewTaskEvent(TaskStarted), "Task started by client"}, 4756 {NewTaskEvent(TaskReceived), "Task received by client"}, 4757 {NewTaskEvent(TaskFailedValidation), "Validation of task failed"}, 4758 {NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"}, 4759 {NewTaskEvent(TaskSetupFailure), "Task setup failed"}, 4760 {NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"}, 4761 {NewTaskEvent(TaskDriverFailure), "Failed to start task"}, 4762 {NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"}, 4763 {NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"}, 4764 {NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"}, 4765 {NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"}, 4766 {NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"}, 4767 {NewTaskEvent(TaskKilling), "Sent interrupt"}, 4768 {NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"}, 4769 {NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"}, 4770 {NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"}, 4771 {NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""}, 4772 {NewTaskEvent(TaskKilled), "Task successfully killed"}, 4773 {NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"}, 4774 {NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"}, 4775 {NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"}, 4776 {NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"}, 4777 {NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"}, 4778 {NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"}, 4779 {NewTaskEvent(TaskSignaling), "Task being sent a signal"}, 4780 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"}, 4781 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"}, 4782 {NewTaskEvent(TaskRestartSignal), "Task signaled to restart"}, 4783 {NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"}, 4784 {NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"}, 4785 {NewTaskEvent("Unknown Type, No message"), ""}, 4786 {NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"}, 4787 } 4788 4789 for _, tc := range testcases { 4790 tc.event.PopulateEventDisplayMessage() 4791 if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg { 4792 t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage) 4793 } 4794 } 4795 } 4796 4797 func TestNetworkResourcesEquals(t *testing.T) { 4798 require := require.New(t) 4799 var networkResourcesTest = []struct { 4800 input []*NetworkResource 4801 expected bool 4802 errorMsg string 4803 }{ 4804 { 4805 []*NetworkResource{ 4806 { 4807 IP: "10.0.0.1", 4808 MBits: 50, 4809 ReservedPorts: []Port{{"web", 80, 0}}, 4810 }, 4811 { 4812 IP: "10.0.0.1", 4813 MBits: 50, 4814 ReservedPorts: []Port{{"web", 80, 0}}, 4815 }, 4816 }, 4817 true, 4818 "Equal network resources should return true", 4819 }, 4820 { 4821 []*NetworkResource{ 4822 { 4823 IP: "10.0.0.0", 4824 MBits: 50, 4825 ReservedPorts: []Port{{"web", 80, 0}}, 4826 }, 4827 { 4828 IP: "10.0.0.1", 4829 MBits: 50, 4830 ReservedPorts: []Port{{"web", 80, 0}}, 4831 }, 4832 }, 4833 false, 4834 "Different IP addresses should return false", 4835 }, 4836 { 4837 []*NetworkResource{ 4838 { 4839 IP: "10.0.0.1", 4840 MBits: 40, 4841 ReservedPorts: []Port{{"web", 80, 0}}, 4842 }, 4843 { 4844 IP: "10.0.0.1", 4845 MBits: 50, 4846 ReservedPorts: []Port{{"web", 80, 0}}, 4847 }, 4848 }, 4849 false, 4850 "Different MBits values should return false", 4851 }, 4852 { 4853 []*NetworkResource{ 4854 { 4855 IP: "10.0.0.1", 4856 MBits: 50, 4857 ReservedPorts: []Port{{"web", 80, 0}}, 4858 }, 4859 { 4860 IP: "10.0.0.1", 4861 MBits: 50, 4862 ReservedPorts: []Port{{"web", 80, 0}, {"web", 80, 0}}, 4863 }, 4864 }, 4865 false, 4866 "Different ReservedPorts lengths should return false", 4867 }, 4868 { 4869 []*NetworkResource{ 4870 { 4871 IP: "10.0.0.1", 4872 MBits: 50, 4873 ReservedPorts: []Port{{"web", 80, 0}}, 4874 }, 4875 { 4876 IP: "10.0.0.1", 4877 MBits: 50, 4878 ReservedPorts: []Port{}, 4879 }, 4880 }, 4881 false, 4882 "Empty and non empty ReservedPorts values should return false", 4883 }, 4884 { 4885 []*NetworkResource{ 4886 { 4887 IP: "10.0.0.1", 4888 MBits: 50, 4889 ReservedPorts: []Port{{"web", 80, 0}}, 4890 }, 4891 { 4892 IP: "10.0.0.1", 4893 MBits: 50, 4894 ReservedPorts: []Port{{"notweb", 80, 0}}, 4895 }, 4896 }, 4897 false, 4898 "Different valued ReservedPorts values should return false", 4899 }, 4900 { 4901 []*NetworkResource{ 4902 { 4903 IP: "10.0.0.1", 4904 MBits: 50, 4905 DynamicPorts: []Port{{"web", 80, 0}}, 4906 }, 4907 { 4908 IP: "10.0.0.1", 4909 MBits: 50, 4910 DynamicPorts: []Port{{"web", 80, 0}, {"web", 80, 0}}, 4911 }, 4912 }, 4913 false, 4914 "Different DynamicPorts lengths should return false", 4915 }, 4916 { 4917 []*NetworkResource{ 4918 { 4919 IP: "10.0.0.1", 4920 MBits: 50, 4921 DynamicPorts: []Port{{"web", 80, 0}}, 4922 }, 4923 { 4924 IP: "10.0.0.1", 4925 MBits: 50, 4926 DynamicPorts: []Port{}, 4927 }, 4928 }, 4929 false, 4930 "Empty and non empty DynamicPorts values should return false", 4931 }, 4932 { 4933 []*NetworkResource{ 4934 { 4935 IP: "10.0.0.1", 4936 MBits: 50, 4937 DynamicPorts: []Port{{"web", 80, 0}}, 4938 }, 4939 { 4940 IP: "10.0.0.1", 4941 MBits: 50, 4942 DynamicPorts: []Port{{"notweb", 80, 0}}, 4943 }, 4944 }, 4945 false, 4946 "Different valued DynamicPorts values should return false", 4947 }, 4948 } 4949 for _, testCase := range networkResourcesTest { 4950 first := testCase.input[0] 4951 second := testCase.input[1] 4952 require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg) 4953 } 4954 } 4955 4956 func TestNode_Canonicalize(t *testing.T) { 4957 t.Parallel() 4958 require := require.New(t) 4959 4960 // Make sure the eligiblity is set properly 4961 node := &Node{} 4962 node.Canonicalize() 4963 require.Equal(NodeSchedulingEligible, node.SchedulingEligibility) 4964 4965 node = &Node{ 4966 Drain: true, 4967 } 4968 node.Canonicalize() 4969 require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility) 4970 } 4971 4972 func TestNode_Copy(t *testing.T) { 4973 t.Parallel() 4974 require := require.New(t) 4975 4976 node := &Node{ 4977 ID: uuid.Generate(), 4978 SecretID: uuid.Generate(), 4979 Datacenter: "dc1", 4980 Name: "foobar", 4981 Attributes: map[string]string{ 4982 "kernel.name": "linux", 4983 "arch": "x86", 4984 "nomad.version": "0.5.0", 4985 "driver.exec": "1", 4986 "driver.mock_driver": "1", 4987 }, 4988 Resources: &Resources{ 4989 CPU: 4000, 4990 MemoryMB: 8192, 4991 DiskMB: 100 * 1024, 4992 Networks: []*NetworkResource{ 4993 { 4994 Device: "eth0", 4995 CIDR: "192.168.0.100/32", 4996 MBits: 1000, 4997 }, 4998 }, 4999 }, 5000 Reserved: &Resources{ 5001 CPU: 100, 5002 MemoryMB: 256, 5003 DiskMB: 4 * 1024, 5004 Networks: []*NetworkResource{ 5005 { 5006 Device: "eth0", 5007 IP: "192.168.0.100", 5008 ReservedPorts: []Port{{Label: "ssh", Value: 22}}, 5009 MBits: 1, 5010 }, 5011 }, 5012 }, 5013 NodeResources: &NodeResources{ 5014 Cpu: NodeCpuResources{ 5015 CpuShares: 4000, 5016 }, 5017 Memory: NodeMemoryResources{ 5018 MemoryMB: 8192, 5019 }, 5020 Disk: NodeDiskResources{ 5021 DiskMB: 100 * 1024, 5022 }, 5023 Networks: []*NetworkResource{ 5024 { 5025 Device: "eth0", 5026 CIDR: "192.168.0.100/32", 5027 MBits: 1000, 5028 }, 5029 }, 5030 }, 5031 ReservedResources: &NodeReservedResources{ 5032 Cpu: NodeReservedCpuResources{ 5033 CpuShares: 100, 5034 }, 5035 Memory: NodeReservedMemoryResources{ 5036 MemoryMB: 256, 5037 }, 5038 Disk: NodeReservedDiskResources{ 5039 DiskMB: 4 * 1024, 5040 }, 5041 Networks: NodeReservedNetworkResources{ 5042 ReservedHostPorts: "22", 5043 }, 5044 }, 5045 Links: map[string]string{ 5046 "consul": "foobar.dc1", 5047 }, 5048 Meta: map[string]string{ 5049 "pci-dss": "true", 5050 "database": "mysql", 5051 "version": "5.6", 5052 }, 5053 NodeClass: "linux-medium-pci", 5054 Status: NodeStatusReady, 5055 SchedulingEligibility: NodeSchedulingEligible, 5056 Drivers: map[string]*DriverInfo{ 5057 "mock_driver": { 5058 Attributes: map[string]string{"running": "1"}, 5059 Detected: true, 5060 Healthy: true, 5061 HealthDescription: "Currently active", 5062 UpdateTime: time.Now(), 5063 }, 5064 }, 5065 } 5066 node.ComputeClass() 5067 5068 node2 := node.Copy() 5069 5070 require.Equal(node.Attributes, node2.Attributes) 5071 require.Equal(node.Resources, node2.Resources) 5072 require.Equal(node.Reserved, node2.Reserved) 5073 require.Equal(node.Links, node2.Links) 5074 require.Equal(node.Meta, node2.Meta) 5075 require.Equal(node.Events, node2.Events) 5076 require.Equal(node.DrainStrategy, node2.DrainStrategy) 5077 require.Equal(node.Drivers, node2.Drivers) 5078 } 5079 5080 func TestSpread_Validate(t *testing.T) { 5081 type tc struct { 5082 spread *Spread 5083 err error 5084 name string 5085 } 5086 5087 testCases := []tc{ 5088 { 5089 spread: &Spread{}, 5090 err: fmt.Errorf("Missing spread attribute"), 5091 name: "empty spread", 5092 }, 5093 { 5094 spread: &Spread{ 5095 Attribute: "${node.datacenter}", 5096 Weight: -1, 5097 }, 5098 err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), 5099 name: "Invalid weight", 5100 }, 5101 { 5102 spread: &Spread{ 5103 Attribute: "${node.datacenter}", 5104 Weight: 110, 5105 }, 5106 err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), 5107 name: "Invalid weight", 5108 }, 5109 { 5110 spread: &Spread{ 5111 Attribute: "${node.datacenter}", 5112 Weight: 50, 5113 SpreadTarget: []*SpreadTarget{ 5114 { 5115 Value: "dc1", 5116 Percent: 25, 5117 }, 5118 { 5119 Value: "dc2", 5120 Percent: 150, 5121 }, 5122 }, 5123 }, 5124 err: fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"), 5125 name: "Invalid percentages", 5126 }, 5127 { 5128 spread: &Spread{ 5129 Attribute: "${node.datacenter}", 5130 Weight: 50, 5131 SpreadTarget: []*SpreadTarget{ 5132 { 5133 Value: "dc1", 5134 Percent: 75, 5135 }, 5136 { 5137 Value: "dc2", 5138 Percent: 75, 5139 }, 5140 }, 5141 }, 5142 err: fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150), 5143 name: "Invalid percentages", 5144 }, 5145 { 5146 spread: &Spread{ 5147 Attribute: "${node.datacenter}", 5148 Weight: 50, 5149 SpreadTarget: []*SpreadTarget{ 5150 { 5151 Value: "dc1", 5152 Percent: 25, 5153 }, 5154 { 5155 Value: "dc1", 5156 Percent: 50, 5157 }, 5158 }, 5159 }, 5160 err: fmt.Errorf("Spread target value \"dc1\" already defined"), 5161 name: "No spread targets", 5162 }, 5163 { 5164 spread: &Spread{ 5165 Attribute: "${node.datacenter}", 5166 Weight: 50, 5167 SpreadTarget: []*SpreadTarget{ 5168 { 5169 Value: "dc1", 5170 Percent: 25, 5171 }, 5172 { 5173 Value: "dc2", 5174 Percent: 50, 5175 }, 5176 }, 5177 }, 5178 err: nil, 5179 name: "Valid spread", 5180 }, 5181 } 5182 5183 for _, tc := range testCases { 5184 t.Run(tc.name, func(t *testing.T) { 5185 err := tc.spread.Validate() 5186 if tc.err != nil { 5187 require.NotNil(t, err) 5188 require.Contains(t, err.Error(), tc.err.Error()) 5189 } else { 5190 require.Nil(t, err) 5191 } 5192 }) 5193 } 5194 } 5195 5196 func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) { 5197 require := require.New(t) 5198 cases := []struct { 5199 Input string 5200 Parsed []uint64 5201 Err bool 5202 }{ 5203 { 5204 "1,2,3", 5205 []uint64{1, 2, 3}, 5206 false, 5207 }, 5208 { 5209 "3,1,2,1,2,3,1-3", 5210 []uint64{1, 2, 3}, 5211 false, 5212 }, 5213 { 5214 "3-1", 5215 nil, 5216 true, 5217 }, 5218 { 5219 "1-3,2-4", 5220 []uint64{1, 2, 3, 4}, 5221 false, 5222 }, 5223 { 5224 "1-3,4,5-5,6,7,8-10", 5225 []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 5226 false, 5227 }, 5228 } 5229 5230 for i, tc := range cases { 5231 r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input} 5232 out, err := r.ParseReservedHostPorts() 5233 if (err != nil) != tc.Err { 5234 t.Fatalf("test case %d: %v", i, err) 5235 continue 5236 } 5237 5238 require.Equal(out, tc.Parsed) 5239 } 5240 }