// github.com/uchennaokeke444/nomad@v0.11.8/nomad/structs/structs_test.go

package structs

import (
	"fmt"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/helper/uuid"

	"github.com/kr/pretty"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestJob_Validate(t *testing.T) {
	j := &Job{}
	err := j.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "job region") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "job ID") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "job name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "namespace") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "job type") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[5].Error(), "priority") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[6].Error(), "datacenters") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[7].Error(), "task groups") {
		t.Fatalf("err: %s", err)
	}

	j = &Job{
		Type: "invalid-job-type",
	}
	err = j.Validate()
	if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	j = &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Error(), "Periodic") {
		t.Fatalf("err: %s", err)
	}

	j = &Job{
		Region:      "global",
		ID:          uuid.Generate(),
		Namespace:   "test",
		Name:        "my-job",
		Type:        JobTypeService,
		Priority:    50,
		Datacenters: []string{"dc1"},
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay:    10 * time.Second,
					Attempts: 10,
				},
			},
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay:    10 * time.Second,
					Attempts: 10,
				},
			},
			{
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay:    10 * time.Second,
					Attempts: 10,
				},
			},
		},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") {
		t.Fatalf("err: %s", err)
	}

	// test for empty datacenters
	j = &Job{
		Datacenters: []string{""},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Error(), "datacenter must be non-empty string") {
		t.Fatalf("err: %s", err)
	}
}

func TestJob_ValidateScaling(t *testing.T) {
	require := require.New(t)

	p := &ScalingPolicy{
		Policy:  nil, // allowed to be nil
		Min:     5,
		Max:     5,
		Enabled: true,
	}
	job := testJob()
	job.TaskGroups[0].Scaling = p
	job.TaskGroups[0].Count = 5

	require.NoError(job.Validate())

	// min <= max
	p.Max = 0
	p.Min = 10
	err := job.Validate()
	require.Error(err)
	mErr := err.(*multierror.Error)
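	// All of the scaling violations below (min > max, count < min, count > max)
	// are folded into a single aggregated multierror entry, hence Len == 1.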
	require.Len(mErr.Errors, 1)
	require.Contains(mErr.Errors[0].Error(), "maximum count must not be less than minimum count")
	require.Contains(mErr.Errors[0].Error(), "task group count must not be less than minimum count in scaling policy")
	require.Contains(mErr.Errors[0].Error(), "task group count must not be greater than maximum count in scaling policy")

	// count <= max
	p.Max = 0
	p.Min = 5
	job.TaskGroups[0].Count = 5
	err = job.Validate()
	require.Error(err)
	mErr = err.(*multierror.Error)
	require.Len(mErr.Errors, 1)
	require.Contains(mErr.Errors[0].Error(), "maximum count must not be less than minimum count")
	require.Contains(mErr.Errors[0].Error(), "task group count must not be greater than maximum count in scaling policy")

	// min <= count
	job.TaskGroups[0].Count = 0
	p.Min = 5
	p.Max = 5
	err = job.Validate()
	require.Error(err)
	mErr = err.(*multierror.Error)
	require.Len(mErr.Errors, 1)
	require.Contains(mErr.Errors[0].Error(), "task group count must not be less than minimum count in scaling policy")
}

func TestJob_Warnings(t *testing.T) {
	cases := []struct {
		Name     string
		Job      *Job
		Expected []string
	}{
		{
			Name:     "Higher counts for update stanza",
			Expected: []string{"max parallel count is greater"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
						Update: &UpdateStrategy{
							MaxParallel: 10,
						},
					},
				},
			},
		},
		{
			Name:     "AutoPromote mixed TaskGroups",
			Expected: []string{"auto_promote must be true for all groups"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Update: &UpdateStrategy{
							AutoPromote: true,
						},
					},
					{
						Update: &UpdateStrategy{
							AutoPromote: false,
						},
					},
				},
			},
		},
		{
			Name:     "Template.VaultGrace Deprecated",
			Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Tasks: []*Task{
							{
								Templates: []*Template{
									{
										VaultGrace: 1,
									},
								},
							},
						},
					},
				},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			warnings := c.Job.Warnings()
			if warnings == nil {
				if len(c.Expected) == 0 {
					return
				} else {
					t.Fatal("Got no warnings when they were expected")
				}
			}

			a := warnings.Error()
			for _, e := range c.Expected {
				if !strings.Contains(a, e) {
					t.Fatalf("Got warnings %q; didn't contain %q", a, e)
				}
			}
		})
	}
}

func TestJob_SpecChanged(t *testing.T) {
	// Get a base test job
	base := testJob()

	// Only modify the indexes/mutable state of the job
	mutatedBase := base.Copy()
	mutatedBase.Status = "foo"
	mutatedBase.ModifyIndex = base.ModifyIndex + 100

	// changed contains a spec change that should be detected
	change := base.Copy()
	change.Priority = 99

	cases := []struct {
		Name     string
		Original *Job
		New      *Job
		Changed  bool
	}{
		{
			Name:     "Same job except mutable indexes",
			Changed:  false,
			Original: base,
			New:      mutatedBase,
		},
		{
			Name:     "Different",
			Changed:  true,
			Original: base,
			New:      change,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			if actual := c.Original.SpecChanged(c.New); actual != c.Changed {
				t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed)
			}
		})
	}
}

func testJob() *Job {
	return &Job{
		Region:      "global",
		ID:          uuid.Generate(),
		Namespace:   "test",
		Name:        "my-job",
		Type:        JobTypeService,
		Priority:    50,
		AllAtOnce:   false,
		Datacenters: []string{"dc1"},
		Constraints: []*Constraint{
			{
				LTarget: "$attr.kernel.name",
				RTarget: "linux",
				Operand: "=",
			},
		},
		Periodic: &PeriodicConfig{
			Enabled: false,
		},
		TaskGroups: []*TaskGroup{
			{
				Name:          "web",
				Count:         10,
				EphemeralDisk: DefaultEphemeralDisk(),
				RestartPolicy: &RestartPolicy{
					Mode:     RestartPolicyModeFail,
					Attempts: 3,
					Interval: 10 * time.Minute,
					Delay:    1 * time.Minute,
				},
				ReschedulePolicy: &ReschedulePolicy{
					Interval:      5 * time.Minute,
					Attempts:      10,
					Delay:         5 * time.Second,
					DelayFunction: "constant",
				},
				Tasks: []*Task{
					{
						Name:   "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{
							"FOO": "bar",
						},
						Artifacts: []*TaskArtifact{
							{
								GetterSource: "http://foo.com",
							},
						},
						Services: []*Service{
							{
								Name:      "${TASK}-frontend",
								PortLabel: "http",
							},
						},
						Resources: &Resources{
							CPU:      500,
							MemoryMB: 256,
							Networks: []*NetworkResource{
								{
									MBits:        50,
									DynamicPorts: []Port{{Label: "http"}},
								},
							},
						},
						LogConfig: &LogConfig{
							MaxFiles:      10,
							MaxFileSizeMB: 1,
						},
					},
				},
				Meta: map[string]string{
					"elb_check_type":     "http",
					"elb_check_interval": "30s",
					"elb_check_min":      "3",
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
	}
}

func TestJob_Copy(t *testing.T) {
	j := testJob()
	c := j.Copy()
	if !reflect.DeepEqual(j, c) {
		t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
	}
}
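
// Note that DeepEqual only proves value equality; the mutation-based cases in
// TestJob_SpecChanged above are what rely on Copy being a deep copy, since
// writes to a copy must never alias back into the base job. A sketch of that
// usage pattern:
//
//	base := testJob()
//	c := base.Copy()
//	c.Priority = 99         // must not affect base
//	_ = base.SpecChanged(c) // true: Priority is part of the spec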

func TestJob_IsPeriodic(t *testing.T) {
	j := &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	if !j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned false on periodic job")
	}

	j = &Job{
		Type: JobTypeService,
	}
	if j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned true on non-periodic job")
	}
}

func TestJob_IsPeriodicActive(t *testing.T) {
	cases := []struct {
		job    *Job
		active bool
	}{
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
			},
			active: true,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
				Stop: true,
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
				ParameterizedJob: &ParameterizedJobConfig{},
			},
			active: false,
		},
	}

	for i, c := range cases {
		if act := c.job.IsPeriodicActive(); act != c.active {
			t.Fatalf("case %d failed: got %v; want %v", i, act, c.active)
		}
	}
}

func TestJob_SystemJob_Validate(t *testing.T) {
	j := testJob()
	j.Type = JobTypeSystem
	j.TaskGroups[0].ReschedulePolicy = nil
	j.Canonicalize()

	err := j.Validate()
	if err == nil || !strings.Contains(err.Error(), "exceed") {
		t.Fatalf("expect error due to count")
	}

	j.TaskGroups[0].Count = 0
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	j.TaskGroups[0].Count = 1
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Add affinities at job, task group and task level; that should fail validation

	j.Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${node.datacenter}",
		RTarget: "dc1",
	}}
	j.TaskGroups[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have an affinity stanza")

	// Add spread at job and task group level; that should fail validation
	j.Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}
	j.TaskGroups[0].Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}

	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have a spread stanza")
}

func TestJob_VaultPolicies(t *testing.T) {
	j0 := &Job{}
	e0 := make(map[string]map[string]*Vault, 0)

	vj1 := &Vault{
		Policies: []string{
			"p1",
			"p2",
		},
	}
	vj2 := &Vault{
		Policies: []string{
			"p3",
			"p4",
		},
	}
	vj3 := &Vault{
		Policies: []string{
			"p5",
		},
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name:  "t2",
						Vault: vj1,
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name:  "t3",
						Vault: vj2,
					},
					{
						Name:  "t4",
						Vault: vj3,
					},
				},
			},
		},
	}

	e1 := map[string]map[string]*Vault{
		"foo": {
			"t2": vj1,
		},
		"bar": {
			"t3": vj2,
			"t4": vj3,
		},
	}

	cases := []struct {
		Job      *Job
		Expected map[string]map[string]*Vault
	}{
		{
			Job:      j0,
			Expected: e0,
		},
		{
			Job:      j1,
			Expected: e1,
		},
	}

	for i, c := range cases {
		got := c.Job.VaultPolicies()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

func TestJob_ConnectTasks(t *testing.T) {
	t.Parallel()
	r := require.New(t)

	// todo(shoenig): this will need some updates when we support connect native
	// tasks, which will have a different Kind format, probably.

	j0 := &Job{
		TaskGroups: []*TaskGroup{{
			Name: "tg1",
			Tasks: []*Task{{
				Name: "connect-proxy-task1",
				Kind: "connect-proxy:task1",
			}, {
				Name: "task2",
				Kind: "task2",
			}, {
				Name: "connect-proxy-task3",
				Kind: "connect-proxy:task3",
			}},
		}, {
			Name: "tg2",
			Tasks: []*Task{{
				Name: "task1",
				Kind: "task1",
			}, {
				Name: "connect-proxy-task2",
				Kind: "connect-proxy:task2",
			}},
		}},
	}

	connectTasks := j0.ConnectTasks()

	exp := map[string][]string{
		"tg1": {"connect-proxy-task1", "connect-proxy-task3"},
		"tg2": {"connect-proxy-task2"},
	}
	r.Equal(exp, connectTasks)
}

func TestJob_RequiredSignals(t *testing.T) {
	j0 := &Job{}
	e0 := make(map[string]map[string][]string, 0)

	vj1 := &Vault{
		Policies:   []string{"p1"},
		ChangeMode: VaultChangeModeNoop,
	}
	vj2 := &Vault{
		Policies:     []string{"p1"},
		ChangeMode:   VaultChangeModeSignal,
		ChangeSignal: "SIGUSR1",
	}
	tj1 := &Template{
		SourcePath: "foo",
		DestPath:   "bar",
		ChangeMode: TemplateChangeModeNoop,
	}
	tj2 := &Template{
		SourcePath:   "foo",
		DestPath:     "bar",
		ChangeMode:   TemplateChangeModeSignal,
		ChangeSignal: "SIGUSR2",
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name:      "t2",
						Vault:     vj2,
						Templates: []*Template{tj2},
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name:      "t3",
						Vault:     vj1,
						Templates: []*Template{tj1},
					},
					{
						Name:  "t4",
						Vault: vj2,
					},
				},
			},
		},
	}

	e1 := map[string]map[string][]string{
		"foo": {
			"t2": {"SIGUSR1", "SIGUSR2"},
		},
		"bar": {
			"t4": {"SIGUSR1"},
		},
	}

	j2 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name:       "t1",
						KillSignal: "SIGQUIT",
					},
				},
			},
		},
	}

	e2 := map[string]map[string][]string{
		"foo": {
			"t1": {"SIGQUIT"},
		},
	}

	cases := []struct {
		Job      *Job
		Expected map[string]map[string][]string
	}{
		{
			Job:      j0,
			Expected: e0,
		},
		{
			Job:      j1,
			Expected: e1,
		},
		{
			Job:      j2,
			Expected: e2,
		},
	}

	for i, c := range cases {
		got := c.Job.RequiredSignals()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

// test new Equal comparisons for components of Jobs
func TestJob_PartEqual(t *testing.T) {
	ns := &Networks{}
	require.True(t, ns.Equals(&Networks{}))

	ns = &Networks{
		&NetworkResource{Device: "eth0"},
	}
	require.True(t, ns.Equals(&Networks{
		&NetworkResource{Device: "eth0"},
	}))

	ns = &Networks{
		&NetworkResource{Device: "eth0"},
		&NetworkResource{Device: "eth1"},
		&NetworkResource{Device: "eth2"},
	}
	require.True(t, ns.Equals(&Networks{
		&NetworkResource{Device: "eth2"},
		&NetworkResource{Device: "eth0"},
		&NetworkResource{Device: "eth1"},
	}))

	cs := &Constraints{
		&Constraint{"left0", "right0", "=", ""},
		&Constraint{"left1", "right1", "=", ""},
		&Constraint{"left2", "right2", "=", ""},
	}
	require.True(t, cs.Equals(&Constraints{
		&Constraint{"left0", "right0", "=", ""},
		&Constraint{"left2", "right2", "=", ""},
		&Constraint{"left1", "right1", "=", ""},
	}))

	as := &Affinities{
		&Affinity{"left0", "right0", "=", 0, ""},
		&Affinity{"left1", "right1", "=", 0, ""},
		&Affinity{"left2", "right2", "=", 0, ""},
	}
	require.True(t, as.Equals(&Affinities{
		&Affinity{"left0", "right0", "=", 0, ""},
		&Affinity{"left2", "right2", "=", 0, ""},
		&Affinity{"left1", "right1", "=", 0, ""},
	}))
}

func TestTask_UsesConnect(t *testing.T) {
	t.Parallel()

	t.Run("normal task", func(t *testing.T) {
		task := testJob().TaskGroups[0].Tasks[0]
		usesConnect := task.UsesConnect()
		require.False(t, usesConnect)
	})

	t.Run("sidecar proxy", func(t *testing.T) {
		task := &Task{
			Name: "connect-proxy-task1",
			Kind: "connect-proxy:task1",
		}
		usesConnect := task.UsesConnect()
		require.True(t, usesConnect)
	})

	// todo(shoenig): add native case
}

func TestTaskGroup_UsesConnect(t *testing.T) {
	t.Parallel()

	try := func(t *testing.T, tg *TaskGroup, exp bool) {
		result := tg.UsesConnect()
		require.Equal(t, exp, result)
	}

	t.Run("tg uses native", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{
				{Connect: nil},
				{Connect: &ConsulConnect{Native: true}},
			},
		}, true)
	})

	t.Run("tg uses sidecar", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{{
				Connect: &ConsulConnect{
					SidecarService: &ConsulSidecarService{
						Port: "9090",
					},
				},
			}},
		}, true)
	})

	t.Run("tg does not use connect", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{
				{Connect: nil},
			},
		}, false)
	})
}

func TestTaskGroup_Validate(t *testing.T) {
	j := testJob()
	tg := &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	err := tg.Validate(j)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "group name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
			{
				Name: "task-b",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected := `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{
								{Label: "foo", Value: 123},
								{Label: "bar", Value: 123},
							},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
			{Name: "web", Leader: true},
			{},
		},
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval:      5 * time.Minute,
			Attempts:      10,
			Delay:         5 * time.Second,
			DelayFunction: "constant",
		},
	}

	err = tg.Validate(j)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
		},
		Update: DefaultUpdateStrategy.Copy(),
	}
	j.Type = JobTypeBatch
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "does not allow update block") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	j.Type = JobTypeSystem
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Networks: []*NetworkResource{
			{
				DynamicPorts: []Port{{"http", 0, 80}},
			},
		},
		Tasks: []*Task{
			{
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							DynamicPorts: []Port{{"http", 0, 80}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(j)
	require.Contains(t, err.Error(), "Port label http already in use")
	require.Contains(t, err.Error(), "Port mapped to 80 already in use")

	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type:   "nothost",
				Source: "foo",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `Volume foo has unrecognised type nothost`)

	tg = &TaskGroup{
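		// a host volume with no Source set; the empty source is asserted below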
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type: "host",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `Volume foo has an empty source`)

	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type: "host",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
				VolumeMounts: []*VolumeMount{
					{
						Volume: "",
					},
				},
			},
			{
				Name:      "task-b",
				Resources: &Resources{},
				VolumeMounts: []*VolumeMount{
					{
						Volume: "foob",
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Task task-a has a volume mount (0) referencing an empty volume`
	require.Contains(t, err.Error(), expected)

	expected = `Task task-b has a volume mount (0) referencing undefined volume foob`
	require.Contains(t, err.Error(), expected)

	taskA := &Task{Name: "task-a"}
	tg = &TaskGroup{
		Name: "group-a",
		Services: []*Service{
			{
				Name: "service-a",
				Checks: []*ServiceCheck{
					{
						Name:      "check-a",
						Type:      "tcp",
						TaskName:  "task-b",
						PortLabel: "http",
						Interval:  time.Duration(1 * time.Second),
						Timeout:   time.Duration(1 * time.Second),
					},
				},
			},
		},
		Tasks: []*Task{taskA},
	}
	err = tg.Validate(&Job{})
	expected = `Check check-a invalid: refers to non-existent task task-b`
	require.Contains(t, err.Error(), expected)

	expected = `Check check-a invalid: only script and gRPC checks should have tasks`
	require.Contains(t, err.Error(), expected)
}

func TestTask_Validate(t *testing.T) {
	task := &Task{}
	ephemeralDisk := DefaultEphemeralDisk()
	err := task.Validate(ephemeralDisk, JobTypeBatch, nil)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "task name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task driver") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "task resources") {
		t.Fatalf("err: %s", err)
	}

	task = &Task{Name: "web/foo"}
	err = task.Validate(ephemeralDisk, JobTypeBatch, nil)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "slashes") {
		t.Fatalf("err: %s", err)
	}

	task = &Task{
		Name:   "web",
		Driver: "docker",
		Resources: &Resources{
			CPU:      100,
			MemoryMB: 100,
		},
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk.SizeMB = 200
	err = task.Validate(ephemeralDisk, JobTypeBatch, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	task.Constraints = append(task.Constraints,
		&Constraint{
			Operand: ConstraintDistinctHosts,
		},
		&Constraint{
			Operand: ConstraintDistinctProperty,
			LTarget: "${meta.rack}",
		})

	err = task.Validate(ephemeralDisk, JobTypeBatch, nil)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate_Services(t *testing.T) {
	s1 := &Service{
		Name:      "service-name",
		PortLabel: "bar",
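		// Duplicate check names plus missing and below-minimum intervals;
		// each specific validation failure is asserted further down.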
		Checks: []*ServiceCheck{
			{
				Name:     "check-name",
				Type:     ServiceCheckTCP,
				Interval: 0 * time.Second,
			},
			{
				Name:    "check-name",
				Type:    ServiceCheckTCP,
				Timeout: 2 * time.Second,
			},
			{
				Name:     "check-name",
				Type:     ServiceCheckTCP,
				Interval: 1 * time.Second,
			},
		},
	}

	s2 := &Service{
		Name:      "service-name",
		PortLabel: "bar",
	}

	s3 := &Service{
		Name:      "service-A",
		PortLabel: "a",
	}
	s4 := &Service{
		Name:      "service-A",
		PortLabel: "b",
	}

	ephemeralDisk := DefaultEphemeralDisk()
	ephemeralDisk.SizeMB = 200
	task := &Task{
		Name:   "web",
		Driver: "docker",
		Resources: &Resources{
			CPU:      100,
			MemoryMB: 100,
		},
		Services: []*Service{s1, s2},
	}

	task1 := &Task{
		Name:      "web",
		Driver:    "docker",
		Resources: DefaultResources(),
		Services:  []*Service{s3, s4},
		LogConfig: DefaultLogConfig(),
	}
	task1.Resources.Networks = []*NetworkResource{
		{
			MBits: 10,
			DynamicPorts: []Port{
				{
					Label: "a",
					Value: 1000,
				},
				{
					Label: "b",
					Value: 2000,
				},
			},
		},
	}

	err := task.Validate(ephemeralDisk, JobTypeService, nil)
	if err == nil {
		t.Fatal("expected an error")
	}

	if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "missing required value interval") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "cannot be less than") {
		t.Fatalf("err: %v", err)
	}

	if err = task1.Validate(ephemeralDisk, JobTypeService, nil); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		task := &Task{
			Name:      "web",
			Driver:    "docker",
			Resources: DefaultResources(),
			Services:  []*Service{s},
			LogConfig: DefaultLogConfig(),
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name:        "DriverModeWithLabel",
			PortLabel:   "http",
			AddressMode: AddressModeDriver,
		},
		{
			Name:        "DriverModeWithPort",
			PortLabel:   "80",
			AddressMode: AddressModeDriver,
		},
		{
			Name:        "HostModeWithLabel",
			PortLabel:   "http",
			AddressMode: AddressModeHost,
		},
		{
			Name:        "HostModeWithoutLabel",
			AddressMode: AddressModeHost,
		},
		{
			Name:        "DriverModeWithoutLabel",
			AddressMode: AddressModeDriver,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			if err := task.Validate(ephemeralDisk, JobTypeService, nil); err != nil {
				t.Fatalf("unexpected err: %v", err)
			}
		})
	}
}

func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		task := &Task{
			Name:      "web",
			Driver:    "docker",
			Resources: DefaultResources(),
			Services:  []*Service{s},
			LogConfig: DefaultLogConfig(),
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name:        "DriverModeWithLabel",
			PortLabel:   "asdf",
			AddressMode: AddressModeDriver,
		},
		{
			Name:        "HostModeWithLabel",
			PortLabel:   "asdf",
			AddressMode: AddressModeHost,
		},
		{
			Name:        "HostModeWithPort",
			PortLabel:   "80",
			AddressMode: AddressModeHost,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			err := task.Validate(ephemeralDisk, JobTypeService, nil)
			if err == nil {
				t.Fatalf("expected an error")
			}
			//t.Logf("err: %v", err)
		})
	}
}

func TestTask_Validate_Service_Check(t *testing.T) {
	invalidCheck := ServiceCheck{
		Name:     "check-name",
		Command:  "/bin/true",
		Type:     ServiceCheckScript,
		Interval: 10 * time.Second,
	}

	err := invalidCheck.validate()
	if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") {
		t.Fatalf("expected a timeout validation error but received: %q", err)
	}

	check1 := ServiceCheck{
		Name:     "check-name",
		Type:     ServiceCheckTCP,
		Interval: 10 * time.Second,
		Timeout:  2 * time.Second,
	}

	if err := check1.validate(); err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = "foo"
	err = check1.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}

	if !strings.Contains(err.Error(), "invalid initial check state (foo)") {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthCritical
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthPassing
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = ""
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2 := ServiceCheck{
		Name:     "check-name-2",
		Type:     ServiceCheckHTTP,
		Interval: 10 * time.Second,
		Timeout:  2 * time.Second,
		Path:     "/foo/bar",
	}

	err = check2.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2.Path = ""
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "valid http path") {
		t.Fatalf("err: %v", err)
	}

	check2.Path = "http://www.example.com"
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "relative http path") {
		t.Fatalf("err: %v", err)
	}

	t.Run("check expose", func(t *testing.T) {
		t.Run("type http", func(t *testing.T) {
			require.NoError(t, (&ServiceCheck{
				Type:     ServiceCheckHTTP,
				Interval: 1 * time.Second,
				Timeout:  1 * time.Second,
				Path:     "/health",
				Expose:   true,
			}).validate())
		})
		t.Run("type tcp", func(t *testing.T) {
			require.EqualError(t, (&ServiceCheck{
				Type:     ServiceCheckTCP,
				Interval: 1 * time.Second,
				Timeout:  1 * time.Second,
				Expose:   true,
			}).validate(), "expose may only be set on HTTP or gRPC checks")
		})
	})
}

// TestTask_Validate_Service_Check_AddressMode asserts that checks do not
// inherit address mode but do inherit ports.
func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
	getTask := func(s *Service) *Task {
		return &Task{
			Resources: &Resources{
				Networks: []*NetworkResource{
					{
						DynamicPorts: []Port{
							{
								Label: "http",
								Value: 9999,
							},
						},
					},
				},
			},
			Services: []*Service{s},
		}
	}

	cases := []struct {
		Service     *Service
		ErrContains string
	}{
		{
			Service: &Service{
				Name:        "invalid-driver",
				PortLabel:   "80",
				AddressMode: "host",
			},
			ErrContains: `port label "80" referenced`,
		},
		{
			Service: &Service{
				Name:        "http-driver-fail-1",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:     "invalid-check-1",
						Type:     "tcp",
						Interval: time.Second,
						Timeout:  time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-1" cannot use a numeric port`,
		},
		{
			Service: &Service{
				Name:        "http-driver-fail-2",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:      "invalid-check-2",
						Type:      "tcp",
						PortLabel: "80",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-2" cannot use a numeric port`,
		},
		{
			Service: &Service{
				Name:        "http-driver-fail-3",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:      "invalid-check-3",
						Type:      "tcp",
						PortLabel: "missing-port-label",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
				},
			},
			ErrContains: `port label "missing-port-label" referenced`,
		},
		{
			Service: &Service{
				Name:        "http-driver-passes",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:     "valid-script-check",
						Type:     "script",
						Command:  "ok",
						Interval: time.Second,
						Timeout:  time.Second,
					},
					{
						Name:      "valid-host-check",
						Type:      "tcp",
						PortLabel: "http",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
					{
						Name:        "valid-driver-check",
						Type:        "tcp",
						AddressMode: "driver",
						Interval:    time.Second,
						Timeout:     time.Second,
					},
				},
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-passes-1",
				Checks: []*ServiceCheck{
					{
						Name:      "valid-port-label",
						Type:      "tcp",
						PortLabel: "http",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
					{
						Name:     "empty-is-ok",
						Type:     "script",
						Command:  "ok",
						Interval: time.Second,
						Timeout:  time.Second,
					},
				},
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-passes-2",
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-fails",
				Checks: []*ServiceCheck{
					{
						Name:     "empty-is-not-ok",
						Type:     "tcp",
						Interval: time.Second,
						Timeout:  time.Second,
					},
				},
			},
			ErrContains: `invalid: check requires a port but neither check nor service`,
		},
	}

	for _, tc := range cases {
		tc := tc
		task := getTask(tc.Service)
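		// tc is rebound above so the subtest closure captures its own copy
		// rather than the shared loop variable.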
		t.Run(tc.Service.Name, func(t *testing.T) {
			err := validateServices(task)
			if err == nil && tc.ErrContains == "" {
				// Ok!
				return
			}
			if err == nil {
				t.Fatalf("no error returned. expected: %s", tc.ErrContains)
			}
			if !strings.Contains(err.Error(), tc.ErrContains) {
				t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
			}
		})
	}
}

func TestTask_Validate_Service_Check_GRPC(t *testing.T) {
	t.Parallel()
	// Bad (no port)
	invalidGRPC := &ServiceCheck{
		Type:     ServiceCheckGRPC,
		Interval: time.Second,
		Timeout:  time.Second,
	}
	service := &Service{
		Name:   "test",
		Checks: []*ServiceCheck{invalidGRPC},
	}

	assert.Error(t, service.Validate())

	// Good
	service.Checks[0] = &ServiceCheck{
		Type:      ServiceCheckGRPC,
		Interval:  time.Second,
		Timeout:   time.Second,
		PortLabel: "some-port-label",
	}

	assert.NoError(t, service.Validate())
}

func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
	t.Parallel()
	invalidCheckRestart := &CheckRestart{
		Limit: -1,
		Grace: -1,
	}

	err := invalidCheckRestart.Validate()
	assert.NotNil(t, err, "invalidCheckRestart.Validate()")
	assert.Len(t, err.(*multierror.Error).Errors, 2)

	validCheckRestart := &CheckRestart{}
	assert.Nil(t, validCheckRestart.Validate())

	validCheckRestart.Limit = 1
	validCheckRestart.Grace = 1
	assert.Nil(t, validCheckRestart.Validate())
}

func TestTask_Validate_ConnectProxyKind(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(kind TaskKind, leader bool) *Task {
		task := &Task{
			Name:      "web",
			Driver:    "docker",
			Resources: DefaultResources(),
			LogConfig: DefaultLogConfig(),
			Kind:      kind,
			Leader:    leader,
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []struct {
		Desc        string
		Kind        TaskKind
		Leader      bool
		Service     *Service
		TgService   []*Service
		ErrContains string
	}{
		{
			Desc: "Not connect",
			Kind: "test",
		},
		{
			Desc: "Invalid because of service in task definition",
			Kind: "connect-proxy:redis",
			Service: &Service{
				Name: "redis",
			},
			ErrContains: "Connect proxy task must not have a service stanza",
		},
		{
			Desc:   "Leader should not be set",
			Kind:   "connect-proxy:redis",
			Leader: true,
			Service: &Service{
				Name: "redis",
			},
			ErrContains: "Connect proxy task must not have leader set",
		},
		{
			Desc: "Service name invalid",
			Kind: "connect-proxy:redis:test",
			Service: &Service{
				Name: "redis",
			},
			ErrContains: `No Connect services in task group with Connect proxy ("redis:test")`,
		},
		{
			Desc:        "Service name not found in group",
			Kind:        "connect-proxy:redis",
			ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
		},
		{
			Desc: "Connect stanza not configured in group",
			Kind: "connect-proxy:redis",
			TgService: []*Service{{
				Name: "redis",
			}},
			ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
		},
		{
			Desc: "Valid connect proxy kind",
			Kind: "connect-proxy:redis",
			TgService: []*Service{{
				Name: "redis",
				Connect: &ConsulConnect{
					SidecarService: &ConsulSidecarService{
						Port: "db",
					},
				},
			}},
		},
	}

	for _, tc := range cases {
		tc := tc
		task := getTask(tc.Kind, tc.Leader)
		if tc.Service != nil {
			task.Services = []*Service{tc.Service}
		}
		t.Run(tc.Desc, func(t *testing.T) {
			err := task.Validate(ephemeralDisk, "service", tc.TgService)
			if err == nil && tc.ErrContains == "" {
				// Ok!
				return
			}
			require.Errorf(t, err, "no error returned. expected: %s", tc.ErrContains)
			require.Containsf(t, err.Error(), tc.ErrContains, "expected %q but found: %v", tc.ErrContains, err)
		})
	}
}

func TestTask_Validate_LogConfig(t *testing.T) {
	task := &Task{
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk := &EphemeralDisk{
		SizeMB: 1,
	}

	err := task.Validate(ephemeralDisk, JobTypeService, nil)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[3].Error(), "log storage") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate_CSIPluginConfig(t *testing.T) {
	table := []struct {
		name        string
		pc          *TaskCSIPluginConfig
		expectedErr string
	}{
		{
			name: "no errors when not specified",
			pc:   nil,
		},
		{
			name:        "requires non-empty plugin id",
			pc:          &TaskCSIPluginConfig{},
			expectedErr: "CSIPluginConfig must have a non-empty PluginID",
		},
		{
			name: "requires valid plugin type",
			pc: &TaskCSIPluginConfig{
				ID:   "com.hashicorp.csi",
				Type: "nonsense",
			},
			expectedErr: "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"",
		},
	}

	for _, tt := range table {
		t.Run(tt.name, func(t *testing.T) {
			task := &Task{
				CSIPluginConfig: tt.pc,
			}
			ephemeralDisk := &EphemeralDisk{
				SizeMB: 1,
			}

			err := task.Validate(ephemeralDisk, JobTypeService, nil)
			mErr := err.(*multierror.Error)
			if tt.expectedErr != "" {
				if !strings.Contains(mErr.Errors[4].Error(), tt.expectedErr) {
					t.Fatalf("err: %s", err)
				}
			} else {
				if len(mErr.Errors) != 4 {
					t.Fatalf("unexpected err: %s", mErr.Errors[4])
				}
			}
		})
	}
}

func TestTask_Validate_Template(t *testing.T) {
	bad := &Template{}
	task := &Task{
		Templates: []*Template{bad},
	}
	ephemeralDisk := &EphemeralDisk{
		SizeMB: 1,
	}

	err := task.Validate(ephemeralDisk, JobTypeService, nil)
	if !strings.Contains(err.Error(), "Template 1 validation failed") {
		t.Fatalf("err: %s", err)
	}

	// Have two templates that share the same destination
	good := &Template{
		SourcePath: "foo",
		DestPath:   "local/foo",
		ChangeMode: "noop",
	}

	task.Templates = []*Template{good, good}
	err = task.Validate(ephemeralDisk, JobTypeService, nil)
	if !strings.Contains(err.Error(), "same destination as") {
		t.Fatalf("err: %s", err)
	}

	// Env templates can't use signals
	task.Templates = []*Template{
		{
			Envvars:    true,
			ChangeMode: "signal",
		},
	}

	err = task.Validate(ephemeralDisk, JobTypeService, nil)
	if err == nil {
		t.Fatalf("expected error from Template.Validate")
	}
	if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) {
		t.Errorf("expected to find %q but found %v", expected, err)
	}
}

func TestTemplate_Validate(t *testing.T) {
	cases := []struct {
		Tmpl         *Template
		Fail         bool
		ContainsErrs []string
	}{
		{
			Tmpl: &Template{},
			Fail: true,
			ContainsErrs: []string{
				"specify a source path",
				"specify a destination",
				TemplateChangeModeInvalidError.Error(),
			},
		},
		{
			Tmpl: &Template{
				Splay: -100,
			},
			Fail: true,
			ContainsErrs: []string{
				"positive splay",
			},
		},
		{
			Tmpl: &Template{
				ChangeMode: "foo",
			},
			Fail: true,
			ContainsErrs: []string{
				TemplateChangeModeInvalidError.Error(),
			},
		},
		{
			Tmpl: &Template{
				ChangeMode: "signal",
			},
			Fail: true,
			ContainsErrs: []string{
				"specify signal value",
			},
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath:   "../../root",
				ChangeMode: "noop",
			},
			Fail: true,
			ContainsErrs: []string{
				"destination escapes",
			},
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath:   "local/foo",
				ChangeMode: "noop",
			},
			Fail: false,
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath:   "local/foo",
				ChangeMode: "noop",
				Perms:      "0444",
			},
			Fail: false,
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath:   "local/foo",
				ChangeMode: "noop",
				Perms:      "zza",
			},
			Fail: true,
			ContainsErrs: []string{
				"as octal",
			},
		},
	}

	for i, c := range cases {
		err := c.Tmpl.Validate()
		if err != nil {
			if !c.Fail {
				t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err)
			}

			e := err.Error()
			for _, exp := range c.ContainsErrs {
				if !strings.Contains(e, exp) {
					t.Fatalf("Case %d: should have contained error %q: %q", i+1, exp, e)
				}
			}
		} else if c.Fail {
			t.Fatalf("Case %d: should have failed: %v", i+1, err)
		}
	}
}

func TestConstraint_Validate(t *testing.T) {
	c := &Constraint{}
	err := c.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") {
		t.Fatalf("err: %s", err)
	}

	c = &Constraint{
		LTarget: "$attr.kernel.name",
		RTarget: "linux",
		Operand: "=",
	}
	err = c.Validate()
	require.NoError(t, err)

	// Perform additional regexp validation
	c.Operand = ConstraintRegex
	c.RTarget = "(foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "missing closing") {
		t.Fatalf("err: %s", err)
	}

	// Perform version validation
	c.Operand = ConstraintVersion
	c.RTarget = "~> foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") {
		t.Fatalf("err: %s", err)
	}

	// Perform semver validation
	c.Operand = ConstraintSemver
	err = c.Validate()
	require.Error(t, err)
	require.Contains(t, err.Error(), "Malformed constraint")

	c.RTarget = ">= 0.6.1"
	require.NoError(t, c.Validate())

	// Perform distinct_property validation
	c.Operand = ConstraintDistinctProperty
	c.RTarget = "0"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") {
		t.Fatalf("err: %s", err)
	}

	c.RTarget = "-1"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "to uint64") {
		t.Fatalf("err: %s", err)
	}

	// Perform distinct_hosts validation
	c.Operand = ConstraintDistinctHosts
	c.LTarget = ""
	c.RTarget = ""
	if err := c.Validate(); err != nil {
		t.Fatalf("expected valid constraint: %v", err)
	}

	// Perform set_contains* validation
	c.RTarget = ""
	for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} {
		c.Operand = o
		err = c.Validate()
		mErr = err.(*multierror.Error)
		if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") {
			t.Fatalf("err: %s", err)
		}
	}

	// Perform LTarget validation
	c.Operand = ConstraintRegex
	c.RTarget = "foo"
	c.LTarget = ""
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") {
		t.Fatalf("err: %s", err)
	}

	// Perform constraint type validation
	c.Operand = "foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") {
		t.Fatalf("err: %s", err)
	}
}

func TestAffinity_Validate(t *testing.T) {
	type tc struct {
		affinity *Affinity
		err      error
		name     string
	}

	testCases := []tc{
		{
			affinity: &Affinity{},
			err:      fmt.Errorf("Missing affinity operand"),
		},
		{
			affinity: &Affinity{
				Operand: "foo",
				LTarget: "${meta.node_class}",
				Weight:  10,
			},
			err: fmt.Errorf("Unknown affinity operator \"foo\""),
		},
		{
			affinity: &Affinity{
				Operand: "=",
				LTarget: "${meta.node_class}",
				Weight:  10,
			},
			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
		},
		{
			affinity: &Affinity{
				Operand: "=",
				LTarget: "${meta.node_class}",
				RTarget: "c4",
				Weight:  0,
			},
			err: fmt.Errorf("Affinity weight cannot be zero"),
		},
		{
			affinity: &Affinity{
				Operand: "=",
				LTarget: "${meta.node_class}",
				RTarget: "c4",
				Weight:  110,
			},
			err: fmt.Errorf("Affinity weight must be within the range [-100,100]"),
		},
		{
			affinity: &Affinity{
				Operand: "=",
				LTarget: "${node.class}",
				Weight:  10,
			},
			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
		},
		{
			affinity: &Affinity{
				Operand: "version",
				LTarget: "${meta.os}",
				RTarget: ">>2.0",
				Weight:  110,
			},
			err: fmt.Errorf("Version affinity is invalid"),
		},
		{
			affinity: &Affinity{
				Operand: "regexp",
				LTarget: "${meta.os}",
				RTarget: "\\K2.0",
				Weight:  100,
			},
			err: fmt.Errorf("Regular expression failed to compile"),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.affinity.Validate()
			if tc.err != nil {
				require.NotNil(t, err)
				require.Contains(t, err.Error(), tc.err.Error())
			} else {
				require.Nil(t, err)
			}
		})
	}
}

func TestUpdateStrategy_Validate(t *testing.T) {
	u := &UpdateStrategy{
		MaxParallel:      -1,
		HealthCheck:      "foo",
		MinHealthyTime:   -10,
		HealthyDeadline:  -15,
		ProgressDeadline: -25,
		AutoRevert:       false,
		Canary:           -1,
	}

	err := u.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[5].Error(), "Progress deadline must be zero or greater") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[6].Error(), "Minimum healthy time must be less than healthy deadline") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[7].Error(), "Healthy deadline must be less than progress deadline") {
		t.Fatalf("err: %s", err)
	}
}

func TestResource_NetIndex(t *testing.T) {
	r := &Resources{
		Networks: []*NetworkResource{
			{Device: "eth0"},
			{Device: "lo0"},
			{Device: ""},
		},
	}
	if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 {
		t.Fatalf("Bad: %d", idx)
	}
	if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 {
		t.Fatalf("Bad: %d", idx)
	}
	if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 {
		t.Fatalf("Bad: %d", idx)
	}
}

func TestResource_Superset(t *testing.T) {
	r1 := &Resources{
		CPU:      2000,
		MemoryMB: 2048,
		DiskMB:   10000,
	}
	r2 := &Resources{
		CPU:      2000,
		MemoryMB: 1024,
		DiskMB:   5000,
	}

	if s, _ := r1.Superset(r1); !s {
		t.Fatalf("bad")
	}
	if s, _ := r1.Superset(r2); !s {
		t.Fatalf("bad")
	}
	if s, _ := r2.Superset(r1); s {
		t.Fatalf("bad")
	}
	if s, _ := r2.Superset(r2); !s {
		t.Fatalf("bad")
	}
}

func TestResource_Add(t *testing.T) {
	r1 := &Resources{
		CPU:      2000,
		MemoryMB: 2048,
		DiskMB:   10000,
		Networks: []*NetworkResource{
			{
				CIDR:          "10.0.0.0/8",
				MBits:         100,
				ReservedPorts: []Port{{"ssh", 22, 0}},
			},
		},
	}
	r2 := &Resources{
		CPU:      2000,
		MemoryMB: 1024,
		DiskMB:   5000,
		Networks: []*NetworkResource{
			{
				IP:            "10.0.0.1",
				MBits:         50,
				ReservedPorts: []Port{{"web", 80, 0}},
			},
		},
	}

	err := r1.Add(r2)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	expect := &Resources{
		CPU:      3000,
		MemoryMB: 3072,
		DiskMB:   15000,
		Networks: []*NetworkResource{
			{
				CIDR:          "10.0.0.0/8",
				MBits:         150,
				ReservedPorts: []Port{{"ssh", 22, 0}, {"web", 80, 0}},
			},
		},
	}

	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
		t.Fatalf("bad: %#v %#v", expect, r1)
	}
}

func TestResource_Add_Network(t *testing.T) {
	r1 := &Resources{}
	r2 := &Resources{
		Networks: []*NetworkResource{
			{
				MBits:        50,
				DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}},
			},
		},
	}
	r3 := &Resources{
		Networks: []*NetworkResource{
			{
				MBits:        25,
				DynamicPorts: []Port{{"admin", 0, 8080}},
			},
		},
	}

	err := r1.Add(r2)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	err = r1.Add(r3)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	expect := &Resources{
		Networks: []*NetworkResource{
			{
				MBits:        75,
				DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}, {"admin", 0, 8080}},
			},
		},
	}

	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
		t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0])
	}
}

func TestComparableResources_Subtract(t *testing.T) {
	r1 := &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 2000,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 2048,
			},
			Networks: []*NetworkResource{
				{
					CIDR:          "10.0.0.0/8",
					MBits:         100,
					ReservedPorts: []Port{{"ssh", 22, 0}},
				},
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: 10000,
		},
	}

	r2 := &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 1000,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 1024,
			},
			Networks: []*NetworkResource{
				{
					CIDR:          "10.0.0.0/8",
					MBits:         20,
					ReservedPorts: []Port{{"ssh", 22, 0}},
				},
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: 5000,
		},
	}
	r1.Subtract(r2)

	expect := &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 1000,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 1024,
			},
			Networks: []*NetworkResource{
				{
					CIDR:          "10.0.0.0/8",
					MBits:         100,
					ReservedPorts: []Port{{"ssh", 22, 0}},
				},
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: 5000,
		},
	}

	require := require.New(t)
	require.Equal(expect, r1)
}

func TestEncodeDecode(t *testing.T) {
	type FooRequest struct {
		Foo string
		Bar int
		Baz bool
	}
	arg := &FooRequest{
		Foo: "test",
		Bar: 42,
		Baz: true,
	}
	buf, err := Encode(1, arg)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out FooRequest
	err = Decode(buf[1:], &out)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(arg, &out) {
		t.Fatalf("bad: %#v %#v", arg, out)
	}
}

func BenchmarkEncodeDecode(b *testing.B) {
	job := testJob()

	for i := 0; i < b.N; i++ {
		buf, err := Encode(1, job)
		if err != nil {
			b.Fatalf("err: %v", err)
		}

		var out Job
		err = Decode(buf[1:], &out)
		if err != nil {
			b.Fatalf("err: %v", err)
		}
	}
}

func TestInvalidServiceCheck(t *testing.T) {
	s := Service{
		Name:      "service-name",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name: "check-name",
				Type: "lol",
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (invalid type)")
	}

	s = Service{
		Name:      "service.name",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (contains a dot): %v", err)
	}

	s = Service{
		Name:      "-my-service",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (begins with a hyphen): %v", err)
	}

	s = Service{
		Name:      "my-service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("Service should be valid: %v", err)
	}

	s = Service{
		Name:      "my_service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err)
	}

	s = Service{
		Name:      "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (too long): %v", err)
	}

	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name:     "check-tcp",
				Type:     ServiceCheckTCP,
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
			{
				Name:     "check-http",
				Type:     ServiceCheckHTTP,
				Path:     "/foo",
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err)
	}

	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name:     "check-script",
				Type:     ServiceCheckScript,
				Command:  "/bin/date",
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name:     "tcp-check",
				Type:     ServiceCheckTCP,
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
		},
		Connect: &ConsulConnect{
			SidecarService: &ConsulSidecarService{},
		},
	}
	require.Error(t, s.Validate())
}

func TestDistinctCheckID(t *testing.T) {
	c1 := ServiceCheck{
		Name:     "web-health",
		Type:     "http",
		Path:     "/health",
		Interval: 2 * time.Second,
		Timeout:  3 * time.Second,
	}
	c2 := ServiceCheck{
		Name:     "web-health",
		Type:     "http",
		Path:     "/health1",
		Interval: 2 * time.Second,
		Timeout:  3 * time.Second,
	}
	c3 := ServiceCheck{
		Name:     "web-health",
		Type:     "http",
		Path:     "/health",
		Interval: 4 * time.Second,
		Timeout:  3 * time.Second,
	}
	serviceID := "123"
	c1Hash := c1.Hash(serviceID)
	c2Hash := c2.Hash(serviceID)
	c3Hash := c3.Hash(serviceID)

	if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash {
		t.Fatalf("Checks need to be unique; c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash)
	}
}

func TestService_Canonicalize(t *testing.T) {
	job := "example"
	taskGroup := "cache"
	task := "redis"

	s := Service{
		Name: "${TASK}-db",
	}

	s.Canonicalize(job, taskGroup, task)
	if s.Name != "redis-db" {
		t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name)
	}

	s.Name = "db"
	s.Canonicalize(job, taskGroup, task)
	if s.Name != "db" {
		t.Fatalf("Expected name: %v, Actual: %v", "db", s.Name)
	}

	s.Name = "${JOB}-${TASKGROUP}-${TASK}-db"
	s.Canonicalize(job, taskGroup, task)
	if s.Name != "example-cache-redis-db" {
		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
	}

	s.Name = "${BASE}-db"
	s.Canonicalize(job, taskGroup, task)
	if s.Name != "example-cache-redis-db" {
		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
	}
}

func TestService_Validate(t *testing.T) {
	s := Service{
		Name: "testservice",
	}

	s.Canonicalize("testjob", "testgroup", "testtask")

	// Base service should be valid
	require.NoError(t, s.Validate())

	// Native Connect should be valid
	s.Connect = &ConsulConnect{
		Native: true,
	}
	require.NoError(t, s.Validate())

	// Native Connect + Sidecar should be invalid
	s.Connect.SidecarService = &ConsulSidecarService{}
	require.Error(t, s.Validate())
}

func TestService_Equals(t *testing.T) {
	s := Service{
		Name: "testservice",
	}

	s.Canonicalize("testjob", "testgroup", "testtask")

	o := s.Copy()

	// Base service should be equal to copy of itself
	require.True(t, s.Equals(o))

	// create a helper to assert a diff and reset the struct
	assertDiff := func() {
		require.False(t, s.Equals(o))
		o = s.Copy()
		require.True(t, s.Equals(o), "bug in copy")
	}

	// Changing any field should cause inequality
	o.Name = "diff"
	assertDiff()

	o.PortLabel = "diff"
	assertDiff()

	o.AddressMode = AddressModeDriver
	assertDiff()

	o.Tags = []string{"diff"}
	assertDiff()

	o.CanaryTags = []string{"diff"}
	assertDiff()

	o.Checks = []*ServiceCheck{{Name: "diff"}}
	assertDiff()

	o.Connect = &ConsulConnect{Native: true}
	assertDiff()

	o.EnableTagOverride = true
	assertDiff()
}

func TestJob_ExpandServiceNames(t *testing.T) {
	j := &Job{
		Name: "my-job",
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				Tasks: []*Task{
					{
						Name: "frontend",
						Services: []*Service{
							{
								Name: "${BASE}-default",
							},
							{
								Name: "jmx",
							},
						},
					},
				},
			},
			{
				Name: "admin",
				Tasks: []*Task{
					{
						Name: "admin-web",
					},
				},
			},
		},
	}

	j.Canonicalize()

	service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name
	if service1Name != "my-job-web-frontend-default" {
		t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name)
	}

	service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name
	if service2Name != "jmx" {
		t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name)
	}
}

func TestJob_CombinedTaskMeta(t *testing.T) {
	j := &Job{
		Meta: map[string]string{
			"job_test":   "job",
			"group_test": "job",
			"task_test":  "job",
		},
		TaskGroups: []*TaskGroup{
			{
				Name: "group",
				Meta: map[string]string{
					"group_test": "group",
					"task_test":  "group",
				},
				Tasks: []*Task{
					{
						Name: "task",
						Meta: map[string]string{
							"task_test": "task",
						},
					},
				},
			},
		},
	}

	require := require.New(t)
	require.EqualValues(map[string]string{
		"job_test":   "job",
		"group_test": "group",
		"task_test":  "task",
	}, j.CombinedTaskMeta("group", "task"))
	require.EqualValues(map[string]string{
		"job_test":   "job",
		"group_test": "group",
		"task_test":  "group",
	}, j.CombinedTaskMeta("group", ""))
	require.EqualValues(map[string]string{
		"job_test":   "job",
		"group_test": "job",
		"task_test":  "job",
	}, j.CombinedTaskMeta("", ""))
}
"job", 2853 }, j.CombinedTaskMeta("", "task")) 2854 2855 } 2856 2857 func TestPeriodicConfig_EnabledInvalid(t *testing.T) { 2858 // Create a config that is enabled but with no interval specified. 2859 p := &PeriodicConfig{Enabled: true} 2860 if err := p.Validate(); err == nil { 2861 t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid") 2862 } 2863 2864 // Create a config that is enabled, with a spec but no type specified. 2865 p = &PeriodicConfig{Enabled: true, Spec: "foo"} 2866 if err := p.Validate(); err == nil { 2867 t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid") 2868 } 2869 2870 // Create a config that is enabled, with a spec type but no spec specified. 2871 p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron} 2872 if err := p.Validate(); err == nil { 2873 t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid") 2874 } 2875 2876 // Create a config that is enabled, with a bad time zone. 2877 p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"} 2878 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") { 2879 t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err) 2880 } 2881 } 2882 2883 func TestPeriodicConfig_InvalidCron(t *testing.T) { 2884 specs := []string{"foo", "* *", "@foo"} 2885 for _, spec := range specs { 2886 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2887 p.Canonicalize() 2888 if err := p.Validate(); err == nil { 2889 t.Fatal("Invalid cron spec") 2890 } 2891 } 2892 } 2893 2894 func TestPeriodicConfig_ValidCron(t *testing.T) { 2895 specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} 2896 for _, spec := range specs { 2897 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2898 p.Canonicalize() 2899 if err := p.Validate(); err != nil { 2900 t.Fatal("Passed valid cron") 2901 } 2902 } 2903 } 2904 2905 func TestPeriodicConfig_NextCron(t *testing.T) { 2906 from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) 2907 2908 cases := []struct { 2909 spec string 2910 nextTime time.Time 2911 errorMsg string 2912 }{ 2913 { 2914 spec: "0 0 29 2 * 1980", 2915 nextTime: time.Time{}, 2916 }, 2917 { 2918 spec: "*/5 * * * *", 2919 nextTime: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC), 2920 }, 2921 { 2922 spec: "1 15-0 *", 2923 nextTime: time.Time{}, 2924 errorMsg: "failed parsing cron expression", 2925 }, 2926 } 2927 2928 for i, c := range cases { 2929 t.Run(fmt.Sprintf("case: %d: %s", i, c.spec), func(t *testing.T) { 2930 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: c.spec} 2931 p.Canonicalize() 2932 n, err := p.Next(from) 2933 2934 require.Equal(t, c.nextTime, n) 2935 if c.errorMsg == "" { 2936 require.NoError(t, err) 2937 } else { 2938 require.Error(t, err) 2939 require.Contains(t, err.Error(), c.errorMsg) 2940 } 2941 }) 2942 } 2943 } 2944 2945 func TestPeriodicConfig_ValidTimeZone(t *testing.T) { 2946 zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} 2947 for _, zone := range zones { 2948 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} 2949 p.Canonicalize() 2950 if err := p.Validate(); err != nil { 2951 t.Fatalf("Valid tz errored: %v", err) 2952 } 2953 } 2954 } 2955 2956 func TestPeriodicConfig_DST(t *testing.T) { 2957 require := require.New(t) 2958 2959 // On Sun, Mar 12, 2:00 am 2017: +1 hour UTC 2960 p := &PeriodicConfig{ 2961 Enabled: true, 2962 SpecType: PeriodicSpecCron, 
2963 		Spec:     "0 2 11-13 3 * 2017",
2964 		TimeZone: "America/Los_Angeles",
2965 	}
2966 	p.Canonicalize()
2967 
2968 	t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
2969 	t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)
2970 
2971 	// E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
2972 	e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
2973 	e2 := time.Date(2017, time.March, 13, 9, 0, 0, 0, time.UTC)
2974 
2975 	n1, err := p.Next(t1)
2976 	require.Nil(err)
2977 
2978 	n2, err := p.Next(t2)
2979 	require.Nil(err)
2980 
2981 	require.Equal(e1, n1.UTC())
2982 	require.Equal(e2, n2.UTC())
2983 }
2984 
2985 func TestTaskLifecycleConfig_Validate(t *testing.T) {
2986 	testCases := []struct {
2987 		name string
2988 		tlc  *TaskLifecycleConfig
2989 		err  error
2990 	}{
2991 		{
2992 			name: "prestart completed",
2993 			tlc: &TaskLifecycleConfig{
2994 				Hook:    "prestart",
2995 				Sidecar: false,
2996 			},
2997 			err: nil,
2998 		},
2999 		{
3000 			name: "prestart running",
3001 			tlc: &TaskLifecycleConfig{
3002 				Hook:    "prestart",
3003 				Sidecar: true,
3004 			},
3005 			err: nil,
3006 		},
3007 		{
3008 			name: "no hook",
3009 			tlc: &TaskLifecycleConfig{
3010 				Sidecar: true,
3011 			},
3012 			err: fmt.Errorf("no lifecycle hook provided"),
3013 		},
3014 	}
3015 
3016 	for _, tc := range testCases {
3017 		t.Run(tc.name, func(t *testing.T) {
3018 			err := tc.tlc.Validate()
3019 			if tc.err != nil {
3020 				require.Error(t, err)
3021 				require.Contains(t, err.Error(), tc.err.Error())
3022 			} else {
3023 				require.Nil(t, err)
3024 			}
3025 		})
3026 
3027 	}
3028 }
3029 
3030 func TestRestartPolicy_Validate(t *testing.T) {
3031 	// Policy with acceptable restart options passes
3032 	p := &RestartPolicy{
3033 		Mode:     RestartPolicyModeFail,
3034 		Attempts: 0,
3035 		Interval: 5 * time.Second,
3036 	}
3037 	if err := p.Validate(); err != nil {
3038 		t.Fatalf("err: %v", err)
3039 	}
3040 
3041 	// Policy with ambiguous restart options fails
3042 	p = &RestartPolicy{
3043 		Mode:     RestartPolicyModeDelay,
3044 		Attempts: 0,
3045 		Interval: 5 * time.Second,
3046 	}
3047 	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") {
3048 		t.Fatalf("expect ambiguity error, got: %v", err)
3049 	}
3050 
3051 	// Bad policy mode fails
3052 	p = &RestartPolicy{
3053 		Mode:     "nope",
3054 		Attempts: 1,
3055 		Interval: 5 * time.Second,
3056 	}
3057 	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") {
3058 		t.Fatalf("expect mode error, got: %v", err)
3059 	}
3060 
3061 	// Fails when attempts*delay does not fit inside interval
3062 	p = &RestartPolicy{
3063 		Mode:     RestartPolicyModeDelay,
3064 		Attempts: 3,
3065 		Delay:    5 * time.Second,
3066 		Interval: 5 * time.Second,
3067 	}
3068 	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") {
3069 		t.Fatalf("expect restart interval error, got: %v", err)
3070 	}
3071 
3072 	// Fails when interval is too small
3073 	p = &RestartPolicy{
3074 		Mode:     RestartPolicyModeDelay,
3075 		Attempts: 3,
3076 		Delay:    5 * time.Second,
3077 		Interval: 2 * time.Second,
3078 	}
3079 	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") {
3080 		t.Fatalf("expect interval too small error, got: %v", err)
3081 	}
3082 }
3083 
3084 func TestReschedulePolicy_Validate(t *testing.T) {
3085 	type testCase struct {
3086 		desc             string
3087 		ReschedulePolicy *ReschedulePolicy
3088 		errors           []error
3089 	}
3090 
3091 	testCases := []testCase{
3092 		{
3093 			desc: "Nil",
3094 		},
3095 		{
3096 			desc: "Disabled",
3097 			ReschedulePolicy: &ReschedulePolicy{
3098 				Attempts: 0,
3099 				Interval: 0 * time.Second},
3100 		},
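		// Both "Disabled" cases (Attempts of 0 above and -1 below) expect no
		// errors: a policy with Attempts <= 0 and Unlimited left unset
		// appears to be treated as disabled, so no further validation is
		// applied.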
3101 		{
3102 			desc: "Disabled",
3103 			ReschedulePolicy: &ReschedulePolicy{
3104 				Attempts: -1,
3105 				Interval: 5 * time.Minute},
3106 		},
3107 		{
3108 			desc: "Valid Linear Delay",
3109 			ReschedulePolicy: &ReschedulePolicy{
3110 				Attempts:      1,
3111 				Interval:      5 * time.Minute,
3112 				Delay:         10 * time.Second,
3113 				DelayFunction: "constant"},
3114 		},
3115 		{
3116 			desc: "Valid Exponential Delay",
3117 			ReschedulePolicy: &ReschedulePolicy{
3118 				Attempts:      5,
3119 				Interval:      1 * time.Hour,
3120 				Delay:         30 * time.Second,
3121 				MaxDelay:      5 * time.Minute,
3122 				DelayFunction: "exponential"},
3123 		},
3124 		{
3125 			desc: "Valid Fibonacci Delay",
3126 			ReschedulePolicy: &ReschedulePolicy{
3127 				Attempts:      5,
3128 				Interval:      15 * time.Minute,
3129 				Delay:         10 * time.Second,
3130 				MaxDelay:      5 * time.Minute,
3131 				DelayFunction: "fibonacci"},
3132 		},
3133 		{
3134 			desc: "Invalid delay function",
3135 			ReschedulePolicy: &ReschedulePolicy{
3136 				Attempts:      1,
3137 				Interval:      1 * time.Second,
3138 				DelayFunction: "blah"},
3139 			errors: []error{
3140 				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
3141 				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
3142 				fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions),
3143 			},
3144 		},
3145 		{
3146 			desc: "Invalid delay ceiling",
3147 			ReschedulePolicy: &ReschedulePolicy{
3148 				Attempts:      1,
3149 				Interval:      8 * time.Second,
3150 				DelayFunction: "exponential",
3151 				Delay:         15 * time.Second,
3152 				MaxDelay:      5 * time.Second},
3153 			errors: []error{
3154 				fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)",
3155 					15*time.Second, 5*time.Second),
3156 			},
3157 		},
3158 		{
3159 			desc: "Invalid delay and interval",
3160 			ReschedulePolicy: &ReschedulePolicy{
3161 				Attempts:      1,
3162 				Interval:      1 * time.Second,
3163 				DelayFunction: "constant"},
3164 			errors: []error{
3165 				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
3166 				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
3167 			},
3168 		}, {
3169 			// Should suggest 3h20m (10 attempts x 20m constant delay = 200m) as the interval
3170 			desc: "Invalid Attempts - linear delay",
3171 			ReschedulePolicy: &ReschedulePolicy{
3172 				Attempts:      10,
3173 				Interval:      1 * time.Hour,
3174 				Delay:         20 * time.Minute,
3175 				DelayFunction: "constant",
3176 			},
3177 			errors: []error{
3178 				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+
3179 					" delay function %q", 3, time.Hour, 20*time.Minute, "constant"),
3180 				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
3181 					200*time.Minute, 10),
3182 			},
3183 		},
3184 		{
3185 			// Should suggest 4h40m as the interval
3186 			// Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
3187 			desc: "Invalid Attempts - exponential delay",
3188 			ReschedulePolicy: &ReschedulePolicy{
3189 				Attempts:      10,
3190 				Interval:      30 * time.Minute,
3191 				Delay:         5 * time.Minute,
3192 				MaxDelay:      40 * time.Minute,
3193 				DelayFunction: "exponential",
3194 			},
3195 			errors: []error{
3196 				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
3197 					"delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute,
3198 					"exponential", 40*time.Minute),
3199 				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
3200 					280*time.Minute, 10),
3201 			},
3202 		},
3203 		{
3204 			// Should suggest 8h as the interval
3205 			// Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
3206 			desc: "Invalid Attempts - fibonacci
delay", 3207 ReschedulePolicy: &ReschedulePolicy{ 3208 Attempts: 10, 3209 Interval: 1 * time.Hour, 3210 Delay: 20 * time.Minute, 3211 MaxDelay: 80 * time.Minute, 3212 DelayFunction: "fibonacci", 3213 }, 3214 errors: []error{ 3215 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 3216 "delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute, 3217 "fibonacci", 80*time.Minute), 3218 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 3219 480*time.Minute, 10), 3220 }, 3221 }, 3222 { 3223 desc: "Ambiguous Unlimited config, has both attempts and unlimited set", 3224 ReschedulePolicy: &ReschedulePolicy{ 3225 Attempts: 1, 3226 Unlimited: true, 3227 DelayFunction: "exponential", 3228 Delay: 5 * time.Minute, 3229 MaxDelay: 1 * time.Hour, 3230 }, 3231 errors: []error{ 3232 fmt.Errorf("Interval must be a non zero value if Attempts > 0"), 3233 fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true), 3234 }, 3235 }, 3236 { 3237 desc: "Invalid Unlimited config", 3238 ReschedulePolicy: &ReschedulePolicy{ 3239 Attempts: 1, 3240 Interval: 1 * time.Second, 3241 Unlimited: true, 3242 DelayFunction: "exponential", 3243 }, 3244 errors: []error{ 3245 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3246 fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3247 }, 3248 }, 3249 { 3250 desc: "Valid Unlimited config", 3251 ReschedulePolicy: &ReschedulePolicy{ 3252 Unlimited: true, 3253 DelayFunction: "exponential", 3254 Delay: 5 * time.Second, 3255 MaxDelay: 1 * time.Hour, 3256 }, 3257 }, 3258 } 3259 3260 for _, tc := range testCases { 3261 t.Run(tc.desc, func(t *testing.T) { 3262 require := require.New(t) 3263 gotErr := tc.ReschedulePolicy.Validate() 3264 if tc.errors != nil { 3265 // Validate all errors 3266 for _, err := range tc.errors { 3267 require.Contains(gotErr.Error(), err.Error()) 3268 } 3269 } else { 3270 require.Nil(gotErr) 3271 } 3272 }) 3273 } 3274 } 3275 3276 func TestAllocation_Index(t *testing.T) { 3277 a1 := Allocation{ 3278 Name: "example.cache[1]", 3279 TaskGroup: "cache", 3280 JobID: "example", 3281 Job: &Job{ 3282 ID: "example", 3283 TaskGroups: []*TaskGroup{{Name: "cache"}}}, 3284 } 3285 e1 := uint(1) 3286 a2 := a1.Copy() 3287 a2.Name = "example.cache[713127]" 3288 e2 := uint(713127) 3289 3290 if a1.Index() != e1 || a2.Index() != e2 { 3291 t.Fatalf("Got %d and %d", a1.Index(), a2.Index()) 3292 } 3293 } 3294 3295 func TestTaskArtifact_Validate_Source(t *testing.T) { 3296 valid := &TaskArtifact{GetterSource: "google.com"} 3297 if err := valid.Validate(); err != nil { 3298 t.Fatalf("unexpected error: %v", err) 3299 } 3300 } 3301 3302 func TestTaskArtifact_Validate_Dest(t *testing.T) { 3303 valid := &TaskArtifact{GetterSource: "google.com"} 3304 if err := valid.Validate(); err != nil { 3305 t.Fatalf("unexpected error: %v", err) 3306 } 3307 3308 valid.RelativeDest = "local/" 3309 if err := valid.Validate(); err != nil { 3310 t.Fatalf("unexpected error: %v", err) 3311 } 3312 3313 valid.RelativeDest = "local/.." 3314 if err := valid.Validate(); err != nil { 3315 t.Fatalf("unexpected error: %v", err) 3316 } 3317 3318 valid.RelativeDest = "local/../../.." 3319 if err := valid.Validate(); err == nil { 3320 t.Fatalf("expected error: %v", err) 3321 } 3322 } 3323 3324 // TestTaskArtifact_Hash asserts an artifact's hash changes when any of the 3325 // fields change. 
3326 func TestTaskArtifact_Hash(t *testing.T) { 3327 t.Parallel() 3328 3329 cases := []TaskArtifact{ 3330 {}, 3331 { 3332 GetterSource: "a", 3333 }, 3334 { 3335 GetterSource: "b", 3336 }, 3337 { 3338 GetterSource: "b", 3339 GetterOptions: map[string]string{"c": "c"}, 3340 }, 3341 { 3342 GetterSource: "b", 3343 GetterOptions: map[string]string{ 3344 "c": "c", 3345 "d": "d", 3346 }, 3347 }, 3348 { 3349 GetterSource: "b", 3350 GetterOptions: map[string]string{ 3351 "c": "c", 3352 "d": "e", 3353 }, 3354 }, 3355 { 3356 GetterSource: "b", 3357 GetterOptions: map[string]string{ 3358 "c": "c", 3359 "d": "e", 3360 }, 3361 GetterMode: "f", 3362 }, 3363 { 3364 GetterSource: "b", 3365 GetterOptions: map[string]string{ 3366 "c": "c", 3367 "d": "e", 3368 }, 3369 GetterMode: "g", 3370 }, 3371 { 3372 GetterSource: "b", 3373 GetterOptions: map[string]string{ 3374 "c": "c", 3375 "d": "e", 3376 }, 3377 GetterMode: "g", 3378 RelativeDest: "h", 3379 }, 3380 { 3381 GetterSource: "b", 3382 GetterOptions: map[string]string{ 3383 "c": "c", 3384 "d": "e", 3385 }, 3386 GetterMode: "g", 3387 RelativeDest: "i", 3388 }, 3389 } 3390 3391 // Map of hash to source 3392 hashes := make(map[string]TaskArtifact, len(cases)) 3393 for _, tc := range cases { 3394 h := tc.Hash() 3395 3396 // Hash should be deterministic 3397 require.Equal(t, h, tc.Hash()) 3398 3399 // Hash should be unique 3400 if orig, ok := hashes[h]; ok { 3401 require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n", 3402 pretty.Sprint(tc), pretty.Sprint(orig), 3403 ) 3404 } 3405 hashes[h] = tc 3406 } 3407 3408 require.Len(t, hashes, len(cases)) 3409 } 3410 3411 func TestAllocation_ShouldMigrate(t *testing.T) { 3412 alloc := Allocation{ 3413 PreviousAllocation: "123", 3414 TaskGroup: "foo", 3415 Job: &Job{ 3416 TaskGroups: []*TaskGroup{ 3417 { 3418 Name: "foo", 3419 EphemeralDisk: &EphemeralDisk{ 3420 Migrate: true, 3421 Sticky: true, 3422 }, 3423 }, 3424 }, 3425 }, 3426 } 3427 3428 if !alloc.ShouldMigrate() { 3429 t.Fatalf("bad: %v", alloc) 3430 } 3431 3432 alloc1 := Allocation{ 3433 PreviousAllocation: "123", 3434 TaskGroup: "foo", 3435 Job: &Job{ 3436 TaskGroups: []*TaskGroup{ 3437 { 3438 Name: "foo", 3439 EphemeralDisk: &EphemeralDisk{}, 3440 }, 3441 }, 3442 }, 3443 } 3444 3445 if alloc1.ShouldMigrate() { 3446 t.Fatalf("bad: %v", alloc) 3447 } 3448 3449 alloc2 := Allocation{ 3450 PreviousAllocation: "123", 3451 TaskGroup: "foo", 3452 Job: &Job{ 3453 TaskGroups: []*TaskGroup{ 3454 { 3455 Name: "foo", 3456 EphemeralDisk: &EphemeralDisk{ 3457 Sticky: false, 3458 Migrate: true, 3459 }, 3460 }, 3461 }, 3462 }, 3463 } 3464 3465 if alloc2.ShouldMigrate() { 3466 t.Fatalf("bad: %v", alloc) 3467 } 3468 3469 alloc3 := Allocation{ 3470 PreviousAllocation: "123", 3471 TaskGroup: "foo", 3472 Job: &Job{ 3473 TaskGroups: []*TaskGroup{ 3474 { 3475 Name: "foo", 3476 }, 3477 }, 3478 }, 3479 } 3480 3481 if alloc3.ShouldMigrate() { 3482 t.Fatalf("bad: %v", alloc) 3483 } 3484 3485 // No previous 3486 alloc4 := Allocation{ 3487 TaskGroup: "foo", 3488 Job: &Job{ 3489 TaskGroups: []*TaskGroup{ 3490 { 3491 Name: "foo", 3492 EphemeralDisk: &EphemeralDisk{ 3493 Migrate: true, 3494 Sticky: true, 3495 }, 3496 }, 3497 }, 3498 }, 3499 } 3500 3501 if alloc4.ShouldMigrate() { 3502 t.Fatalf("bad: %v", alloc4) 3503 } 3504 } 3505 3506 func TestTaskArtifact_Validate_Checksum(t *testing.T) { 3507 cases := []struct { 3508 Input *TaskArtifact 3509 Err bool 3510 }{ 3511 { 3512 &TaskArtifact{ 3513 GetterSource: "foo.com", 3514 GetterOptions: map[string]string{ 3515 "checksum": 
"no-type", 3516 }, 3517 }, 3518 true, 3519 }, 3520 { 3521 &TaskArtifact{ 3522 GetterSource: "foo.com", 3523 GetterOptions: map[string]string{ 3524 "checksum": "md5:toosmall", 3525 }, 3526 }, 3527 true, 3528 }, 3529 { 3530 &TaskArtifact{ 3531 GetterSource: "foo.com", 3532 GetterOptions: map[string]string{ 3533 "checksum": "invalid:type", 3534 }, 3535 }, 3536 true, 3537 }, 3538 { 3539 &TaskArtifact{ 3540 GetterSource: "foo.com", 3541 GetterOptions: map[string]string{ 3542 "checksum": "md5:${ARTIFACT_CHECKSUM}", 3543 }, 3544 }, 3545 false, 3546 }, 3547 } 3548 3549 for i, tc := range cases { 3550 err := tc.Input.Validate() 3551 if (err != nil) != tc.Err { 3552 t.Fatalf("case %d: %v", i, err) 3553 continue 3554 } 3555 } 3556 } 3557 3558 func TestPlan_NormalizeAllocations(t *testing.T) { 3559 t.Parallel() 3560 plan := &Plan{ 3561 NodeUpdate: make(map[string][]*Allocation), 3562 NodePreemptions: make(map[string][]*Allocation), 3563 } 3564 stoppedAlloc := MockAlloc() 3565 desiredDesc := "Desired desc" 3566 plan.AppendStoppedAlloc(stoppedAlloc, desiredDesc, AllocClientStatusLost, "followup-eval-id") 3567 preemptedAlloc := MockAlloc() 3568 preemptingAllocID := uuid.Generate() 3569 plan.AppendPreemptedAlloc(preemptedAlloc, preemptingAllocID) 3570 3571 plan.NormalizeAllocations() 3572 3573 actualStoppedAlloc := plan.NodeUpdate[stoppedAlloc.NodeID][0] 3574 expectedStoppedAlloc := &Allocation{ 3575 ID: stoppedAlloc.ID, 3576 DesiredDescription: desiredDesc, 3577 ClientStatus: AllocClientStatusLost, 3578 FollowupEvalID: "followup-eval-id", 3579 } 3580 assert.Equal(t, expectedStoppedAlloc, actualStoppedAlloc) 3581 actualPreemptedAlloc := plan.NodePreemptions[preemptedAlloc.NodeID][0] 3582 expectedPreemptedAlloc := &Allocation{ 3583 ID: preemptedAlloc.ID, 3584 PreemptedByAllocation: preemptingAllocID, 3585 } 3586 assert.Equal(t, expectedPreemptedAlloc, actualPreemptedAlloc) 3587 } 3588 3589 func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 3590 t.Parallel() 3591 plan := &Plan{ 3592 NodeUpdate: make(map[string][]*Allocation), 3593 } 3594 alloc := MockAlloc() 3595 desiredDesc := "Desired desc" 3596 3597 plan.AppendStoppedAlloc(alloc, desiredDesc, AllocClientStatusLost, "") 3598 3599 expectedAlloc := new(Allocation) 3600 *expectedAlloc = *alloc 3601 expectedAlloc.DesiredDescription = desiredDesc 3602 expectedAlloc.DesiredStatus = AllocDesiredStatusStop 3603 expectedAlloc.ClientStatus = AllocClientStatusLost 3604 expectedAlloc.Job = nil 3605 expectedAlloc.AllocStates = []*AllocState{{ 3606 Field: AllocStateFieldClientStatus, 3607 Value: "lost", 3608 }} 3609 3610 // This value is set to time.Now() in AppendStoppedAlloc, so clear it 3611 appendedAlloc := plan.NodeUpdate[alloc.NodeID][0] 3612 appendedAlloc.AllocStates[0].Time = time.Time{} 3613 3614 assert.Equal(t, expectedAlloc, appendedAlloc) 3615 assert.Equal(t, alloc.Job, plan.Job) 3616 } 3617 3618 func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 3619 t.Parallel() 3620 plan := &Plan{ 3621 NodePreemptions: make(map[string][]*Allocation), 3622 } 3623 alloc := MockAlloc() 3624 preemptingAllocID := uuid.Generate() 3625 3626 plan.AppendPreemptedAlloc(alloc, preemptingAllocID) 3627 3628 appendedAlloc := plan.NodePreemptions[alloc.NodeID][0] 3629 expectedAlloc := &Allocation{ 3630 ID: alloc.ID, 3631 PreemptedByAllocation: preemptingAllocID, 3632 JobID: alloc.JobID, 3633 Namespace: alloc.Namespace, 3634 DesiredStatus: AllocDesiredStatusEvict, 3635 DesiredDescription: fmt.Sprintf("Preempted by alloc ID %v", 
preemptingAllocID), 3636 AllocatedResources: alloc.AllocatedResources, 3637 TaskResources: alloc.TaskResources, 3638 SharedResources: alloc.SharedResources, 3639 } 3640 assert.Equal(t, expectedAlloc, appendedAlloc) 3641 } 3642 3643 func TestAllocation_MsgPackTags(t *testing.T) { 3644 t.Parallel() 3645 planType := reflect.TypeOf(Allocation{}) 3646 3647 msgPackTags, _ := planType.FieldByName("_struct") 3648 3649 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3650 } 3651 3652 func TestEvaluation_MsgPackTags(t *testing.T) { 3653 t.Parallel() 3654 planType := reflect.TypeOf(Evaluation{}) 3655 3656 msgPackTags, _ := planType.FieldByName("_struct") 3657 3658 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3659 } 3660 3661 func TestAllocation_Terminated(t *testing.T) { 3662 type desiredState struct { 3663 ClientStatus string 3664 DesiredStatus string 3665 Terminated bool 3666 } 3667 3668 harness := []desiredState{ 3669 { 3670 ClientStatus: AllocClientStatusPending, 3671 DesiredStatus: AllocDesiredStatusStop, 3672 Terminated: false, 3673 }, 3674 { 3675 ClientStatus: AllocClientStatusRunning, 3676 DesiredStatus: AllocDesiredStatusStop, 3677 Terminated: false, 3678 }, 3679 { 3680 ClientStatus: AllocClientStatusFailed, 3681 DesiredStatus: AllocDesiredStatusStop, 3682 Terminated: true, 3683 }, 3684 { 3685 ClientStatus: AllocClientStatusFailed, 3686 DesiredStatus: AllocDesiredStatusRun, 3687 Terminated: true, 3688 }, 3689 } 3690 3691 for _, state := range harness { 3692 alloc := Allocation{} 3693 alloc.DesiredStatus = state.DesiredStatus 3694 alloc.ClientStatus = state.ClientStatus 3695 if alloc.Terminated() != state.Terminated { 3696 t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated()) 3697 } 3698 } 3699 } 3700 3701 func TestAllocation_ShouldReschedule(t *testing.T) { 3702 type testCase struct { 3703 Desc string 3704 FailTime time.Time 3705 ClientStatus string 3706 DesiredStatus string 3707 ReschedulePolicy *ReschedulePolicy 3708 RescheduleTrackers []*RescheduleEvent 3709 ShouldReschedule bool 3710 } 3711 3712 fail := time.Now() 3713 3714 harness := []testCase{ 3715 { 3716 Desc: "Reschedule when desired state is stop", 3717 ClientStatus: AllocClientStatusPending, 3718 DesiredStatus: AllocDesiredStatusStop, 3719 FailTime: fail, 3720 ReschedulePolicy: nil, 3721 ShouldReschedule: false, 3722 }, 3723 { 3724 Desc: "Disabled rescheduling", 3725 ClientStatus: AllocClientStatusFailed, 3726 DesiredStatus: AllocDesiredStatusRun, 3727 FailTime: fail, 3728 ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute}, 3729 ShouldReschedule: false, 3730 }, 3731 { 3732 Desc: "Reschedule when client status is complete", 3733 ClientStatus: AllocClientStatusComplete, 3734 DesiredStatus: AllocDesiredStatusRun, 3735 FailTime: fail, 3736 ReschedulePolicy: nil, 3737 ShouldReschedule: false, 3738 }, 3739 { 3740 Desc: "Reschedule with nil reschedule policy", 3741 ClientStatus: AllocClientStatusFailed, 3742 DesiredStatus: AllocDesiredStatusRun, 3743 FailTime: fail, 3744 ReschedulePolicy: nil, 3745 ShouldReschedule: false, 3746 }, 3747 { 3748 Desc: "Reschedule with unlimited and attempts >0", 3749 ClientStatus: AllocClientStatusFailed, 3750 DesiredStatus: AllocDesiredStatusRun, 3751 FailTime: fail, 3752 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true}, 3753 ShouldReschedule: true, 3754 }, 3755 { 3756 Desc: "Reschedule when client status is complete", 3757 ClientStatus: AllocClientStatusComplete, 3758 DesiredStatus: 
AllocDesiredStatusRun, 3759 FailTime: fail, 3760 ReschedulePolicy: nil, 3761 ShouldReschedule: false, 3762 }, 3763 { 3764 Desc: "Reschedule with policy when client status complete", 3765 ClientStatus: AllocClientStatusComplete, 3766 DesiredStatus: AllocDesiredStatusRun, 3767 FailTime: fail, 3768 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 3769 ShouldReschedule: false, 3770 }, 3771 { 3772 Desc: "Reschedule with no previous attempts", 3773 ClientStatus: AllocClientStatusFailed, 3774 DesiredStatus: AllocDesiredStatusRun, 3775 FailTime: fail, 3776 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 3777 ShouldReschedule: true, 3778 }, 3779 { 3780 Desc: "Reschedule with leftover attempts", 3781 ClientStatus: AllocClientStatusFailed, 3782 DesiredStatus: AllocDesiredStatusRun, 3783 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 3784 FailTime: fail, 3785 RescheduleTrackers: []*RescheduleEvent{ 3786 { 3787 RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(), 3788 }, 3789 }, 3790 ShouldReschedule: true, 3791 }, 3792 { 3793 Desc: "Reschedule with too old previous attempts", 3794 ClientStatus: AllocClientStatusFailed, 3795 DesiredStatus: AllocDesiredStatusRun, 3796 FailTime: fail, 3797 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute}, 3798 RescheduleTrackers: []*RescheduleEvent{ 3799 { 3800 RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(), 3801 }, 3802 }, 3803 ShouldReschedule: true, 3804 }, 3805 { 3806 Desc: "Reschedule with no leftover attempts", 3807 ClientStatus: AllocClientStatusFailed, 3808 DesiredStatus: AllocDesiredStatusRun, 3809 FailTime: fail, 3810 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 3811 RescheduleTrackers: []*RescheduleEvent{ 3812 { 3813 RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), 3814 }, 3815 { 3816 RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(), 3817 }, 3818 }, 3819 ShouldReschedule: false, 3820 }, 3821 } 3822 3823 for _, state := range harness { 3824 alloc := Allocation{} 3825 alloc.DesiredStatus = state.DesiredStatus 3826 alloc.ClientStatus = state.ClientStatus 3827 alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers} 3828 3829 t.Run(state.Desc, func(t *testing.T) { 3830 if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule { 3831 t.Fatalf("expected %v but got %v", state.ShouldReschedule, got) 3832 } 3833 }) 3834 3835 } 3836 } 3837 3838 func TestAllocation_LastEventTime(t *testing.T) { 3839 type testCase struct { 3840 desc string 3841 taskState map[string]*TaskState 3842 expectedLastEventTime time.Time 3843 } 3844 3845 t1 := time.Now().UTC() 3846 3847 testCases := []testCase{ 3848 { 3849 desc: "nil task state", 3850 expectedLastEventTime: t1, 3851 }, 3852 { 3853 desc: "empty task state", 3854 taskState: make(map[string]*TaskState), 3855 expectedLastEventTime: t1, 3856 }, 3857 { 3858 desc: "Finished At not set", 3859 taskState: map[string]*TaskState{"foo": {State: "start", 3860 StartedAt: t1.Add(-2 * time.Hour)}}, 3861 expectedLastEventTime: t1, 3862 }, 3863 { 3864 desc: "One finished ", 3865 taskState: map[string]*TaskState{"foo": {State: "start", 3866 StartedAt: t1.Add(-2 * time.Hour), 3867 FinishedAt: t1.Add(-1 * time.Hour)}}, 3868 expectedLastEventTime: t1.Add(-1 * time.Hour), 3869 }, 3870 { 3871 desc: "Multiple task groups", 3872 taskState: map[string]*TaskState{"foo": {State: "start", 3873 StartedAt: t1.Add(-2 * 
time.Hour), 3874 FinishedAt: t1.Add(-1 * time.Hour)}, 3875 "bar": {State: "start", 3876 StartedAt: t1.Add(-2 * time.Hour), 3877 FinishedAt: t1.Add(-40 * time.Minute)}}, 3878 expectedLastEventTime: t1.Add(-40 * time.Minute), 3879 }, 3880 { 3881 desc: "No finishedAt set, one task event, should use modify time", 3882 taskState: map[string]*TaskState{"foo": { 3883 State: "run", 3884 StartedAt: t1.Add(-2 * time.Hour), 3885 Events: []*TaskEvent{ 3886 {Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()}, 3887 }}, 3888 }, 3889 expectedLastEventTime: t1, 3890 }, 3891 } 3892 for _, tc := range testCases { 3893 t.Run(tc.desc, func(t *testing.T) { 3894 alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()} 3895 alloc.TaskStates = tc.taskState 3896 require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime()) 3897 }) 3898 } 3899 } 3900 3901 func TestAllocation_NextDelay(t *testing.T) { 3902 type testCase struct { 3903 desc string 3904 reschedulePolicy *ReschedulePolicy 3905 alloc *Allocation 3906 expectedRescheduleTime time.Time 3907 expectedRescheduleEligible bool 3908 } 3909 now := time.Now() 3910 testCases := []testCase{ 3911 { 3912 desc: "Allocation hasn't failed yet", 3913 reschedulePolicy: &ReschedulePolicy{ 3914 DelayFunction: "constant", 3915 Delay: 5 * time.Second, 3916 }, 3917 alloc: &Allocation{}, 3918 expectedRescheduleTime: time.Time{}, 3919 expectedRescheduleEligible: false, 3920 }, 3921 { 3922 desc: "Allocation has no reschedule policy", 3923 alloc: &Allocation{}, 3924 expectedRescheduleTime: time.Time{}, 3925 expectedRescheduleEligible: false, 3926 }, 3927 { 3928 desc: "Allocation lacks task state", 3929 reschedulePolicy: &ReschedulePolicy{ 3930 DelayFunction: "constant", 3931 Delay: 5 * time.Second, 3932 Unlimited: true, 3933 }, 3934 alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()}, 3935 expectedRescheduleTime: now.UTC().Add(5 * time.Second), 3936 expectedRescheduleEligible: true, 3937 }, 3938 { 3939 desc: "linear delay, unlimited restarts, no reschedule tracker", 3940 reschedulePolicy: &ReschedulePolicy{ 3941 DelayFunction: "constant", 3942 Delay: 5 * time.Second, 3943 Unlimited: true, 3944 }, 3945 alloc: &Allocation{ 3946 ClientStatus: AllocClientStatusFailed, 3947 TaskStates: map[string]*TaskState{"foo": {State: "dead", 3948 StartedAt: now.Add(-1 * time.Hour), 3949 FinishedAt: now.Add(-2 * time.Second)}}, 3950 }, 3951 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3952 expectedRescheduleEligible: true, 3953 }, 3954 { 3955 desc: "linear delay with reschedule tracker", 3956 reschedulePolicy: &ReschedulePolicy{ 3957 DelayFunction: "constant", 3958 Delay: 5 * time.Second, 3959 Interval: 10 * time.Minute, 3960 Attempts: 2, 3961 }, 3962 alloc: &Allocation{ 3963 ClientStatus: AllocClientStatusFailed, 3964 TaskStates: map[string]*TaskState{"foo": {State: "start", 3965 StartedAt: now.Add(-1 * time.Hour), 3966 FinishedAt: now.Add(-2 * time.Second)}}, 3967 RescheduleTracker: &RescheduleTracker{ 3968 Events: []*RescheduleEvent{{ 3969 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 3970 Delay: 5 * time.Second, 3971 }}, 3972 }}, 3973 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3974 expectedRescheduleEligible: true, 3975 }, 3976 { 3977 desc: "linear delay with reschedule tracker, attempts exhausted", 3978 reschedulePolicy: &ReschedulePolicy{ 3979 DelayFunction: "constant", 3980 Delay: 5 * time.Second, 3981 Interval: 10 * time.Minute, 3982 Attempts: 2, 3983 }, 3984 
alloc: &Allocation{ 3985 ClientStatus: AllocClientStatusFailed, 3986 TaskStates: map[string]*TaskState{"foo": {State: "start", 3987 StartedAt: now.Add(-1 * time.Hour), 3988 FinishedAt: now.Add(-2 * time.Second)}}, 3989 RescheduleTracker: &RescheduleTracker{ 3990 Events: []*RescheduleEvent{ 3991 { 3992 RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), 3993 Delay: 5 * time.Second, 3994 }, 3995 { 3996 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 3997 Delay: 5 * time.Second, 3998 }, 3999 }, 4000 }}, 4001 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4002 expectedRescheduleEligible: false, 4003 }, 4004 { 4005 desc: "exponential delay - no reschedule tracker", 4006 reschedulePolicy: &ReschedulePolicy{ 4007 DelayFunction: "exponential", 4008 Delay: 5 * time.Second, 4009 MaxDelay: 90 * time.Second, 4010 Unlimited: true, 4011 }, 4012 alloc: &Allocation{ 4013 ClientStatus: AllocClientStatusFailed, 4014 TaskStates: map[string]*TaskState{"foo": {State: "start", 4015 StartedAt: now.Add(-1 * time.Hour), 4016 FinishedAt: now.Add(-2 * time.Second)}}, 4017 }, 4018 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4019 expectedRescheduleEligible: true, 4020 }, 4021 { 4022 desc: "exponential delay with reschedule tracker", 4023 reschedulePolicy: &ReschedulePolicy{ 4024 DelayFunction: "exponential", 4025 Delay: 5 * time.Second, 4026 MaxDelay: 90 * time.Second, 4027 Unlimited: true, 4028 }, 4029 alloc: &Allocation{ 4030 ClientStatus: AllocClientStatusFailed, 4031 TaskStates: map[string]*TaskState{"foo": {State: "start", 4032 StartedAt: now.Add(-1 * time.Hour), 4033 FinishedAt: now.Add(-2 * time.Second)}}, 4034 RescheduleTracker: &RescheduleTracker{ 4035 Events: []*RescheduleEvent{ 4036 { 4037 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4038 Delay: 5 * time.Second, 4039 }, 4040 { 4041 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4042 Delay: 10 * time.Second, 4043 }, 4044 { 4045 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4046 Delay: 20 * time.Second, 4047 }, 4048 }, 4049 }}, 4050 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 4051 expectedRescheduleEligible: true, 4052 }, 4053 { 4054 desc: "exponential delay with delay ceiling reached", 4055 reschedulePolicy: &ReschedulePolicy{ 4056 DelayFunction: "exponential", 4057 Delay: 5 * time.Second, 4058 MaxDelay: 90 * time.Second, 4059 Unlimited: true, 4060 }, 4061 alloc: &Allocation{ 4062 ClientStatus: AllocClientStatusFailed, 4063 TaskStates: map[string]*TaskState{"foo": {State: "start", 4064 StartedAt: now.Add(-1 * time.Hour), 4065 FinishedAt: now.Add(-15 * time.Second)}}, 4066 RescheduleTracker: &RescheduleTracker{ 4067 Events: []*RescheduleEvent{ 4068 { 4069 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4070 Delay: 5 * time.Second, 4071 }, 4072 { 4073 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4074 Delay: 10 * time.Second, 4075 }, 4076 { 4077 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4078 Delay: 20 * time.Second, 4079 }, 4080 { 4081 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4082 Delay: 40 * time.Second, 4083 }, 4084 { 4085 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 4086 Delay: 80 * time.Second, 4087 }, 4088 }, 4089 }}, 4090 expectedRescheduleTime: now.Add(-15 * time.Second).Add(90 * time.Second), 4091 expectedRescheduleEligible: true, 4092 }, 4093 { 4094 // Test case where most recent reschedule ran longer than delay ceiling 4095 desc: "exponential 
delay, delay ceiling reset condition met", 4096 reschedulePolicy: &ReschedulePolicy{ 4097 DelayFunction: "exponential", 4098 Delay: 5 * time.Second, 4099 MaxDelay: 90 * time.Second, 4100 Unlimited: true, 4101 }, 4102 alloc: &Allocation{ 4103 ClientStatus: AllocClientStatusFailed, 4104 TaskStates: map[string]*TaskState{"foo": {State: "start", 4105 StartedAt: now.Add(-1 * time.Hour), 4106 FinishedAt: now.Add(-15 * time.Minute)}}, 4107 RescheduleTracker: &RescheduleTracker{ 4108 Events: []*RescheduleEvent{ 4109 { 4110 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4111 Delay: 5 * time.Second, 4112 }, 4113 { 4114 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4115 Delay: 10 * time.Second, 4116 }, 4117 { 4118 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4119 Delay: 20 * time.Second, 4120 }, 4121 { 4122 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4123 Delay: 40 * time.Second, 4124 }, 4125 { 4126 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4127 Delay: 80 * time.Second, 4128 }, 4129 { 4130 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4131 Delay: 90 * time.Second, 4132 }, 4133 { 4134 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4135 Delay: 90 * time.Second, 4136 }, 4137 }, 4138 }}, 4139 expectedRescheduleTime: now.Add(-15 * time.Minute).Add(5 * time.Second), 4140 expectedRescheduleEligible: true, 4141 }, 4142 { 4143 desc: "fibonacci delay - no reschedule tracker", 4144 reschedulePolicy: &ReschedulePolicy{ 4145 DelayFunction: "fibonacci", 4146 Delay: 5 * time.Second, 4147 MaxDelay: 90 * time.Second, 4148 Unlimited: true, 4149 }, 4150 alloc: &Allocation{ 4151 ClientStatus: AllocClientStatusFailed, 4152 TaskStates: map[string]*TaskState{"foo": {State: "start", 4153 StartedAt: now.Add(-1 * time.Hour), 4154 FinishedAt: now.Add(-2 * time.Second)}}}, 4155 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4156 expectedRescheduleEligible: true, 4157 }, 4158 { 4159 desc: "fibonacci delay with reschedule tracker", 4160 reschedulePolicy: &ReschedulePolicy{ 4161 DelayFunction: "fibonacci", 4162 Delay: 5 * time.Second, 4163 MaxDelay: 90 * time.Second, 4164 Unlimited: true, 4165 }, 4166 alloc: &Allocation{ 4167 ClientStatus: AllocClientStatusFailed, 4168 TaskStates: map[string]*TaskState{"foo": {State: "start", 4169 StartedAt: now.Add(-1 * time.Hour), 4170 FinishedAt: now.Add(-2 * time.Second)}}, 4171 RescheduleTracker: &RescheduleTracker{ 4172 Events: []*RescheduleEvent{ 4173 { 4174 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4175 Delay: 5 * time.Second, 4176 }, 4177 { 4178 RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(), 4179 Delay: 5 * time.Second, 4180 }, 4181 }, 4182 }}, 4183 expectedRescheduleTime: now.Add(-2 * time.Second).Add(10 * time.Second), 4184 expectedRescheduleEligible: true, 4185 }, 4186 { 4187 desc: "fibonacci delay with more events", 4188 reschedulePolicy: &ReschedulePolicy{ 4189 DelayFunction: "fibonacci", 4190 Delay: 5 * time.Second, 4191 MaxDelay: 90 * time.Second, 4192 Unlimited: true, 4193 }, 4194 alloc: &Allocation{ 4195 ClientStatus: AllocClientStatusFailed, 4196 TaskStates: map[string]*TaskState{"foo": {State: "start", 4197 StartedAt: now.Add(-1 * time.Hour), 4198 FinishedAt: now.Add(-2 * time.Second)}}, 4199 RescheduleTracker: &RescheduleTracker{ 4200 Events: []*RescheduleEvent{ 4201 { 4202 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4203 Delay: 5 * time.Second, 4204 }, 4205 { 4206 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 
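					// The delays asserted by this case follow the fibonacci
					// recurrence, each delay being the sum of the previous two
					// (5, 5, 10, 15, 25 -> next 40s), which is why the
					// expected reschedule time below is FinishedAt + 40s.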
4207 Delay: 5 * time.Second, 4208 }, 4209 { 4210 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4211 Delay: 10 * time.Second, 4212 }, 4213 { 4214 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4215 Delay: 15 * time.Second, 4216 }, 4217 { 4218 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4219 Delay: 25 * time.Second, 4220 }, 4221 }, 4222 }}, 4223 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 4224 expectedRescheduleEligible: true, 4225 }, 4226 { 4227 desc: "fibonacci delay with delay ceiling reached", 4228 reschedulePolicy: &ReschedulePolicy{ 4229 DelayFunction: "fibonacci", 4230 Delay: 5 * time.Second, 4231 MaxDelay: 50 * time.Second, 4232 Unlimited: true, 4233 }, 4234 alloc: &Allocation{ 4235 ClientStatus: AllocClientStatusFailed, 4236 TaskStates: map[string]*TaskState{"foo": {State: "start", 4237 StartedAt: now.Add(-1 * time.Hour), 4238 FinishedAt: now.Add(-15 * time.Second)}}, 4239 RescheduleTracker: &RescheduleTracker{ 4240 Events: []*RescheduleEvent{ 4241 { 4242 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4243 Delay: 5 * time.Second, 4244 }, 4245 { 4246 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4247 Delay: 5 * time.Second, 4248 }, 4249 { 4250 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4251 Delay: 10 * time.Second, 4252 }, 4253 { 4254 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4255 Delay: 15 * time.Second, 4256 }, 4257 { 4258 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4259 Delay: 25 * time.Second, 4260 }, 4261 { 4262 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 4263 Delay: 40 * time.Second, 4264 }, 4265 }, 4266 }}, 4267 expectedRescheduleTime: now.Add(-15 * time.Second).Add(50 * time.Second), 4268 expectedRescheduleEligible: true, 4269 }, 4270 { 4271 desc: "fibonacci delay with delay reset condition met", 4272 reschedulePolicy: &ReschedulePolicy{ 4273 DelayFunction: "fibonacci", 4274 Delay: 5 * time.Second, 4275 MaxDelay: 50 * time.Second, 4276 Unlimited: true, 4277 }, 4278 alloc: &Allocation{ 4279 ClientStatus: AllocClientStatusFailed, 4280 TaskStates: map[string]*TaskState{"foo": {State: "start", 4281 StartedAt: now.Add(-1 * time.Hour), 4282 FinishedAt: now.Add(-5 * time.Minute)}}, 4283 RescheduleTracker: &RescheduleTracker{ 4284 Events: []*RescheduleEvent{ 4285 { 4286 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4287 Delay: 5 * time.Second, 4288 }, 4289 { 4290 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4291 Delay: 5 * time.Second, 4292 }, 4293 { 4294 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4295 Delay: 10 * time.Second, 4296 }, 4297 { 4298 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4299 Delay: 15 * time.Second, 4300 }, 4301 { 4302 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4303 Delay: 25 * time.Second, 4304 }, 4305 { 4306 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4307 Delay: 40 * time.Second, 4308 }, 4309 }, 4310 }}, 4311 expectedRescheduleTime: now.Add(-5 * time.Minute).Add(5 * time.Second), 4312 expectedRescheduleEligible: true, 4313 }, 4314 { 4315 desc: "fibonacci delay with the most recent event that reset delay value", 4316 reschedulePolicy: &ReschedulePolicy{ 4317 DelayFunction: "fibonacci", 4318 Delay: 5 * time.Second, 4319 MaxDelay: 50 * time.Second, 4320 Unlimited: true, 4321 }, 4322 alloc: &Allocation{ 4323 ClientStatus: AllocClientStatusFailed, 4324 TaskStates: map[string]*TaskState{"foo": {State: "start", 4325 StartedAt: now.Add(-1 
* time.Hour), 4326 FinishedAt: now.Add(-5 * time.Second)}}, 4327 RescheduleTracker: &RescheduleTracker{ 4328 Events: []*RescheduleEvent{ 4329 { 4330 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4331 Delay: 5 * time.Second, 4332 }, 4333 { 4334 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4335 Delay: 5 * time.Second, 4336 }, 4337 { 4338 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4339 Delay: 10 * time.Second, 4340 }, 4341 { 4342 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4343 Delay: 15 * time.Second, 4344 }, 4345 { 4346 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4347 Delay: 25 * time.Second, 4348 }, 4349 { 4350 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4351 Delay: 40 * time.Second, 4352 }, 4353 { 4354 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4355 Delay: 50 * time.Second, 4356 }, 4357 { 4358 RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(), 4359 Delay: 5 * time.Second, 4360 }, 4361 }, 4362 }}, 4363 expectedRescheduleTime: now.Add(-5 * time.Second).Add(5 * time.Second), 4364 expectedRescheduleEligible: true, 4365 }, 4366 } 4367 for _, tc := range testCases { 4368 t.Run(tc.desc, func(t *testing.T) { 4369 require := require.New(t) 4370 j := testJob() 4371 if tc.reschedulePolicy != nil { 4372 j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy 4373 } 4374 tc.alloc.Job = j 4375 tc.alloc.TaskGroup = j.TaskGroups[0].Name 4376 reschedTime, allowed := tc.alloc.NextRescheduleTime() 4377 require.Equal(tc.expectedRescheduleEligible, allowed) 4378 require.Equal(tc.expectedRescheduleTime, reschedTime) 4379 }) 4380 } 4381 4382 } 4383 4384 func TestAllocation_WaitClientStop(t *testing.T) { 4385 type testCase struct { 4386 desc string 4387 stop time.Duration 4388 status string 4389 expectedShould bool 4390 expectedRescheduleTime time.Time 4391 } 4392 now := time.Now().UTC() 4393 testCases := []testCase{ 4394 { 4395 desc: "running", 4396 stop: 2 * time.Second, 4397 status: AllocClientStatusRunning, 4398 expectedShould: true, 4399 }, 4400 { 4401 desc: "no stop_after_client_disconnect", 4402 status: AllocClientStatusLost, 4403 expectedShould: false, 4404 }, 4405 { 4406 desc: "stop", 4407 status: AllocClientStatusLost, 4408 stop: 2 * time.Second, 4409 expectedShould: true, 4410 expectedRescheduleTime: now.Add((2 + 5) * time.Second), 4411 }, 4412 } 4413 for _, tc := range testCases { 4414 t.Run(tc.desc, func(t *testing.T) { 4415 j := testJob() 4416 a := &Allocation{ 4417 ClientStatus: tc.status, 4418 Job: j, 4419 TaskStates: map[string]*TaskState{}, 4420 } 4421 4422 if tc.status == AllocClientStatusLost { 4423 a.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost) 4424 } 4425 4426 j.TaskGroups[0].StopAfterClientDisconnect = &tc.stop 4427 a.TaskGroup = j.TaskGroups[0].Name 4428 4429 require.Equal(t, tc.expectedShould, a.ShouldClientStop()) 4430 4431 if !tc.expectedShould || tc.status != AllocClientStatusLost { 4432 return 4433 } 4434 4435 // the reschedTime is close to the expectedRescheduleTime 4436 reschedTime := a.WaitClientStop() 4437 e := reschedTime.Unix() - tc.expectedRescheduleTime.Unix() 4438 require.Less(t, e, int64(2)) 4439 }) 4440 } 4441 } 4442 4443 func TestAllocation_Canonicalize_Old(t *testing.T) { 4444 alloc := MockAlloc() 4445 alloc.AllocatedResources = nil 4446 alloc.TaskResources = map[string]*Resources{ 4447 "web": { 4448 CPU: 500, 4449 MemoryMB: 256, 4450 Networks: []*NetworkResource{ 4451 { 4452 Device: "eth0", 4453 IP: "192.168.0.100", 4454 ReservedPorts: 
[]Port{{Label: "admin", Value: 5000}}, 4455 MBits: 50, 4456 DynamicPorts: []Port{{Label: "http", Value: 9876}}, 4457 }, 4458 }, 4459 }, 4460 } 4461 alloc.SharedResources = &Resources{ 4462 DiskMB: 150, 4463 } 4464 alloc.Canonicalize() 4465 4466 expected := &AllocatedResources{ 4467 Tasks: map[string]*AllocatedTaskResources{ 4468 "web": { 4469 Cpu: AllocatedCpuResources{ 4470 CpuShares: 500, 4471 }, 4472 Memory: AllocatedMemoryResources{ 4473 MemoryMB: 256, 4474 }, 4475 Networks: []*NetworkResource{ 4476 { 4477 Device: "eth0", 4478 IP: "192.168.0.100", 4479 ReservedPorts: []Port{{Label: "admin", Value: 5000}}, 4480 MBits: 50, 4481 DynamicPorts: []Port{{Label: "http", Value: 9876}}, 4482 }, 4483 }, 4484 }, 4485 }, 4486 Shared: AllocatedSharedResources{ 4487 DiskMB: 150, 4488 }, 4489 } 4490 4491 require.Equal(t, expected, alloc.AllocatedResources) 4492 } 4493 4494 // TestAllocation_Canonicalize_New asserts that an alloc with latest 4495 // schema isn't modified with Canonicalize 4496 func TestAllocation_Canonicalize_New(t *testing.T) { 4497 alloc := MockAlloc() 4498 copy := alloc.Copy() 4499 4500 alloc.Canonicalize() 4501 require.Equal(t, copy, alloc) 4502 } 4503 4504 func TestRescheduleTracker_Copy(t *testing.T) { 4505 type testCase struct { 4506 original *RescheduleTracker 4507 expected *RescheduleTracker 4508 } 4509 4510 cases := []testCase{ 4511 {nil, nil}, 4512 {&RescheduleTracker{Events: []*RescheduleEvent{ 4513 {RescheduleTime: 2, 4514 PrevAllocID: "12", 4515 PrevNodeID: "12", 4516 Delay: 30 * time.Second}, 4517 }}, &RescheduleTracker{Events: []*RescheduleEvent{ 4518 {RescheduleTime: 2, 4519 PrevAllocID: "12", 4520 PrevNodeID: "12", 4521 Delay: 30 * time.Second}, 4522 }}}, 4523 } 4524 4525 for _, tc := range cases { 4526 if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) { 4527 t.Fatalf("expected %v but got %v", *tc.expected, *got) 4528 } 4529 } 4530 } 4531 4532 func TestVault_Validate(t *testing.T) { 4533 v := &Vault{ 4534 Env: true, 4535 ChangeMode: VaultChangeModeNoop, 4536 } 4537 4538 if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") { 4539 t.Fatalf("Expected policy list empty error") 4540 } 4541 4542 v.Policies = []string{"foo", "root"} 4543 v.ChangeMode = VaultChangeModeSignal 4544 4545 err := v.Validate() 4546 if err == nil { 4547 t.Fatalf("Expected validation errors") 4548 } 4549 4550 if !strings.Contains(err.Error(), "Signal must") { 4551 t.Fatalf("Expected signal empty error") 4552 } 4553 if !strings.Contains(err.Error(), "root") { 4554 t.Fatalf("Expected root error") 4555 } 4556 } 4557 4558 func TestParameterizedJobConfig_Validate(t *testing.T) { 4559 d := &ParameterizedJobConfig{ 4560 Payload: "foo", 4561 } 4562 4563 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") { 4564 t.Fatalf("Expected unknown payload requirement: %v", err) 4565 } 4566 4567 d.Payload = DispatchPayloadOptional 4568 d.MetaOptional = []string{"foo", "bar"} 4569 d.MetaRequired = []string{"bar", "baz"} 4570 4571 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") { 4572 t.Fatalf("Expected meta not being disjoint error: %v", err) 4573 } 4574 } 4575 4576 func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { 4577 job := testJob() 4578 job.ParameterizedJob = &ParameterizedJobConfig{ 4579 Payload: DispatchPayloadOptional, 4580 } 4581 job.Type = JobTypeSystem 4582 4583 if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") { 4584 t.Fatalf("Expected bad 
scheduler type: %v", err)
4585 	}
4586 }
4587 
4588 func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
4589 	d := &ParameterizedJobConfig{}
4590 	d.Canonicalize()
4591 	if d.Payload != DispatchPayloadOptional {
4592 		t.Fatalf("Canonicalize failed")
4593 	}
4594 }
4595 
4596 func TestDispatchPayloadConfig_Validate(t *testing.T) {
4597 	d := &DispatchPayloadConfig{
4598 		File: "foo",
4599 	}
4600 
4601 	// task/local/haha
4602 	if err := d.Validate(); err != nil {
4603 		t.Fatalf("bad: %v", err)
4604 	}
4605 
4606 	// task/haha
4607 	d.File = "../haha"
4608 	if err := d.Validate(); err != nil {
4609 		t.Fatalf("bad: %v", err)
4610 	}
4611 
4612 	// ../haha
4613 	d.File = "../../../haha"
4614 	if err := d.Validate(); err == nil {
4615 		t.Fatalf("bad: %v", err)
4616 	}
4617 }
4618 
4619 func TestIsRecoverable(t *testing.T) {
4620 	if IsRecoverable(nil) {
4621 		t.Errorf("nil should not be recoverable")
4622 	}
4623 	if IsRecoverable(NewRecoverableError(nil, true)) {
4624 		t.Errorf("NewRecoverableError(nil, true) should not be recoverable")
4625 	}
4626 	if IsRecoverable(fmt.Errorf("i promise im recoverable")) {
4627 		t.Errorf("Custom errors should not be recoverable")
4628 	}
4629 	if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) {
4630 		t.Errorf("Explicitly unrecoverable errors should not be recoverable")
4631 	}
4632 	if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) {
4633 		t.Errorf("Explicitly recoverable errors *should* be recoverable")
4634 	}
4635 }
4636 
4637 func TestACLTokenValidate(t *testing.T) {
4638 	tk := &ACLToken{}
4639 
4640 	// Missing a type
4641 	err := tk.Validate()
4642 	assert.NotNil(t, err)
4643 	if !strings.Contains(err.Error(), "client or management") {
4644 		t.Fatalf("bad: %v", err)
4645 	}
4646 
4647 	// Missing policies
4648 	tk.Type = ACLClientToken
4649 	err = tk.Validate()
4650 	assert.NotNil(t, err)
4651 	if !strings.Contains(err.Error(), "missing policies") {
4652 		t.Fatalf("bad: %v", err)
4653 	}
4654 
4655 	// Invalid policies
4656 	tk.Type = ACLManagementToken
4657 	tk.Policies = []string{"foo"}
4658 	err = tk.Validate()
4659 	assert.NotNil(t, err)
4660 	if !strings.Contains(err.Error(), "associated with policies") {
4661 		t.Fatalf("bad: %v", err)
4662 	}
4663 
4664 	// Name too long
4665 	tk.Name = ""
4666 	for i := 0; i < 8; i++ {
4667 		tk.Name += uuid.Generate()
4668 	}
4669 	tk.Policies = nil
4670 	err = tk.Validate()
4671 	assert.NotNil(t, err)
4672 	if !strings.Contains(err.Error(), "too long") {
4673 		t.Fatalf("bad: %v", err)
4674 	}
4675 
4676 	// Make it valid
4677 	tk.Name = "foo"
4678 	err = tk.Validate()
4679 	assert.Nil(t, err)
4680 }
4681 
4682 func TestACLTokenPolicySubset(t *testing.T) {
4683 	tk := &ACLToken{
4684 		Type:     ACLClientToken,
4685 		Policies: []string{"foo", "bar", "baz"},
4686 	}
4687 
4688 	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
4689 	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
4690 	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
4691 	assert.Equal(t, true, tk.PolicySubset([]string{}))
4692 	assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"}))
4693 	assert.Equal(t, false, tk.PolicySubset([]string{"new"}))
4694 
4695 	tk = &ACLToken{
4696 		Type: ACLManagementToken,
4697 	}
4698 
4699 	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
4700 	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
4701 	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
4702 	assert.Equal(t, true, tk.PolicySubset([]string{}))
4703 	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"}))
4704 
func TestACLTokenPolicySubset(t *testing.T) {
	tk := &ACLToken{
		Type:     ACLClientToken,
		Policies: []string{"foo", "bar", "baz"},
	}

	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
	assert.Equal(t, true, tk.PolicySubset([]string{}))
	assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"}))
	assert.Equal(t, false, tk.PolicySubset([]string{"new"}))

	tk = &ACLToken{
		Type: ACLManagementToken,
	}

	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
	assert.Equal(t, true, tk.PolicySubset([]string{}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"new"}))
}

func TestACLTokenSetHash(t *testing.T) {
	tk := &ACLToken{
		Name:     "foo",
		Type:     ACLClientToken,
		Policies: []string{"foo", "bar"},
		Global:   false,
	}
	out1 := tk.SetHash()
	assert.NotNil(t, out1)
	assert.NotNil(t, tk.Hash)
	assert.Equal(t, out1, tk.Hash)

	tk.Policies = []string{"foo"}
	out2 := tk.SetHash()
	assert.NotNil(t, out2)
	assert.NotNil(t, tk.Hash)
	assert.Equal(t, out2, tk.Hash)
	assert.NotEqual(t, out1, out2)
}

func TestACLPolicySetHash(t *testing.T) {
	ap := &ACLPolicy{
		Name:        "foo",
		Description: "great policy",
		Rules:       "node { policy = \"read\" }",
	}
	out1 := ap.SetHash()
	assert.NotNil(t, out1)
	assert.NotNil(t, ap.Hash)
	assert.Equal(t, out1, ap.Hash)

	ap.Rules = "node { policy = \"write\" }"
	out2 := ap.SetHash()
	assert.NotNil(t, out2)
	assert.NotNil(t, ap.Hash)
	assert.Equal(t, out2, ap.Hash)
	assert.NotEqual(t, out1, out2)
}
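// PopulateEventDisplayMessage derives a human-readable DisplayMessage from
// the event type and any setter-provided details. A nil event is a no-op,
// and an already-populated DisplayMessage is preserved.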
func TestTaskEventPopulate(t *testing.T) {
	prepopulatedEvent := NewTaskEvent(TaskSetup)
	prepopulatedEvent.DisplayMessage = "Hola"
	testcases := []struct {
		event       *TaskEvent
		expectedMsg string
	}{
		{nil, ""},
		{prepopulatedEvent, "Hola"},
		{NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"},
		{NewTaskEvent(TaskStarted), "Task started by client"},
		{NewTaskEvent(TaskReceived), "Task received by client"},
		{NewTaskEvent(TaskFailedValidation), "Validation of task failed"},
		{NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"},
		{NewTaskEvent(TaskSetupFailure), "Task setup failed"},
		{NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"},
		{NewTaskEvent(TaskDriverFailure), "Failed to start task"},
		{NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"},
		{NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"},
		{NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"},
		{NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"},
		{NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"},
		{NewTaskEvent(TaskKilling), "Sent interrupt"},
		{NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"},
		{NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"},
		{NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"},
		{NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""},
		{NewTaskEvent(TaskKilled), "Task successfully killed"},
		{NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"},
		{NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"},
		{NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"},
		{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
		{NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"},
		{NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"},
		{NewTaskEvent(TaskSignaling), "Task being sent a signal"},
		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"},
		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"},
		{NewTaskEvent(TaskRestartSignal), "Task signaled to restart"},
		{NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"},
		{NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"},
		{NewTaskEvent("Unknown Type, No message"), ""},
		{NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"},
	}

	for _, tc := range testcases {
		tc.event.PopulateEventDisplayMessage()
		if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg {
			t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage)
		}
	}
}
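// NetworkResource.Equals compares by value: IP, MBits, and the full
// contents of ReservedPorts and DynamicPorts must all match. Each case
// below pairs two resources with the expected comparison result.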
return false", 4884 }, 4885 { 4886 []*NetworkResource{ 4887 { 4888 IP: "10.0.0.1", 4889 MBits: 50, 4890 ReservedPorts: []Port{{"web", 80, 0}}, 4891 }, 4892 { 4893 IP: "10.0.0.1", 4894 MBits: 50, 4895 ReservedPorts: []Port{{"notweb", 80, 0}}, 4896 }, 4897 }, 4898 false, 4899 "Different valued ReservedPorts values should return false", 4900 }, 4901 { 4902 []*NetworkResource{ 4903 { 4904 IP: "10.0.0.1", 4905 MBits: 50, 4906 DynamicPorts: []Port{{"web", 80, 0}}, 4907 }, 4908 { 4909 IP: "10.0.0.1", 4910 MBits: 50, 4911 DynamicPorts: []Port{{"web", 80, 0}, {"web", 80, 0}}, 4912 }, 4913 }, 4914 false, 4915 "Different DynamicPorts lengths should return false", 4916 }, 4917 { 4918 []*NetworkResource{ 4919 { 4920 IP: "10.0.0.1", 4921 MBits: 50, 4922 DynamicPorts: []Port{{"web", 80, 0}}, 4923 }, 4924 { 4925 IP: "10.0.0.1", 4926 MBits: 50, 4927 DynamicPorts: []Port{}, 4928 }, 4929 }, 4930 false, 4931 "Empty and non empty DynamicPorts values should return false", 4932 }, 4933 { 4934 []*NetworkResource{ 4935 { 4936 IP: "10.0.0.1", 4937 MBits: 50, 4938 DynamicPorts: []Port{{"web", 80, 0}}, 4939 }, 4940 { 4941 IP: "10.0.0.1", 4942 MBits: 50, 4943 DynamicPorts: []Port{{"notweb", 80, 0}}, 4944 }, 4945 }, 4946 false, 4947 "Different valued DynamicPorts values should return false", 4948 }, 4949 } 4950 for _, testCase := range networkResourcesTest { 4951 first := testCase.input[0] 4952 second := testCase.input[1] 4953 require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg) 4954 } 4955 } 4956 4957 func TestNode_Canonicalize(t *testing.T) { 4958 t.Parallel() 4959 require := require.New(t) 4960 4961 // Make sure the eligiblity is set properly 4962 node := &Node{} 4963 node.Canonicalize() 4964 require.Equal(NodeSchedulingEligible, node.SchedulingEligibility) 4965 4966 node = &Node{ 4967 Drain: true, 4968 } 4969 node.Canonicalize() 4970 require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility) 4971 } 4972 4973 func TestNode_Copy(t *testing.T) { 4974 t.Parallel() 4975 require := require.New(t) 4976 4977 node := &Node{ 4978 ID: uuid.Generate(), 4979 SecretID: uuid.Generate(), 4980 Datacenter: "dc1", 4981 Name: "foobar", 4982 Attributes: map[string]string{ 4983 "kernel.name": "linux", 4984 "arch": "x86", 4985 "nomad.version": "0.5.0", 4986 "driver.exec": "1", 4987 "driver.mock_driver": "1", 4988 }, 4989 Resources: &Resources{ 4990 CPU: 4000, 4991 MemoryMB: 8192, 4992 DiskMB: 100 * 1024, 4993 Networks: []*NetworkResource{ 4994 { 4995 Device: "eth0", 4996 CIDR: "192.168.0.100/32", 4997 MBits: 1000, 4998 }, 4999 }, 5000 }, 5001 Reserved: &Resources{ 5002 CPU: 100, 5003 MemoryMB: 256, 5004 DiskMB: 4 * 1024, 5005 Networks: []*NetworkResource{ 5006 { 5007 Device: "eth0", 5008 IP: "192.168.0.100", 5009 ReservedPorts: []Port{{Label: "ssh", Value: 22}}, 5010 MBits: 1, 5011 }, 5012 }, 5013 }, 5014 NodeResources: &NodeResources{ 5015 Cpu: NodeCpuResources{ 5016 CpuShares: 4000, 5017 }, 5018 Memory: NodeMemoryResources{ 5019 MemoryMB: 8192, 5020 }, 5021 Disk: NodeDiskResources{ 5022 DiskMB: 100 * 1024, 5023 }, 5024 Networks: []*NetworkResource{ 5025 { 5026 Device: "eth0", 5027 CIDR: "192.168.0.100/32", 5028 MBits: 1000, 5029 }, 5030 }, 5031 }, 5032 ReservedResources: &NodeReservedResources{ 5033 Cpu: NodeReservedCpuResources{ 5034 CpuShares: 100, 5035 }, 5036 Memory: NodeReservedMemoryResources{ 5037 MemoryMB: 256, 5038 }, 5039 Disk: NodeReservedDiskResources{ 5040 DiskMB: 4 * 1024, 5041 }, 5042 Networks: NodeReservedNetworkResources{ 5043 ReservedHostPorts: "22", 5044 }, 5045 }, 5046 Links: 
func TestNode_Copy(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	node := &Node{
		ID:         uuid.Generate(),
		SecretID:   uuid.Generate(),
		Datacenter: "dc1",
		Name:       "foobar",
		Attributes: map[string]string{
			"kernel.name":        "linux",
			"arch":               "x86",
			"nomad.version":      "0.5.0",
			"driver.exec":        "1",
			"driver.mock_driver": "1",
		},
		Resources: &Resources{
			CPU:      4000,
			MemoryMB: 8192,
			DiskMB:   100 * 1024,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "192.168.0.100/32",
					MBits:  1000,
				},
			},
		},
		Reserved: &Resources{
			CPU:      100,
			MemoryMB: 256,
			DiskMB:   4 * 1024,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "192.168.0.100",
					ReservedPorts: []Port{{Label: "ssh", Value: 22}},
					MBits:         1,
				},
			},
		},
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 4000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 8192,
			},
			Disk: NodeDiskResources{
				DiskMB: 100 * 1024,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "192.168.0.100/32",
					MBits:  1000,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 100,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 256,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 4 * 1024,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "22",
			},
		},
		Links: map[string]string{
			"consul": "foobar.dc1",
		},
		Meta: map[string]string{
			"pci-dss":  "true",
			"database": "mysql",
			"version":  "5.6",
		},
		NodeClass:             "linux-medium-pci",
		Status:                NodeStatusReady,
		SchedulingEligibility: NodeSchedulingEligible,
		Drivers: map[string]*DriverInfo{
			"mock_driver": {
				Attributes:        map[string]string{"running": "1"},
				Detected:          true,
				Healthy:           true,
				HealthDescription: "Currently active",
				UpdateTime:        time.Now(),
			},
		},
	}
	require.NoError(node.ComputeClass())

	node2 := node.Copy()

	require.Equal(node.Attributes, node2.Attributes)
	require.Equal(node.Resources, node2.Resources)
	require.Equal(node.Reserved, node2.Reserved)
	require.Equal(node.Links, node2.Links)
	require.Equal(node.Meta, node2.Meta)
	require.Equal(node.Events, node2.Events)
	require.Equal(node.DrainStrategy, node2.DrainStrategy)
	require.Equal(node.Drivers, node2.Drivers)
}

func TestSpread_Validate(t *testing.T) {
	type tc struct {
		spread *Spread
		err    error
		name   string
	}

	testCases := []tc{
		{
			spread: &Spread{},
			err:    fmt.Errorf("Missing spread attribute"),
			name:   "empty spread",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    -1,
			},
			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
			name: "negative weight",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    110,
			},
			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
			name: "weight over 100",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    50,
				SpreadTarget: []*SpreadTarget{
					{
						Value:   "dc1",
						Percent: 25,
					},
					{
						Value:   "dc2",
						Percent: 150,
					},
				},
			},
			err:  fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"),
			name: "target percentage over 100",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    50,
				SpreadTarget: []*SpreadTarget{
					{
						Value:   "dc1",
						Percent: 75,
					},
					{
						Value:   "dc2",
						Percent: 75,
					},
				},
			},
			err:  fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150),
			name: "target percentages sum over 100",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    50,
				SpreadTarget: []*SpreadTarget{
					{
						Value:   "dc1",
						Percent: 25,
					},
					{
						Value:   "dc1",
						Percent: 50,
					},
				},
			},
			err:  fmt.Errorf("Spread target value \"dc1\" already defined"),
			name: "duplicate spread target",
		},
		{
			spread: &Spread{
				Attribute: "${node.datacenter}",
				Weight:    50,
				SpreadTarget: []*SpreadTarget{
					{
						Value:   "dc1",
						Percent: 25,
					},
					{
						Value:   "dc2",
						Percent: 50,
					},
				},
			},
			err:  nil,
			name: "valid spread",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.spread.Validate()
			if tc.err != nil {
				require.NotNil(t, err)
				require.Contains(t, err.Error(), tc.err.Error())
			} else {
				require.Nil(t, err)
			}
		})
	}
}
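// ParseReservedHostPorts parses a comma-separated list of port numbers and
// inclusive ranges into a sorted, de-duplicated set, rejecting reversed
// ranges such as "3-1". A minimal sketch of the contract the cases below
// exercise (example values assumed, not from the test table):
//
//	r := &NodeReservedNetworkResources{ReservedHostPorts: "22,80,1000-1002"}
//	out, err := r.ParseReservedHostPorts()
//	// out == []uint64{22, 80, 1000, 1001, 1002}, err == nil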
func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) {
	require := require.New(t)
	cases := []struct {
		Input  string
		Parsed []uint64
		Err    bool
	}{
		{
			"1,2,3",
			[]uint64{1, 2, 3},
			false,
		},
		{
			"3,1,2,1,2,3,1-3",
			[]uint64{1, 2, 3},
			false,
		},
		{
			"3-1",
			nil,
			true,
		},
		{
			"1-3,2-4",
			[]uint64{1, 2, 3, 4},
			false,
		},
		{
			"1-3,4,5-5,6,7,8-10",
			[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
			false,
		},
	}

	for i, tc := range cases {
		r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input}
		out, err := r.ParseReservedHostPorts()
		if (err != nil) != tc.Err {
			t.Fatalf("test case %d: %v", i, err)
		}

		require.Equal(tc.Parsed, out)
	}
}