github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/nomad/structs/structs_test.go (about) 1 package structs 2 3 import ( 4 "fmt" 5 "os" 6 "reflect" 7 "strings" 8 "testing" 9 "time" 10 11 "github.com/hashicorp/consul/api" 12 "github.com/hashicorp/go-multierror" 13 "github.com/hashicorp/nomad/helper/uuid" 14 15 "github.com/kr/pretty" 16 "github.com/stretchr/testify/assert" 17 "github.com/stretchr/testify/require" 18 ) 19 20 func TestJob_Validate(t *testing.T) { 21 j := &Job{} 22 err := j.Validate() 23 requireErrors(t, err, 24 "datacenters", 25 "job ID", 26 "job name", 27 "job region", 28 "job type", 29 "namespace", 30 "priority", 31 "task groups", 32 ) 33 34 j = &Job{ 35 Type: "invalid-job-type", 36 } 37 err = j.Validate() 38 if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) { 39 t.Errorf("expected %s but found: %v", expected, err) 40 } 41 42 j = &Job{ 43 Type: JobTypeService, 44 Periodic: &PeriodicConfig{ 45 Enabled: true, 46 }, 47 } 48 err = j.Validate() 49 require.Error(t, err, "Periodic") 50 51 j = &Job{ 52 Region: "global", 53 ID: uuid.Generate(), 54 Namespace: "test", 55 Name: "my-job", 56 Type: JobTypeService, 57 Priority: 50, 58 Datacenters: []string{"dc1"}, 59 TaskGroups: []*TaskGroup{ 60 { 61 Name: "web", 62 RestartPolicy: &RestartPolicy{ 63 Interval: 5 * time.Minute, 64 Delay: 10 * time.Second, 65 Attempts: 10, 66 }, 67 }, 68 { 69 Name: "web", 70 RestartPolicy: &RestartPolicy{ 71 Interval: 5 * time.Minute, 72 Delay: 10 * time.Second, 73 Attempts: 10, 74 }, 75 }, 76 { 77 RestartPolicy: &RestartPolicy{ 78 Interval: 5 * time.Minute, 79 Delay: 10 * time.Second, 80 Attempts: 10, 81 }, 82 }, 83 }, 84 } 85 err = j.Validate() 86 requireErrors(t, err, 87 "2 redefines 'web' from group 1", 88 "group 3 missing name", 89 "Task group web validation failed", 90 ) 91 // test for empty datacenters 92 j = &Job{ 93 Datacenters: []string{""}, 94 } 95 err = j.Validate() 96 require.Error(t, err, "datacenter must be non-empty string") 97 } 98 99 func TestJob_ValidateScaling(t *testing.T) { 100 require := require.New(t) 101 102 p := &ScalingPolicy{ 103 Policy: nil, // allowed to be nil 104 Type: ScalingPolicyTypeHorizontal, 105 Min: 5, 106 Max: 5, 107 Enabled: true, 108 } 109 job := testJob() 110 job.TaskGroups[0].Scaling = p 111 job.TaskGroups[0].Count = 5 112 113 require.NoError(job.Validate()) 114 115 // min <= max 116 p.Max = 0 117 p.Min = 10 118 err := job.Validate() 119 requireErrors(t, err, 120 "task group count must not be less than minimum count in scaling policy", 121 "task group count must not be greater than maximum count in scaling policy", 122 ) 123 124 // count <= max 125 p.Max = 0 126 p.Min = 5 127 job.TaskGroups[0].Count = 5 128 err = job.Validate() 129 require.Error(err, 130 "task group count must not be greater than maximum count in scaling policy", 131 ) 132 133 // min <= count 134 job.TaskGroups[0].Count = 0 135 p.Min = 5 136 p.Max = 5 137 err = job.Validate() 138 require.Error(err, 139 "task group count must not be less than minimum count in scaling policy", 140 ) 141 } 142 143 func TestJob_ValidateNullChar(t *testing.T) { 144 assert := assert.New(t) 145 146 // job id should not allow null characters 147 job := testJob() 148 job.ID = "id_with\000null_character" 149 assert.Error(job.Validate(), "null character in job ID should not validate") 150 151 // job name should not allow null characters 152 job.ID = "happy_little_job_id" 153 job.Name = "my job name with \000 characters" 154 assert.Error(job.Validate(), "null character in job name should not validate") 155 156 // task group name should not allow null characters 157 job.Name = "my job" 158 job.TaskGroups[0].Name = "oh_no_another_\000_char" 159 assert.Error(job.Validate(), "null character in task group name should not validate") 160 161 // task name should not allow null characters 162 job.TaskGroups[0].Name = "so_much_better" 163 job.TaskGroups[0].Tasks[0].Name = "ive_had_it_with_these_\000_chars_in_these_names" 164 assert.Error(job.Validate(), "null character in task name should not validate") 165 } 166 167 func TestJob_Warnings(t *testing.T) { 168 cases := []struct { 169 Name string 170 Job *Job 171 Expected []string 172 }{ 173 { 174 Name: "Higher counts for update stanza", 175 Expected: []string{"max parallel count is greater"}, 176 Job: &Job{ 177 Type: JobTypeService, 178 TaskGroups: []*TaskGroup{ 179 { 180 Name: "foo", 181 Count: 2, 182 Update: &UpdateStrategy{ 183 MaxParallel: 10, 184 }, 185 }, 186 }, 187 }, 188 }, 189 { 190 Name: "AutoPromote mixed TaskGroups", 191 Expected: []string{"auto_promote must be true for all groups"}, 192 Job: &Job{ 193 Type: JobTypeService, 194 TaskGroups: []*TaskGroup{ 195 { 196 Update: &UpdateStrategy{ 197 AutoPromote: true, 198 }, 199 }, 200 { 201 Update: &UpdateStrategy{ 202 AutoPromote: false, 203 }, 204 }, 205 }, 206 }, 207 }, 208 { 209 Name: "Template.VaultGrace Deprecated", 210 Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."}, 211 Job: &Job{ 212 Type: JobTypeService, 213 TaskGroups: []*TaskGroup{ 214 { 215 Tasks: []*Task{ 216 { 217 Templates: []*Template{ 218 { 219 VaultGrace: 1, 220 }, 221 }, 222 }, 223 }, 224 }, 225 }, 226 }, 227 }, 228 } 229 230 for _, c := range cases { 231 t.Run(c.Name, func(t *testing.T) { 232 warnings := c.Job.Warnings() 233 if warnings == nil { 234 if len(c.Expected) == 0 { 235 return 236 } 237 t.Fatal("Got no warnings when they were expected") 238 } 239 240 a := warnings.Error() 241 for _, e := range c.Expected { 242 if !strings.Contains(a, e) { 243 t.Fatalf("Got warnings %q; didn't contain %q", a, e) 244 } 245 } 246 }) 247 } 248 } 249 250 func TestJob_SpecChanged(t *testing.T) { 251 // Get a base test job 252 base := testJob() 253 254 // Only modify the indexes/mutable state of the job 255 mutatedBase := base.Copy() 256 mutatedBase.Status = "foo" 257 mutatedBase.ModifyIndex = base.ModifyIndex + 100 258 259 // changed contains a spec change that should be detected 260 change := base.Copy() 261 change.Priority = 99 262 263 cases := []struct { 264 Name string 265 Original *Job 266 New *Job 267 Changed bool 268 }{ 269 { 270 Name: "Same job except mutable indexes", 271 Changed: false, 272 Original: base, 273 New: mutatedBase, 274 }, 275 { 276 Name: "Different", 277 Changed: true, 278 Original: base, 279 New: change, 280 }, 281 } 282 283 for _, c := range cases { 284 t.Run(c.Name, func(t *testing.T) { 285 if actual := c.Original.SpecChanged(c.New); actual != c.Changed { 286 t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed) 287 } 288 }) 289 } 290 } 291 292 func testJob() *Job { 293 return &Job{ 294 Region: "global", 295 ID: uuid.Generate(), 296 Namespace: "test", 297 Name: "my-job", 298 Type: JobTypeService, 299 Priority: 50, 300 AllAtOnce: false, 301 Datacenters: []string{"dc1"}, 302 Constraints: []*Constraint{ 303 { 304 LTarget: "$attr.kernel.name", 305 RTarget: "linux", 306 Operand: "=", 307 }, 308 }, 309 Periodic: &PeriodicConfig{ 310 Enabled: false, 311 }, 312 TaskGroups: []*TaskGroup{ 313 { 314 Name: "web", 315 Count: 10, 316 EphemeralDisk: DefaultEphemeralDisk(), 317 RestartPolicy: &RestartPolicy{ 318 Mode: RestartPolicyModeFail, 319 Attempts: 3, 320 Interval: 10 * time.Minute, 321 Delay: 1 * time.Minute, 322 }, 323 ReschedulePolicy: &ReschedulePolicy{ 324 Interval: 5 * time.Minute, 325 Attempts: 10, 326 Delay: 5 * time.Second, 327 DelayFunction: "constant", 328 }, 329 Networks: []*NetworkResource{ 330 { 331 DynamicPorts: []Port{ 332 {Label: "http"}, 333 }, 334 }, 335 }, 336 Services: []*Service{ 337 { 338 Name: "${TASK}-frontend", 339 PortLabel: "http", 340 }, 341 }, 342 Tasks: []*Task{ 343 { 344 Name: "web", 345 Driver: "exec", 346 Config: map[string]interface{}{ 347 "command": "/bin/date", 348 }, 349 Env: map[string]string{ 350 "FOO": "bar", 351 }, 352 Artifacts: []*TaskArtifact{ 353 { 354 GetterSource: "http://foo.com", 355 }, 356 }, 357 Resources: &Resources{ 358 CPU: 500, 359 MemoryMB: 256, 360 }, 361 LogConfig: &LogConfig{ 362 MaxFiles: 10, 363 MaxFileSizeMB: 1, 364 }, 365 }, 366 }, 367 Meta: map[string]string{ 368 "elb_check_type": "http", 369 "elb_check_interval": "30s", 370 "elb_check_min": "3", 371 }, 372 }, 373 }, 374 Meta: map[string]string{ 375 "owner": "armon", 376 }, 377 } 378 } 379 380 func TestJob_Copy(t *testing.T) { 381 j := testJob() 382 c := j.Copy() 383 if !reflect.DeepEqual(j, c) { 384 t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j) 385 } 386 } 387 388 func TestJob_IsPeriodic(t *testing.T) { 389 j := &Job{ 390 Type: JobTypeService, 391 Periodic: &PeriodicConfig{ 392 Enabled: true, 393 }, 394 } 395 if !j.IsPeriodic() { 396 t.Fatalf("IsPeriodic() returned false on periodic job") 397 } 398 399 j = &Job{ 400 Type: JobTypeService, 401 } 402 if j.IsPeriodic() { 403 t.Fatalf("IsPeriodic() returned true on non-periodic job") 404 } 405 } 406 407 func TestJob_IsPeriodicActive(t *testing.T) { 408 cases := []struct { 409 job *Job 410 active bool 411 }{ 412 { 413 job: &Job{ 414 Type: JobTypeService, 415 Periodic: &PeriodicConfig{ 416 Enabled: true, 417 }, 418 }, 419 active: true, 420 }, 421 { 422 job: &Job{ 423 Type: JobTypeService, 424 Periodic: &PeriodicConfig{ 425 Enabled: false, 426 }, 427 }, 428 active: false, 429 }, 430 { 431 job: &Job{ 432 Type: JobTypeService, 433 Periodic: &PeriodicConfig{ 434 Enabled: true, 435 }, 436 Stop: true, 437 }, 438 active: false, 439 }, 440 { 441 job: &Job{ 442 Type: JobTypeService, 443 Periodic: &PeriodicConfig{ 444 Enabled: false, 445 }, 446 ParameterizedJob: &ParameterizedJobConfig{}, 447 }, 448 active: false, 449 }, 450 } 451 452 for i, c := range cases { 453 if act := c.job.IsPeriodicActive(); act != c.active { 454 t.Fatalf("case %d failed: got %v; want %v", i, act, c.active) 455 } 456 } 457 } 458 459 func TestJob_SystemJob_Validate(t *testing.T) { 460 j := testJob() 461 j.Type = JobTypeSystem 462 j.TaskGroups[0].ReschedulePolicy = nil 463 j.Canonicalize() 464 465 err := j.Validate() 466 if err == nil || !strings.Contains(err.Error(), "exceed") { 467 t.Fatalf("expect error due to count") 468 } 469 470 j.TaskGroups[0].Count = 0 471 if err := j.Validate(); err != nil { 472 t.Fatalf("unexpected err: %v", err) 473 } 474 475 j.TaskGroups[0].Count = 1 476 if err := j.Validate(); err != nil { 477 t.Fatalf("unexpected err: %v", err) 478 } 479 480 // Add affinities at job, task group and task level, that should fail validation 481 482 j.Affinities = []*Affinity{{ 483 Operand: "=", 484 LTarget: "${node.datacenter}", 485 RTarget: "dc1", 486 }} 487 j.TaskGroups[0].Affinities = []*Affinity{{ 488 Operand: "=", 489 LTarget: "${meta.rack}", 490 RTarget: "r1", 491 }} 492 j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{ 493 Operand: "=", 494 LTarget: "${meta.rack}", 495 RTarget: "r1", 496 }} 497 err = j.Validate() 498 require.NotNil(t, err) 499 require.Contains(t, err.Error(), "System jobs may not have an affinity stanza") 500 501 // Add spread at job and task group level, that should fail validation 502 j.Spreads = []*Spread{{ 503 Attribute: "${node.datacenter}", 504 Weight: 100, 505 }} 506 j.TaskGroups[0].Spreads = []*Spread{{ 507 Attribute: "${node.datacenter}", 508 Weight: 100, 509 }} 510 511 err = j.Validate() 512 require.NotNil(t, err) 513 require.Contains(t, err.Error(), "System jobs may not have a spread stanza") 514 515 } 516 517 func TestJob_VaultPolicies(t *testing.T) { 518 j0 := &Job{} 519 e0 := make(map[string]map[string]*Vault, 0) 520 521 vj1 := &Vault{ 522 Policies: []string{ 523 "p1", 524 "p2", 525 }, 526 } 527 vj2 := &Vault{ 528 Policies: []string{ 529 "p3", 530 "p4", 531 }, 532 } 533 vj3 := &Vault{ 534 Policies: []string{ 535 "p5", 536 }, 537 } 538 j1 := &Job{ 539 TaskGroups: []*TaskGroup{ 540 { 541 Name: "foo", 542 Tasks: []*Task{ 543 { 544 Name: "t1", 545 }, 546 { 547 Name: "t2", 548 Vault: vj1, 549 }, 550 }, 551 }, 552 { 553 Name: "bar", 554 Tasks: []*Task{ 555 { 556 Name: "t3", 557 Vault: vj2, 558 }, 559 { 560 Name: "t4", 561 Vault: vj3, 562 }, 563 }, 564 }, 565 }, 566 } 567 568 e1 := map[string]map[string]*Vault{ 569 "foo": { 570 "t2": vj1, 571 }, 572 "bar": { 573 "t3": vj2, 574 "t4": vj3, 575 }, 576 } 577 578 cases := []struct { 579 Job *Job 580 Expected map[string]map[string]*Vault 581 }{ 582 { 583 Job: j0, 584 Expected: e0, 585 }, 586 { 587 Job: j1, 588 Expected: e1, 589 }, 590 } 591 592 for i, c := range cases { 593 got := c.Job.VaultPolicies() 594 if !reflect.DeepEqual(got, c.Expected) { 595 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 596 } 597 } 598 } 599 600 func TestJob_ConnectTasks(t *testing.T) { 601 t.Parallel() 602 r := require.New(t) 603 604 j0 := &Job{ 605 TaskGroups: []*TaskGroup{{ 606 Name: "tg1", 607 Tasks: []*Task{{ 608 Name: "connect-proxy-task1", 609 Kind: "connect-proxy:task1", 610 }, { 611 Name: "task2", 612 Kind: "task2", 613 }, { 614 Name: "connect-proxy-task3", 615 Kind: "connect-proxy:task3", 616 }}, 617 }, { 618 Name: "tg2", 619 Tasks: []*Task{{ 620 Name: "task1", 621 Kind: "task1", 622 }, { 623 Name: "connect-proxy-task2", 624 Kind: "connect-proxy:task2", 625 }}, 626 }, { 627 Name: "tg3", 628 Tasks: []*Task{{ 629 Name: "ingress", 630 Kind: "connect-ingress:ingress", 631 }}, 632 }, { 633 Name: "tg4", 634 Tasks: []*Task{{ 635 Name: "frontend", 636 Kind: "connect-native:uuid-fe", 637 }, { 638 Name: "generator", 639 Kind: "connect-native:uuid-api", 640 }}, 641 }, { 642 Name: "tg5", 643 Tasks: []*Task{{ 644 Name: "t1000", 645 Kind: "connect-terminating:t1000", 646 }}, 647 }}, 648 } 649 650 connectTasks := j0.ConnectTasks() 651 652 exp := []TaskKind{ 653 NewTaskKind(ConnectProxyPrefix, "task1"), 654 NewTaskKind(ConnectProxyPrefix, "task3"), 655 NewTaskKind(ConnectProxyPrefix, "task2"), 656 NewTaskKind(ConnectIngressPrefix, "ingress"), 657 NewTaskKind(ConnectNativePrefix, "uuid-fe"), 658 NewTaskKind(ConnectNativePrefix, "uuid-api"), 659 NewTaskKind(ConnectTerminatingPrefix, "t1000"), 660 } 661 662 r.Equal(exp, connectTasks) 663 } 664 665 func TestJob_RequiredSignals(t *testing.T) { 666 j0 := &Job{} 667 e0 := make(map[string]map[string][]string, 0) 668 669 vj1 := &Vault{ 670 Policies: []string{"p1"}, 671 ChangeMode: VaultChangeModeNoop, 672 } 673 vj2 := &Vault{ 674 Policies: []string{"p1"}, 675 ChangeMode: VaultChangeModeSignal, 676 ChangeSignal: "SIGUSR1", 677 } 678 tj1 := &Template{ 679 SourcePath: "foo", 680 DestPath: "bar", 681 ChangeMode: TemplateChangeModeNoop, 682 } 683 tj2 := &Template{ 684 SourcePath: "foo", 685 DestPath: "bar", 686 ChangeMode: TemplateChangeModeSignal, 687 ChangeSignal: "SIGUSR2", 688 } 689 j1 := &Job{ 690 TaskGroups: []*TaskGroup{ 691 { 692 Name: "foo", 693 Tasks: []*Task{ 694 { 695 Name: "t1", 696 }, 697 { 698 Name: "t2", 699 Vault: vj2, 700 Templates: []*Template{tj2}, 701 }, 702 }, 703 }, 704 { 705 Name: "bar", 706 Tasks: []*Task{ 707 { 708 Name: "t3", 709 Vault: vj1, 710 Templates: []*Template{tj1}, 711 }, 712 { 713 Name: "t4", 714 Vault: vj2, 715 }, 716 }, 717 }, 718 }, 719 } 720 721 e1 := map[string]map[string][]string{ 722 "foo": { 723 "t2": {"SIGUSR1", "SIGUSR2"}, 724 }, 725 "bar": { 726 "t4": {"SIGUSR1"}, 727 }, 728 } 729 730 j2 := &Job{ 731 TaskGroups: []*TaskGroup{ 732 { 733 Name: "foo", 734 Tasks: []*Task{ 735 { 736 Name: "t1", 737 KillSignal: "SIGQUIT", 738 }, 739 }, 740 }, 741 }, 742 } 743 744 e2 := map[string]map[string][]string{ 745 "foo": { 746 "t1": {"SIGQUIT"}, 747 }, 748 } 749 750 cases := []struct { 751 Job *Job 752 Expected map[string]map[string][]string 753 }{ 754 { 755 Job: j0, 756 Expected: e0, 757 }, 758 { 759 Job: j1, 760 Expected: e1, 761 }, 762 { 763 Job: j2, 764 Expected: e2, 765 }, 766 } 767 768 for i, c := range cases { 769 got := c.Job.RequiredSignals() 770 if !reflect.DeepEqual(got, c.Expected) { 771 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 772 } 773 } 774 } 775 776 // test new Equal comparisons for components of Jobs 777 func TestJob_PartEqual(t *testing.T) { 778 ns := &Networks{} 779 require.True(t, ns.Equals(&Networks{})) 780 781 ns = &Networks{ 782 &NetworkResource{Device: "eth0"}, 783 } 784 require.True(t, ns.Equals(&Networks{ 785 &NetworkResource{Device: "eth0"}, 786 })) 787 788 ns = &Networks{ 789 &NetworkResource{Device: "eth0"}, 790 &NetworkResource{Device: "eth1"}, 791 &NetworkResource{Device: "eth2"}, 792 } 793 require.True(t, ns.Equals(&Networks{ 794 &NetworkResource{Device: "eth2"}, 795 &NetworkResource{Device: "eth0"}, 796 &NetworkResource{Device: "eth1"}, 797 })) 798 799 cs := &Constraints{ 800 &Constraint{"left0", "right0", "=", ""}, 801 &Constraint{"left1", "right1", "=", ""}, 802 &Constraint{"left2", "right2", "=", ""}, 803 } 804 require.True(t, cs.Equals(&Constraints{ 805 &Constraint{"left0", "right0", "=", ""}, 806 &Constraint{"left2", "right2", "=", ""}, 807 &Constraint{"left1", "right1", "=", ""}, 808 })) 809 810 as := &Affinities{ 811 &Affinity{"left0", "right0", "=", 0, ""}, 812 &Affinity{"left1", "right1", "=", 0, ""}, 813 &Affinity{"left2", "right2", "=", 0, ""}, 814 } 815 require.True(t, as.Equals(&Affinities{ 816 &Affinity{"left0", "right0", "=", 0, ""}, 817 &Affinity{"left2", "right2", "=", 0, ""}, 818 &Affinity{"left1", "right1", "=", 0, ""}, 819 })) 820 } 821 822 func TestTask_UsesConnect(t *testing.T) { 823 t.Parallel() 824 825 t.Run("normal task", func(t *testing.T) { 826 task := testJob().TaskGroups[0].Tasks[0] 827 usesConnect := task.UsesConnect() 828 require.False(t, usesConnect) 829 }) 830 831 t.Run("sidecar proxy", func(t *testing.T) { 832 task := &Task{ 833 Name: "connect-proxy-task1", 834 Kind: NewTaskKind(ConnectProxyPrefix, "task1"), 835 } 836 usesConnect := task.UsesConnect() 837 require.True(t, usesConnect) 838 }) 839 840 t.Run("native task", func(t *testing.T) { 841 task := &Task{ 842 Name: "task1", 843 Kind: NewTaskKind(ConnectNativePrefix, "task1"), 844 } 845 usesConnect := task.UsesConnect() 846 require.True(t, usesConnect) 847 }) 848 849 t.Run("ingress gateway", func(t *testing.T) { 850 task := &Task{ 851 Name: "task1", 852 Kind: NewTaskKind(ConnectIngressPrefix, "task1"), 853 } 854 usesConnect := task.UsesConnect() 855 require.True(t, usesConnect) 856 }) 857 858 t.Run("terminating gateway", func(t *testing.T) { 859 task := &Task{ 860 Name: "task1", 861 Kind: NewTaskKind(ConnectTerminatingPrefix, "task1"), 862 } 863 usesConnect := task.UsesConnect() 864 require.True(t, usesConnect) 865 }) 866 } 867 868 func TestTaskGroup_UsesConnect(t *testing.T) { 869 t.Parallel() 870 871 try := func(t *testing.T, tg *TaskGroup, exp bool) { 872 result := tg.UsesConnect() 873 require.Equal(t, exp, result) 874 } 875 876 t.Run("tg uses native", func(t *testing.T) { 877 try(t, &TaskGroup{ 878 Services: []*Service{ 879 {Connect: nil}, 880 {Connect: &ConsulConnect{Native: true}}, 881 }, 882 }, true) 883 }) 884 885 t.Run("tg uses sidecar", func(t *testing.T) { 886 try(t, &TaskGroup{ 887 Services: []*Service{{ 888 Connect: &ConsulConnect{ 889 SidecarService: &ConsulSidecarService{ 890 Port: "9090", 891 }, 892 }, 893 }}, 894 }, true) 895 }) 896 897 t.Run("tg uses gateway", func(t *testing.T) { 898 try(t, &TaskGroup{ 899 Services: []*Service{{ 900 Connect: &ConsulConnect{ 901 Gateway: consulIngressGateway1, 902 }, 903 }}, 904 }, true) 905 }) 906 907 t.Run("tg does not use connect", func(t *testing.T) { 908 try(t, &TaskGroup{ 909 Services: []*Service{ 910 {Connect: nil}, 911 }, 912 }, false) 913 }) 914 } 915 916 func TestTaskGroup_Validate(t *testing.T) { 917 j := testJob() 918 tg := &TaskGroup{ 919 Count: -1, 920 RestartPolicy: &RestartPolicy{ 921 Interval: 5 * time.Minute, 922 Delay: 10 * time.Second, 923 Attempts: 10, 924 Mode: RestartPolicyModeDelay, 925 }, 926 ReschedulePolicy: &ReschedulePolicy{ 927 Interval: 5 * time.Minute, 928 Attempts: 5, 929 Delay: 5 * time.Second, 930 }, 931 } 932 err := tg.Validate(j) 933 requireErrors(t, err, 934 "group name", 935 "count can't be negative", 936 "Missing tasks", 937 ) 938 939 tg = &TaskGroup{ 940 Tasks: []*Task{ 941 { 942 Name: "task-a", 943 Resources: &Resources{ 944 Networks: []*NetworkResource{ 945 { 946 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 947 }, 948 }, 949 }, 950 }, 951 { 952 Name: "task-b", 953 Resources: &Resources{ 954 Networks: []*NetworkResource{ 955 { 956 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 957 }, 958 }, 959 }, 960 }, 961 }, 962 } 963 err = tg.Validate(&Job{}) 964 expected := `Static port 123 already reserved by task-a:foo` 965 if !strings.Contains(err.Error(), expected) { 966 t.Errorf("expected %s but found: %v", expected, err) 967 } 968 969 tg = &TaskGroup{ 970 Tasks: []*Task{ 971 { 972 Name: "task-a", 973 Resources: &Resources{ 974 Networks: []*NetworkResource{ 975 { 976 ReservedPorts: []Port{ 977 {Label: "foo", Value: 123}, 978 {Label: "bar", Value: 123}, 979 }, 980 }, 981 }, 982 }, 983 }, 984 }, 985 } 986 err = tg.Validate(&Job{}) 987 expected = `Static port 123 already reserved by task-a:foo` 988 if !strings.Contains(err.Error(), expected) { 989 t.Errorf("expected %s but found: %v", expected, err) 990 } 991 992 tg = &TaskGroup{ 993 Name: "web", 994 Count: 1, 995 Tasks: []*Task{ 996 {Name: "web", Leader: true}, 997 {Name: "web", Leader: true}, 998 {}, 999 }, 1000 RestartPolicy: &RestartPolicy{ 1001 Interval: 5 * time.Minute, 1002 Delay: 10 * time.Second, 1003 Attempts: 10, 1004 Mode: RestartPolicyModeDelay, 1005 }, 1006 ReschedulePolicy: &ReschedulePolicy{ 1007 Interval: 5 * time.Minute, 1008 Attempts: 10, 1009 Delay: 5 * time.Second, 1010 DelayFunction: "constant", 1011 }, 1012 } 1013 1014 err = tg.Validate(j) 1015 requireErrors(t, err, 1016 "should have an ephemeral disk object", 1017 "2 redefines 'web' from task 1", 1018 "Task 3 missing name", 1019 "Only one task may be marked as leader", 1020 "Task web validation failed", 1021 ) 1022 1023 tg = &TaskGroup{ 1024 Name: "web", 1025 Count: 1, 1026 Tasks: []*Task{ 1027 {Name: "web", Leader: true}, 1028 }, 1029 Update: DefaultUpdateStrategy.Copy(), 1030 } 1031 j.Type = JobTypeBatch 1032 err = tg.Validate(j) 1033 require.Error(t, err, "does not allow update block") 1034 1035 tg = &TaskGroup{ 1036 Count: -1, 1037 RestartPolicy: &RestartPolicy{ 1038 Interval: 5 * time.Minute, 1039 Delay: 10 * time.Second, 1040 Attempts: 10, 1041 Mode: RestartPolicyModeDelay, 1042 }, 1043 ReschedulePolicy: &ReschedulePolicy{ 1044 Interval: 5 * time.Minute, 1045 Attempts: 5, 1046 Delay: 5 * time.Second, 1047 }, 1048 } 1049 j.Type = JobTypeSystem 1050 err = tg.Validate(j) 1051 if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") { 1052 t.Fatalf("err: %s", err) 1053 } 1054 1055 tg = &TaskGroup{ 1056 Networks: []*NetworkResource{ 1057 { 1058 DynamicPorts: []Port{{"http", 0, 80, ""}}, 1059 }, 1060 }, 1061 Tasks: []*Task{ 1062 { 1063 Resources: &Resources{ 1064 Networks: []*NetworkResource{ 1065 { 1066 DynamicPorts: []Port{{"http", 0, 80, ""}}, 1067 }, 1068 }, 1069 }, 1070 }, 1071 }, 1072 } 1073 err = tg.Validate(j) 1074 require.Contains(t, err.Error(), "Port label http already in use") 1075 1076 tg = &TaskGroup{ 1077 Volumes: map[string]*VolumeRequest{ 1078 "foo": { 1079 Type: "nothost", 1080 Source: "foo", 1081 }, 1082 }, 1083 Tasks: []*Task{ 1084 { 1085 Name: "task-a", 1086 Resources: &Resources{}, 1087 }, 1088 }, 1089 } 1090 err = tg.Validate(&Job{}) 1091 require.Contains(t, err.Error(), `Volume foo has unrecognised type nothost`) 1092 1093 tg = &TaskGroup{ 1094 Volumes: map[string]*VolumeRequest{ 1095 "foo": { 1096 Type: "host", 1097 }, 1098 }, 1099 Tasks: []*Task{ 1100 { 1101 Name: "task-a", 1102 Resources: &Resources{}, 1103 }, 1104 }, 1105 } 1106 err = tg.Validate(&Job{}) 1107 require.Contains(t, err.Error(), `Volume foo has an empty source`) 1108 1109 tg = &TaskGroup{ 1110 Volumes: map[string]*VolumeRequest{ 1111 "foo": { 1112 Type: "host", 1113 }, 1114 }, 1115 Tasks: []*Task{ 1116 { 1117 Name: "task-a", 1118 Resources: &Resources{}, 1119 VolumeMounts: []*VolumeMount{ 1120 { 1121 Volume: "", 1122 }, 1123 }, 1124 }, 1125 { 1126 Name: "task-b", 1127 Resources: &Resources{}, 1128 VolumeMounts: []*VolumeMount{ 1129 { 1130 Volume: "foob", 1131 }, 1132 }, 1133 }, 1134 }, 1135 } 1136 err = tg.Validate(&Job{}) 1137 expected = `Task task-a has a volume mount (0) referencing an empty volume` 1138 require.Contains(t, err.Error(), expected) 1139 1140 expected = `Task task-b has a volume mount (0) referencing undefined volume foob` 1141 require.Contains(t, err.Error(), expected) 1142 1143 taskA := &Task{Name: "task-a"} 1144 tg = &TaskGroup{ 1145 Name: "group-a", 1146 Services: []*Service{ 1147 { 1148 Name: "service-a", 1149 Checks: []*ServiceCheck{ 1150 { 1151 Name: "check-a", 1152 Type: "tcp", 1153 TaskName: "task-b", 1154 PortLabel: "http", 1155 Interval: time.Duration(1 * time.Second), 1156 Timeout: time.Duration(1 * time.Second), 1157 }, 1158 }, 1159 }, 1160 }, 1161 Tasks: []*Task{taskA}, 1162 } 1163 err = tg.Validate(&Job{}) 1164 expected = `Check check-a invalid: refers to non-existent task task-b` 1165 require.Contains(t, err.Error(), expected) 1166 1167 expected = `Check check-a invalid: only script and gRPC checks should have tasks` 1168 require.Contains(t, err.Error(), expected) 1169 1170 } 1171 1172 func TestTaskGroupNetwork_Validate(t *testing.T) { 1173 cases := []struct { 1174 TG *TaskGroup 1175 ErrContains string 1176 }{ 1177 { 1178 TG: &TaskGroup{ 1179 Name: "group-static-value-ok", 1180 Networks: Networks{ 1181 &NetworkResource{ 1182 ReservedPorts: []Port{ 1183 { 1184 Label: "ok", 1185 Value: 65535, 1186 }, 1187 }, 1188 }, 1189 }, 1190 }, 1191 }, 1192 { 1193 TG: &TaskGroup{ 1194 Name: "group-dynamic-value-ok", 1195 Networks: Networks{ 1196 &NetworkResource{ 1197 DynamicPorts: []Port{ 1198 { 1199 Label: "ok", 1200 Value: 65535, 1201 }, 1202 }, 1203 }, 1204 }, 1205 }, 1206 }, 1207 { 1208 TG: &TaskGroup{ 1209 Name: "group-static-to-ok", 1210 Networks: Networks{ 1211 &NetworkResource{ 1212 ReservedPorts: []Port{ 1213 { 1214 Label: "ok", 1215 To: 65535, 1216 }, 1217 }, 1218 }, 1219 }, 1220 }, 1221 }, 1222 { 1223 TG: &TaskGroup{ 1224 Name: "group-dynamic-to-ok", 1225 Networks: Networks{ 1226 &NetworkResource{ 1227 DynamicPorts: []Port{ 1228 { 1229 Label: "ok", 1230 To: 65535, 1231 }, 1232 }, 1233 }, 1234 }, 1235 }, 1236 }, 1237 { 1238 TG: &TaskGroup{ 1239 Name: "group-static-value-too-high", 1240 Networks: Networks{ 1241 &NetworkResource{ 1242 ReservedPorts: []Port{ 1243 { 1244 Label: "too-high", 1245 Value: 65536, 1246 }, 1247 }, 1248 }, 1249 }, 1250 }, 1251 ErrContains: "greater than", 1252 }, 1253 { 1254 TG: &TaskGroup{ 1255 Name: "group-dynamic-value-too-high", 1256 Networks: Networks{ 1257 &NetworkResource{ 1258 DynamicPorts: []Port{ 1259 { 1260 Label: "too-high", 1261 Value: 65536, 1262 }, 1263 }, 1264 }, 1265 }, 1266 }, 1267 ErrContains: "greater than", 1268 }, 1269 { 1270 TG: &TaskGroup{ 1271 Name: "group-static-to-too-high", 1272 Networks: Networks{ 1273 &NetworkResource{ 1274 ReservedPorts: []Port{ 1275 { 1276 Label: "too-high", 1277 To: 65536, 1278 }, 1279 }, 1280 }, 1281 }, 1282 }, 1283 ErrContains: "greater than", 1284 }, 1285 { 1286 TG: &TaskGroup{ 1287 Name: "group-dynamic-to-too-high", 1288 Networks: Networks{ 1289 &NetworkResource{ 1290 DynamicPorts: []Port{ 1291 { 1292 Label: "too-high", 1293 To: 65536, 1294 }, 1295 }, 1296 }, 1297 }, 1298 }, 1299 ErrContains: "greater than", 1300 }, 1301 { 1302 TG: &TaskGroup{ 1303 Name: "group-same-static-port-different-host_network", 1304 Networks: Networks{ 1305 &NetworkResource{ 1306 ReservedPorts: []Port{ 1307 { 1308 Label: "net1_http", 1309 Value: 80, 1310 HostNetwork: "net1", 1311 }, 1312 { 1313 Label: "net2_http", 1314 Value: 80, 1315 HostNetwork: "net2", 1316 }, 1317 }, 1318 }, 1319 }, 1320 }, 1321 }, 1322 { 1323 TG: &TaskGroup{ 1324 Name: "mixing-group-task-ports", 1325 Networks: Networks{ 1326 &NetworkResource{ 1327 ReservedPorts: []Port{ 1328 { 1329 Label: "group_http", 1330 Value: 80, 1331 }, 1332 }, 1333 }, 1334 }, 1335 Tasks: []*Task{ 1336 &Task{ 1337 Name: "task1", 1338 Resources: &Resources{ 1339 Networks: Networks{ 1340 &NetworkResource{ 1341 ReservedPorts: []Port{ 1342 { 1343 Label: "task_http", 1344 Value: 80, 1345 }, 1346 }, 1347 }, 1348 }, 1349 }, 1350 }, 1351 }, 1352 }, 1353 ErrContains: "already reserved by", 1354 }, 1355 { 1356 TG: &TaskGroup{ 1357 Name: "mixing-group-task-ports-with-host_network", 1358 Networks: Networks{ 1359 &NetworkResource{ 1360 ReservedPorts: []Port{ 1361 { 1362 Label: "group_http", 1363 Value: 80, 1364 HostNetwork: "net1", 1365 }, 1366 }, 1367 }, 1368 }, 1369 Tasks: []*Task{ 1370 &Task{ 1371 Name: "task1", 1372 Resources: &Resources{ 1373 Networks: Networks{ 1374 &NetworkResource{ 1375 ReservedPorts: []Port{ 1376 { 1377 Label: "task_http", 1378 Value: 80, 1379 }, 1380 }, 1381 }, 1382 }, 1383 }, 1384 }, 1385 }, 1386 }, 1387 }, 1388 } 1389 1390 for i := range cases { 1391 tc := cases[i] 1392 t.Run(tc.TG.Name, func(t *testing.T) { 1393 err := tc.TG.validateNetworks() 1394 t.Logf("%s -> %v", tc.TG.Name, err) 1395 if tc.ErrContains == "" { 1396 require.NoError(t, err) 1397 return 1398 } 1399 1400 require.Error(t, err) 1401 require.Contains(t, err.Error(), tc.ErrContains) 1402 }) 1403 } 1404 } 1405 1406 func TestTask_Validate(t *testing.T) { 1407 task := &Task{} 1408 ephemeralDisk := DefaultEphemeralDisk() 1409 err := task.Validate(ephemeralDisk, JobTypeBatch, nil, nil) 1410 requireErrors(t, err, 1411 "task name", 1412 "task driver", 1413 "task resources", 1414 ) 1415 1416 task = &Task{Name: "web/foo"} 1417 err = task.Validate(ephemeralDisk, JobTypeBatch, nil, nil) 1418 require.Error(t, err, "slashes") 1419 1420 task = &Task{ 1421 Name: "web", 1422 Driver: "docker", 1423 Resources: &Resources{ 1424 CPU: 100, 1425 MemoryMB: 100, 1426 }, 1427 LogConfig: DefaultLogConfig(), 1428 } 1429 ephemeralDisk.SizeMB = 200 1430 err = task.Validate(ephemeralDisk, JobTypeBatch, nil, nil) 1431 if err != nil { 1432 t.Fatalf("err: %s", err) 1433 } 1434 1435 task.Constraints = append(task.Constraints, 1436 &Constraint{ 1437 Operand: ConstraintDistinctHosts, 1438 }, 1439 &Constraint{ 1440 Operand: ConstraintDistinctProperty, 1441 LTarget: "${meta.rack}", 1442 }) 1443 1444 err = task.Validate(ephemeralDisk, JobTypeBatch, nil, nil) 1445 requireErrors(t, err, 1446 "task level: distinct_hosts", 1447 "task level: distinct_property", 1448 ) 1449 } 1450 1451 func TestTask_Validate_Resources(t *testing.T) { 1452 cases := []struct { 1453 name string 1454 res *Resources 1455 }{ 1456 { 1457 name: "Minimum", 1458 res: MinResources(), 1459 }, 1460 { 1461 name: "Default", 1462 res: DefaultResources(), 1463 }, 1464 { 1465 name: "Full", 1466 res: &Resources{ 1467 CPU: 1000, 1468 MemoryMB: 1000, 1469 IOPS: 1000, 1470 Networks: []*NetworkResource{ 1471 { 1472 Mode: "host", 1473 Device: "localhost", 1474 CIDR: "127.0.0.0/8", 1475 IP: "127.0.0.1", 1476 MBits: 1000, 1477 DNS: &DNSConfig{ 1478 Servers: []string{"localhost"}, 1479 Searches: []string{"localdomain"}, 1480 Options: []string{"ndots:5"}, 1481 }, 1482 ReservedPorts: []Port{ 1483 { 1484 Label: "reserved", 1485 Value: 1234, 1486 To: 1234, 1487 HostNetwork: "loopback", 1488 }, 1489 }, 1490 DynamicPorts: []Port{ 1491 { 1492 Label: "dynamic", 1493 Value: 5678, 1494 To: 5678, 1495 HostNetwork: "loopback", 1496 }, 1497 }, 1498 }, 1499 }, 1500 }, 1501 }, 1502 } 1503 1504 for i := range cases { 1505 tc := cases[i] 1506 t.Run(tc.name, func(t *testing.T) { 1507 require.NoError(t, tc.res.Validate()) 1508 }) 1509 } 1510 } 1511 1512 func TestTask_Validate_Services(t *testing.T) { 1513 s1 := &Service{ 1514 Name: "service-name", 1515 PortLabel: "bar", 1516 Checks: []*ServiceCheck{ 1517 { 1518 Name: "check-name", 1519 Type: ServiceCheckTCP, 1520 Interval: 0 * time.Second, 1521 }, 1522 { 1523 Name: "check-name", 1524 Type: ServiceCheckTCP, 1525 Timeout: 2 * time.Second, 1526 }, 1527 { 1528 Name: "check-name", 1529 Type: ServiceCheckTCP, 1530 Interval: 1 * time.Second, 1531 }, 1532 }, 1533 } 1534 1535 s2 := &Service{ 1536 Name: "service-name", 1537 PortLabel: "bar", 1538 } 1539 1540 s3 := &Service{ 1541 Name: "service-A", 1542 PortLabel: "a", 1543 } 1544 s4 := &Service{ 1545 Name: "service-A", 1546 PortLabel: "b", 1547 } 1548 1549 ephemeralDisk := DefaultEphemeralDisk() 1550 ephemeralDisk.SizeMB = 200 1551 task := &Task{ 1552 Name: "web", 1553 Driver: "docker", 1554 Resources: &Resources{ 1555 CPU: 100, 1556 MemoryMB: 100, 1557 }, 1558 Services: []*Service{s1, s2}, 1559 } 1560 1561 task1 := &Task{ 1562 Name: "web", 1563 Driver: "docker", 1564 Resources: DefaultResources(), 1565 Services: []*Service{s3, s4}, 1566 LogConfig: DefaultLogConfig(), 1567 } 1568 tgNetworks := []*NetworkResource{ 1569 { 1570 MBits: 10, 1571 DynamicPorts: []Port{ 1572 { 1573 Label: "a", 1574 Value: 1000, 1575 }, 1576 { 1577 Label: "b", 1578 Value: 2000, 1579 }, 1580 }, 1581 }, 1582 } 1583 1584 err := task.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks) 1585 if err == nil { 1586 t.Fatal("expected an error") 1587 } 1588 1589 if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") { 1590 t.Fatalf("err: %v", err) 1591 } 1592 1593 if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") { 1594 t.Fatalf("err: %v", err) 1595 } 1596 1597 if !strings.Contains(err.Error(), "missing required value interval") { 1598 t.Fatalf("err: %v", err) 1599 } 1600 1601 if !strings.Contains(err.Error(), "cannot be less than") { 1602 t.Fatalf("err: %v", err) 1603 } 1604 1605 if err = task1.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks); err != nil { 1606 t.Fatalf("err : %v", err) 1607 } 1608 } 1609 1610 func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { 1611 ephemeralDisk := DefaultEphemeralDisk() 1612 getTask := func(s *Service) *Task { 1613 task := &Task{ 1614 Name: "web", 1615 Driver: "docker", 1616 Resources: DefaultResources(), 1617 Services: []*Service{s}, 1618 LogConfig: DefaultLogConfig(), 1619 } 1620 1621 return task 1622 } 1623 tgNetworks := []*NetworkResource{ 1624 { 1625 DynamicPorts: []Port{ 1626 { 1627 Label: "http", 1628 Value: 80, 1629 }, 1630 }, 1631 }, 1632 } 1633 1634 cases := []*Service{ 1635 { 1636 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1637 Name: "DriverModeWithLabel", 1638 PortLabel: "http", 1639 AddressMode: AddressModeDriver, 1640 }, 1641 { 1642 Name: "DriverModeWithPort", 1643 PortLabel: "80", 1644 AddressMode: AddressModeDriver, 1645 }, 1646 { 1647 Name: "HostModeWithLabel", 1648 PortLabel: "http", 1649 AddressMode: AddressModeHost, 1650 }, 1651 { 1652 Name: "HostModeWithoutLabel", 1653 AddressMode: AddressModeHost, 1654 }, 1655 { 1656 Name: "DriverModeWithoutLabel", 1657 AddressMode: AddressModeDriver, 1658 }, 1659 } 1660 1661 for _, service := range cases { 1662 task := getTask(service) 1663 t.Run(service.Name, func(t *testing.T) { 1664 if err := task.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks); err != nil { 1665 t.Fatalf("unexpected err: %v", err) 1666 } 1667 }) 1668 } 1669 } 1670 1671 func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { 1672 ephemeralDisk := DefaultEphemeralDisk() 1673 getTask := func(s *Service) *Task { 1674 return &Task{ 1675 Name: "web", 1676 Driver: "docker", 1677 Resources: DefaultResources(), 1678 Services: []*Service{s}, 1679 LogConfig: DefaultLogConfig(), 1680 } 1681 } 1682 tgNetworks := []*NetworkResource{ 1683 { 1684 DynamicPorts: []Port{ 1685 { 1686 Label: "http", 1687 Value: 80, 1688 }, 1689 }, 1690 }, 1691 } 1692 1693 cases := []*Service{ 1694 { 1695 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1696 Name: "DriverModeWithLabel", 1697 PortLabel: "asdf", 1698 AddressMode: AddressModeDriver, 1699 }, 1700 { 1701 Name: "HostModeWithLabel", 1702 PortLabel: "asdf", 1703 AddressMode: AddressModeHost, 1704 }, 1705 { 1706 Name: "HostModeWithPort", 1707 PortLabel: "80", 1708 AddressMode: AddressModeHost, 1709 }, 1710 } 1711 1712 for _, service := range cases { 1713 task := getTask(service) 1714 t.Run(service.Name, func(t *testing.T) { 1715 err := task.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks) 1716 if err == nil { 1717 t.Fatalf("expected an error") 1718 } 1719 //t.Logf("err: %v", err) 1720 }) 1721 } 1722 } 1723 1724 func TestTask_Validate_Service_Check(t *testing.T) { 1725 1726 invalidCheck := ServiceCheck{ 1727 Name: "check-name", 1728 Command: "/bin/true", 1729 Type: ServiceCheckScript, 1730 Interval: 10 * time.Second, 1731 } 1732 1733 err := invalidCheck.validate() 1734 if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") { 1735 t.Fatalf("expected a timeout validation error but received: %q", err) 1736 } 1737 1738 check1 := ServiceCheck{ 1739 Name: "check-name", 1740 Type: ServiceCheckTCP, 1741 Interval: 10 * time.Second, 1742 Timeout: 2 * time.Second, 1743 } 1744 1745 if err := check1.validate(); err != nil { 1746 t.Fatalf("err: %v", err) 1747 } 1748 1749 check1.InitialStatus = "foo" 1750 err = check1.validate() 1751 if err == nil { 1752 t.Fatal("Expected an error") 1753 } 1754 1755 if !strings.Contains(err.Error(), "invalid initial check state (foo)") { 1756 t.Fatalf("err: %v", err) 1757 } 1758 1759 check1.InitialStatus = api.HealthCritical 1760 err = check1.validate() 1761 if err != nil { 1762 t.Fatalf("err: %v", err) 1763 } 1764 1765 check1.InitialStatus = api.HealthPassing 1766 err = check1.validate() 1767 if err != nil { 1768 t.Fatalf("err: %v", err) 1769 } 1770 1771 check1.InitialStatus = "" 1772 err = check1.validate() 1773 if err != nil { 1774 t.Fatalf("err: %v", err) 1775 } 1776 1777 check2 := ServiceCheck{ 1778 Name: "check-name-2", 1779 Type: ServiceCheckHTTP, 1780 Interval: 10 * time.Second, 1781 Timeout: 2 * time.Second, 1782 Path: "/foo/bar", 1783 } 1784 1785 err = check2.validate() 1786 if err != nil { 1787 t.Fatalf("err: %v", err) 1788 } 1789 1790 check2.Path = "" 1791 err = check2.validate() 1792 if err == nil { 1793 t.Fatal("Expected an error") 1794 } 1795 if !strings.Contains(err.Error(), "valid http path") { 1796 t.Fatalf("err: %v", err) 1797 } 1798 1799 check2.Path = "http://www.example.com" 1800 err = check2.validate() 1801 if err == nil { 1802 t.Fatal("Expected an error") 1803 } 1804 if !strings.Contains(err.Error(), "relative http path") { 1805 t.Fatalf("err: %v", err) 1806 } 1807 1808 t.Run("check expose", func(t *testing.T) { 1809 t.Run("type http", func(t *testing.T) { 1810 require.NoError(t, (&ServiceCheck{ 1811 Type: ServiceCheckHTTP, 1812 Interval: 1 * time.Second, 1813 Timeout: 1 * time.Second, 1814 Path: "/health", 1815 Expose: true, 1816 }).validate()) 1817 }) 1818 t.Run("type tcp", func(t *testing.T) { 1819 require.EqualError(t, (&ServiceCheck{ 1820 Type: ServiceCheckTCP, 1821 Interval: 1 * time.Second, 1822 Timeout: 1 * time.Second, 1823 Expose: true, 1824 }).validate(), "expose may only be set on HTTP or gRPC checks") 1825 }) 1826 }) 1827 } 1828 1829 // TestTask_Validate_Service_Check_AddressMode asserts that checks do not 1830 // inherit address mode but do inherit ports. 1831 func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { 1832 getTask := func(s *Service) (*Task, *TaskGroup) { 1833 return &Task{ 1834 Services: []*Service{s}, 1835 }, &TaskGroup{ 1836 Networks: []*NetworkResource{ 1837 { 1838 DynamicPorts: []Port{ 1839 { 1840 Label: "http", 1841 Value: 9999, 1842 }, 1843 }, 1844 }, 1845 }, 1846 } 1847 } 1848 1849 cases := []struct { 1850 Service *Service 1851 ErrContains string 1852 }{ 1853 { 1854 Service: &Service{ 1855 Name: "invalid-driver", 1856 PortLabel: "80", 1857 AddressMode: "host", 1858 }, 1859 ErrContains: `port label "80" referenced`, 1860 }, 1861 { 1862 Service: &Service{ 1863 Name: "http-driver-fail-1", 1864 PortLabel: "80", 1865 AddressMode: "driver", 1866 Checks: []*ServiceCheck{ 1867 { 1868 Name: "invalid-check-1", 1869 Type: "tcp", 1870 Interval: time.Second, 1871 Timeout: time.Second, 1872 }, 1873 }, 1874 }, 1875 ErrContains: `check "invalid-check-1" cannot use a numeric port`, 1876 }, 1877 { 1878 Service: &Service{ 1879 Name: "http-driver-fail-2", 1880 PortLabel: "80", 1881 AddressMode: "driver", 1882 Checks: []*ServiceCheck{ 1883 { 1884 Name: "invalid-check-2", 1885 Type: "tcp", 1886 PortLabel: "80", 1887 Interval: time.Second, 1888 Timeout: time.Second, 1889 }, 1890 }, 1891 }, 1892 ErrContains: `check "invalid-check-2" cannot use a numeric port`, 1893 }, 1894 { 1895 Service: &Service{ 1896 Name: "http-driver-fail-3", 1897 PortLabel: "80", 1898 AddressMode: "driver", 1899 Checks: []*ServiceCheck{ 1900 { 1901 Name: "invalid-check-3", 1902 Type: "tcp", 1903 PortLabel: "missing-port-label", 1904 Interval: time.Second, 1905 Timeout: time.Second, 1906 }, 1907 }, 1908 }, 1909 ErrContains: `port label "missing-port-label" referenced`, 1910 }, 1911 { 1912 Service: &Service{ 1913 Name: "http-driver-passes", 1914 PortLabel: "80", 1915 AddressMode: "driver", 1916 Checks: []*ServiceCheck{ 1917 { 1918 Name: "valid-script-check", 1919 Type: "script", 1920 Command: "ok", 1921 Interval: time.Second, 1922 Timeout: time.Second, 1923 }, 1924 { 1925 Name: "valid-host-check", 1926 Type: "tcp", 1927 PortLabel: "http", 1928 Interval: time.Second, 1929 Timeout: time.Second, 1930 }, 1931 { 1932 Name: "valid-driver-check", 1933 Type: "tcp", 1934 AddressMode: "driver", 1935 Interval: time.Second, 1936 Timeout: time.Second, 1937 }, 1938 }, 1939 }, 1940 }, 1941 { 1942 Service: &Service{ 1943 Name: "empty-address-3673-passes-1", 1944 Checks: []*ServiceCheck{ 1945 { 1946 Name: "valid-port-label", 1947 Type: "tcp", 1948 PortLabel: "http", 1949 Interval: time.Second, 1950 Timeout: time.Second, 1951 }, 1952 { 1953 Name: "empty-is-ok", 1954 Type: "script", 1955 Command: "ok", 1956 Interval: time.Second, 1957 Timeout: time.Second, 1958 }, 1959 }, 1960 }, 1961 }, 1962 { 1963 Service: &Service{ 1964 Name: "empty-address-3673-passes-2", 1965 }, 1966 }, 1967 { 1968 Service: &Service{ 1969 Name: "empty-address-3673-fails", 1970 Checks: []*ServiceCheck{ 1971 { 1972 Name: "empty-is-not-ok", 1973 Type: "tcp", 1974 Interval: time.Second, 1975 Timeout: time.Second, 1976 }, 1977 }, 1978 }, 1979 ErrContains: `invalid: check requires a port but neither check nor service`, 1980 }, 1981 { 1982 Service: &Service{ 1983 Name: "conect-block-on-task-level", 1984 Connect: &ConsulConnect{SidecarService: &ConsulSidecarService{}}, 1985 }, 1986 ErrContains: `cannot have "connect" block`, 1987 }, 1988 } 1989 1990 for _, tc := range cases { 1991 tc := tc 1992 task, tg := getTask(tc.Service) 1993 t.Run(tc.Service.Name, func(t *testing.T) { 1994 err := validateServices(task, tg.Networks) 1995 if err == nil && tc.ErrContains == "" { 1996 // Ok! 1997 return 1998 } 1999 if err == nil { 2000 t.Fatalf("no error returned. expected: %s", tc.ErrContains) 2001 } 2002 if !strings.Contains(err.Error(), tc.ErrContains) { 2003 t.Fatalf("expected %q but found: %v", tc.ErrContains, err) 2004 } 2005 }) 2006 } 2007 } 2008 2009 func TestTask_Validate_Service_Check_GRPC(t *testing.T) { 2010 t.Parallel() 2011 // Bad (no port) 2012 invalidGRPC := &ServiceCheck{ 2013 Type: ServiceCheckGRPC, 2014 Interval: time.Second, 2015 Timeout: time.Second, 2016 } 2017 service := &Service{ 2018 Name: "test", 2019 Checks: []*ServiceCheck{invalidGRPC}, 2020 } 2021 2022 assert.Error(t, service.Validate()) 2023 2024 // Good 2025 service.Checks[0] = &ServiceCheck{ 2026 Type: ServiceCheckGRPC, 2027 Interval: time.Second, 2028 Timeout: time.Second, 2029 PortLabel: "some-port-label", 2030 } 2031 2032 assert.NoError(t, service.Validate()) 2033 } 2034 2035 func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { 2036 t.Parallel() 2037 invalidCheckRestart := &CheckRestart{ 2038 Limit: -1, 2039 Grace: -1, 2040 } 2041 2042 err := invalidCheckRestart.Validate() 2043 assert.NotNil(t, err, "invalidateCheckRestart.Validate()") 2044 assert.Len(t, err.(*multierror.Error).Errors, 2) 2045 2046 validCheckRestart := &CheckRestart{} 2047 assert.Nil(t, validCheckRestart.Validate()) 2048 2049 validCheckRestart.Limit = 1 2050 validCheckRestart.Grace = 1 2051 assert.Nil(t, validCheckRestart.Validate()) 2052 } 2053 2054 func TestTask_Validate_ConnectProxyKind(t *testing.T) { 2055 ephemeralDisk := DefaultEphemeralDisk() 2056 getTask := func(kind TaskKind, leader bool) *Task { 2057 task := &Task{ 2058 Name: "web", 2059 Driver: "docker", 2060 Resources: DefaultResources(), 2061 LogConfig: DefaultLogConfig(), 2062 Kind: kind, 2063 Leader: leader, 2064 } 2065 task.Resources.Networks = []*NetworkResource{ 2066 { 2067 MBits: 10, 2068 DynamicPorts: []Port{ 2069 { 2070 Label: "http", 2071 Value: 80, 2072 }, 2073 }, 2074 }, 2075 } 2076 return task 2077 } 2078 2079 cases := []struct { 2080 Desc string 2081 Kind TaskKind 2082 Leader bool 2083 Service *Service 2084 TgService []*Service 2085 ErrContains string 2086 }{ 2087 { 2088 Desc: "Not connect", 2089 Kind: "test", 2090 }, 2091 { 2092 Desc: "Invalid because of service in task definition", 2093 Kind: "connect-proxy:redis", 2094 Service: &Service{ 2095 Name: "redis", 2096 }, 2097 ErrContains: "Connect proxy task must not have a service stanza", 2098 }, 2099 { 2100 Desc: "Leader should not be set", 2101 Kind: "connect-proxy:redis", 2102 Leader: true, 2103 Service: &Service{ 2104 Name: "redis", 2105 }, 2106 ErrContains: "Connect proxy task must not have leader set", 2107 }, 2108 { 2109 Desc: "Service name invalid", 2110 Kind: "connect-proxy:redis:test", 2111 Service: &Service{ 2112 Name: "redis", 2113 }, 2114 ErrContains: `No Connect services in task group with Connect proxy ("redis:test")`, 2115 }, 2116 { 2117 Desc: "Service name not found in group", 2118 Kind: "connect-proxy:redis", 2119 ErrContains: `No Connect services in task group with Connect proxy ("redis")`, 2120 }, 2121 { 2122 Desc: "Connect stanza not configured in group", 2123 Kind: "connect-proxy:redis", 2124 TgService: []*Service{{ 2125 Name: "redis", 2126 }}, 2127 ErrContains: `No Connect services in task group with Connect proxy ("redis")`, 2128 }, 2129 { 2130 Desc: "Valid connect proxy kind", 2131 Kind: "connect-proxy:redis", 2132 TgService: []*Service{{ 2133 Name: "redis", 2134 Connect: &ConsulConnect{ 2135 SidecarService: &ConsulSidecarService{ 2136 Port: "db", 2137 }, 2138 }, 2139 }}, 2140 }, 2141 } 2142 2143 for _, tc := range cases { 2144 tc := tc 2145 task := getTask(tc.Kind, tc.Leader) 2146 if tc.Service != nil { 2147 task.Services = []*Service{tc.Service} 2148 } 2149 t.Run(tc.Desc, func(t *testing.T) { 2150 err := task.Validate(ephemeralDisk, "service", tc.TgService, nil) 2151 if err == nil && tc.ErrContains == "" { 2152 // Ok! 2153 return 2154 } 2155 require.Errorf(t, err, "no error returned. expected: %s", tc.ErrContains) 2156 require.Containsf(t, err.Error(), tc.ErrContains, "expected %q but found: %v", tc.ErrContains, err) 2157 }) 2158 } 2159 2160 } 2161 func TestTask_Validate_LogConfig(t *testing.T) { 2162 task := &Task{ 2163 LogConfig: DefaultLogConfig(), 2164 } 2165 ephemeralDisk := &EphemeralDisk{ 2166 SizeMB: 1, 2167 } 2168 2169 err := task.Validate(ephemeralDisk, JobTypeService, nil, nil) 2170 require.Error(t, err, "log storage") 2171 } 2172 2173 func TestLogConfig_Equals(t *testing.T) { 2174 t.Run("both nil", func(t *testing.T) { 2175 a := (*LogConfig)(nil) 2176 b := (*LogConfig)(nil) 2177 require.True(t, a.Equals(b)) 2178 }) 2179 2180 t.Run("one nil", func(t *testing.T) { 2181 a := new(LogConfig) 2182 b := (*LogConfig)(nil) 2183 require.False(t, a.Equals(b)) 2184 }) 2185 2186 t.Run("max files", func(t *testing.T) { 2187 a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200} 2188 b := &LogConfig{MaxFiles: 2, MaxFileSizeMB: 200} 2189 require.False(t, a.Equals(b)) 2190 }) 2191 2192 t.Run("max file size", func(t *testing.T) { 2193 a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 100} 2194 b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200} 2195 require.False(t, a.Equals(b)) 2196 }) 2197 2198 t.Run("same", func(t *testing.T) { 2199 a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200} 2200 b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200} 2201 require.True(t, a.Equals(b)) 2202 }) 2203 } 2204 2205 func TestTask_Validate_CSIPluginConfig(t *testing.T) { 2206 table := []struct { 2207 name string 2208 pc *TaskCSIPluginConfig 2209 expectedErr string 2210 unexpectedErr string 2211 }{ 2212 { 2213 name: "no errors when not specified", 2214 pc: nil, 2215 unexpectedErr: "CSIPluginConfig", 2216 }, 2217 { 2218 name: "requires non-empty plugin id", 2219 pc: &TaskCSIPluginConfig{}, 2220 expectedErr: "CSIPluginConfig must have a non-empty PluginID", 2221 }, 2222 { 2223 name: "requires valid plugin type", 2224 pc: &TaskCSIPluginConfig{ 2225 ID: "com.hashicorp.csi", 2226 Type: "nonsense", 2227 }, 2228 expectedErr: "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"", 2229 }, 2230 } 2231 2232 for _, tt := range table { 2233 t.Run(tt.name, func(t *testing.T) { 2234 task := testJob().TaskGroups[0].Tasks[0] 2235 task.CSIPluginConfig = tt.pc 2236 ephemeralDisk := &EphemeralDisk{ 2237 SizeMB: 100, 2238 } 2239 2240 err := task.Validate(ephemeralDisk, JobTypeService, nil, nil) 2241 if tt.expectedErr != "" { 2242 require.Error(t, err) 2243 require.Contains(t, err.Error(), tt.expectedErr) 2244 } else { 2245 require.NoError(t, err) 2246 } 2247 }) 2248 } 2249 } 2250 2251 func TestTask_Validate_Template(t *testing.T) { 2252 2253 bad := &Template{} 2254 task := &Task{ 2255 Templates: []*Template{bad}, 2256 } 2257 ephemeralDisk := &EphemeralDisk{ 2258 SizeMB: 1, 2259 } 2260 2261 err := task.Validate(ephemeralDisk, JobTypeService, nil, nil) 2262 if !strings.Contains(err.Error(), "Template 1 validation failed") { 2263 t.Fatalf("err: %s", err) 2264 } 2265 2266 // Have two templates that share the same destination 2267 good := &Template{ 2268 SourcePath: "foo", 2269 DestPath: "local/foo", 2270 ChangeMode: "noop", 2271 } 2272 2273 task.Templates = []*Template{good, good} 2274 err = task.Validate(ephemeralDisk, JobTypeService, nil, nil) 2275 if !strings.Contains(err.Error(), "same destination as") { 2276 t.Fatalf("err: %s", err) 2277 } 2278 2279 // Env templates can't use signals 2280 task.Templates = []*Template{ 2281 { 2282 Envvars: true, 2283 ChangeMode: "signal", 2284 }, 2285 } 2286 2287 err = task.Validate(ephemeralDisk, JobTypeService, nil, nil) 2288 if err == nil { 2289 t.Fatalf("expected error from Template.Validate") 2290 } 2291 if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) { 2292 t.Errorf("expected to find %q but found %v", expected, err) 2293 } 2294 } 2295 2296 func TestTemplate_Validate(t *testing.T) { 2297 cases := []struct { 2298 Tmpl *Template 2299 Fail bool 2300 ContainsErrs []string 2301 }{ 2302 { 2303 Tmpl: &Template{}, 2304 Fail: true, 2305 ContainsErrs: []string{ 2306 "specify a source path", 2307 "specify a destination", 2308 TemplateChangeModeInvalidError.Error(), 2309 }, 2310 }, 2311 { 2312 Tmpl: &Template{ 2313 Splay: -100, 2314 }, 2315 Fail: true, 2316 ContainsErrs: []string{ 2317 "positive splay", 2318 }, 2319 }, 2320 { 2321 Tmpl: &Template{ 2322 ChangeMode: "foo", 2323 }, 2324 Fail: true, 2325 ContainsErrs: []string{ 2326 TemplateChangeModeInvalidError.Error(), 2327 }, 2328 }, 2329 { 2330 Tmpl: &Template{ 2331 ChangeMode: "signal", 2332 }, 2333 Fail: true, 2334 ContainsErrs: []string{ 2335 "specify signal value", 2336 }, 2337 }, 2338 { 2339 Tmpl: &Template{ 2340 SourcePath: "foo", 2341 DestPath: "../../root", 2342 ChangeMode: "noop", 2343 }, 2344 Fail: true, 2345 ContainsErrs: []string{ 2346 "destination escapes", 2347 }, 2348 }, 2349 { 2350 Tmpl: &Template{ 2351 SourcePath: "foo", 2352 DestPath: "local/foo", 2353 ChangeMode: "noop", 2354 }, 2355 Fail: false, 2356 }, 2357 { 2358 Tmpl: &Template{ 2359 SourcePath: "foo", 2360 DestPath: "local/foo", 2361 ChangeMode: "noop", 2362 Perms: "0444", 2363 }, 2364 Fail: false, 2365 }, 2366 { 2367 Tmpl: &Template{ 2368 SourcePath: "foo", 2369 DestPath: "local/foo", 2370 ChangeMode: "noop", 2371 Perms: "zza", 2372 }, 2373 Fail: true, 2374 ContainsErrs: []string{ 2375 "as octal", 2376 }, 2377 }, 2378 } 2379 2380 for i, c := range cases { 2381 err := c.Tmpl.Validate() 2382 if err != nil { 2383 if !c.Fail { 2384 t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err) 2385 } 2386 2387 e := err.Error() 2388 for _, exp := range c.ContainsErrs { 2389 if !strings.Contains(e, exp) { 2390 t.Fatalf("Cased %d: should have contained error %q: %q", i+1, exp, e) 2391 } 2392 } 2393 } else if c.Fail { 2394 t.Fatalf("Case %d: should have failed: %v", i+1, err) 2395 } 2396 } 2397 } 2398 2399 func TestConstraint_Validate(t *testing.T) { 2400 c := &Constraint{} 2401 err := c.Validate() 2402 require.Error(t, err, "Missing constraint operand") 2403 2404 c = &Constraint{ 2405 LTarget: "$attr.kernel.name", 2406 RTarget: "linux", 2407 Operand: "=", 2408 } 2409 err = c.Validate() 2410 require.NoError(t, err) 2411 2412 // Perform additional regexp validation 2413 c.Operand = ConstraintRegex 2414 c.RTarget = "(foo" 2415 err = c.Validate() 2416 require.Error(t, err, "missing closing") 2417 2418 // Perform version validation 2419 c.Operand = ConstraintVersion 2420 c.RTarget = "~> foo" 2421 err = c.Validate() 2422 require.Error(t, err, "Malformed constraint") 2423 2424 // Perform semver validation 2425 c.Operand = ConstraintSemver 2426 err = c.Validate() 2427 require.Error(t, err, "Malformed constraint") 2428 2429 c.RTarget = ">= 0.6.1" 2430 require.NoError(t, c.Validate()) 2431 2432 // Perform distinct_property validation 2433 c.Operand = ConstraintDistinctProperty 2434 c.RTarget = "0" 2435 err = c.Validate() 2436 require.Error(t, err, "count of 1 or greater") 2437 2438 c.RTarget = "-1" 2439 err = c.Validate() 2440 require.Error(t, err, "to uint64") 2441 2442 // Perform distinct_hosts validation 2443 c.Operand = ConstraintDistinctHosts 2444 c.LTarget = "" 2445 c.RTarget = "" 2446 if err := c.Validate(); err != nil { 2447 t.Fatalf("expected valid constraint: %v", err) 2448 } 2449 2450 // Perform set_contains* validation 2451 c.RTarget = "" 2452 for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} { 2453 c.Operand = o 2454 err = c.Validate() 2455 require.Error(t, err, "requires an RTarget") 2456 } 2457 2458 // Perform LTarget validation 2459 c.Operand = ConstraintRegex 2460 c.RTarget = "foo" 2461 c.LTarget = "" 2462 err = c.Validate() 2463 require.Error(t, err, "No LTarget") 2464 2465 // Perform constraint type validation 2466 c.Operand = "foo" 2467 err = c.Validate() 2468 require.Error(t, err, "Unknown constraint type") 2469 } 2470 2471 func TestAffinity_Validate(t *testing.T) { 2472 2473 type tc struct { 2474 affinity *Affinity 2475 err error 2476 name string 2477 } 2478 2479 testCases := []tc{ 2480 { 2481 affinity: &Affinity{}, 2482 err: fmt.Errorf("Missing affinity operand"), 2483 }, 2484 { 2485 affinity: &Affinity{ 2486 Operand: "foo", 2487 LTarget: "${meta.node_class}", 2488 Weight: 10, 2489 }, 2490 err: fmt.Errorf("Unknown affinity operator \"foo\""), 2491 }, 2492 { 2493 affinity: &Affinity{ 2494 Operand: "=", 2495 LTarget: "${meta.node_class}", 2496 Weight: 10, 2497 }, 2498 err: fmt.Errorf("Operator \"=\" requires an RTarget"), 2499 }, 2500 { 2501 affinity: &Affinity{ 2502 Operand: "=", 2503 LTarget: "${meta.node_class}", 2504 RTarget: "c4", 2505 Weight: 0, 2506 }, 2507 err: fmt.Errorf("Affinity weight cannot be zero"), 2508 }, 2509 { 2510 affinity: &Affinity{ 2511 Operand: "=", 2512 LTarget: "${meta.node_class}", 2513 RTarget: "c4", 2514 Weight: 110, 2515 }, 2516 err: fmt.Errorf("Affinity weight must be within the range [-100,100]"), 2517 }, 2518 { 2519 affinity: &Affinity{ 2520 Operand: "=", 2521 LTarget: "${node.class}", 2522 Weight: 10, 2523 }, 2524 err: fmt.Errorf("Operator \"=\" requires an RTarget"), 2525 }, 2526 { 2527 affinity: &Affinity{ 2528 Operand: "version", 2529 LTarget: "${meta.os}", 2530 RTarget: ">>2.0", 2531 Weight: 110, 2532 }, 2533 err: fmt.Errorf("Version affinity is invalid"), 2534 }, 2535 { 2536 affinity: &Affinity{ 2537 Operand: "regexp", 2538 LTarget: "${meta.os}", 2539 RTarget: "\\K2.0", 2540 Weight: 100, 2541 }, 2542 err: fmt.Errorf("Regular expression failed to compile"), 2543 }, 2544 } 2545 2546 for _, tc := range testCases { 2547 t.Run(tc.name, func(t *testing.T) { 2548 err := tc.affinity.Validate() 2549 if tc.err != nil { 2550 require.NotNil(t, err) 2551 require.Contains(t, err.Error(), tc.err.Error()) 2552 } else { 2553 require.Nil(t, err) 2554 } 2555 }) 2556 } 2557 } 2558 2559 func TestUpdateStrategy_Validate(t *testing.T) { 2560 u := &UpdateStrategy{ 2561 MaxParallel: -1, 2562 HealthCheck: "foo", 2563 MinHealthyTime: -10, 2564 HealthyDeadline: -15, 2565 ProgressDeadline: -25, 2566 AutoRevert: false, 2567 Canary: -1, 2568 } 2569 2570 err := u.Validate() 2571 requireErrors(t, err, 2572 "Invalid health check given", 2573 "Max parallel can not be less than zero", 2574 "Canary count can not be less than zero", 2575 "Minimum healthy time may not be less than zero", 2576 "Healthy deadline must be greater than zero", 2577 "Progress deadline must be zero or greater", 2578 "Minimum healthy time must be less than healthy deadline", 2579 "Healthy deadline must be less than progress deadline", 2580 ) 2581 } 2582 2583 func TestResource_NetIndex(t *testing.T) { 2584 r := &Resources{ 2585 Networks: []*NetworkResource{ 2586 {Device: "eth0"}, 2587 {Device: "lo0"}, 2588 {Device: ""}, 2589 }, 2590 } 2591 if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 { 2592 t.Fatalf("Bad: %d", idx) 2593 } 2594 if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 { 2595 t.Fatalf("Bad: %d", idx) 2596 } 2597 if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 { 2598 t.Fatalf("Bad: %d", idx) 2599 } 2600 } 2601 2602 func TestResource_Superset(t *testing.T) { 2603 r1 := &Resources{ 2604 CPU: 2000, 2605 MemoryMB: 2048, 2606 DiskMB: 10000, 2607 } 2608 r2 := &Resources{ 2609 CPU: 2000, 2610 MemoryMB: 1024, 2611 DiskMB: 5000, 2612 } 2613 2614 if s, _ := r1.Superset(r1); !s { 2615 t.Fatalf("bad") 2616 } 2617 if s, _ := r1.Superset(r2); !s { 2618 t.Fatalf("bad") 2619 } 2620 if s, _ := r2.Superset(r1); s { 2621 t.Fatalf("bad") 2622 } 2623 if s, _ := r2.Superset(r2); !s { 2624 t.Fatalf("bad") 2625 } 2626 } 2627 2628 func TestResource_Add(t *testing.T) { 2629 r1 := &Resources{ 2630 CPU: 2000, 2631 MemoryMB: 2048, 2632 DiskMB: 10000, 2633 Networks: []*NetworkResource{ 2634 { 2635 CIDR: "10.0.0.0/8", 2636 MBits: 100, 2637 ReservedPorts: []Port{{"ssh", 22, 0, ""}}, 2638 }, 2639 }, 2640 } 2641 r2 := &Resources{ 2642 CPU: 2000, 2643 MemoryMB: 1024, 2644 DiskMB: 5000, 2645 Networks: []*NetworkResource{ 2646 { 2647 IP: "10.0.0.1", 2648 MBits: 50, 2649 ReservedPorts: []Port{{"web", 80, 0, ""}}, 2650 }, 2651 }, 2652 } 2653 2654 r1.Add(r2) 2655 2656 expect := &Resources{ 2657 CPU: 3000, 2658 MemoryMB: 3072, 2659 DiskMB: 15000, 2660 Networks: []*NetworkResource{ 2661 { 2662 CIDR: "10.0.0.0/8", 2663 MBits: 150, 2664 ReservedPorts: []Port{{"ssh", 22, 0, ""}, {"web", 80, 0, ""}}, 2665 }, 2666 }, 2667 } 2668 2669 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 2670 t.Fatalf("bad: %#v %#v", expect, r1) 2671 } 2672 } 2673 2674 func TestResource_Add_Network(t *testing.T) { 2675 r1 := &Resources{} 2676 r2 := &Resources{ 2677 Networks: []*NetworkResource{ 2678 { 2679 MBits: 50, 2680 DynamicPorts: []Port{{"http", 0, 80, ""}, {"https", 0, 443, ""}}, 2681 }, 2682 }, 2683 } 2684 r3 := &Resources{ 2685 Networks: []*NetworkResource{ 2686 { 2687 MBits: 25, 2688 DynamicPorts: []Port{{"admin", 0, 8080, ""}}, 2689 }, 2690 }, 2691 } 2692 2693 r1.Add(r2) 2694 r1.Add(r3) 2695 2696 expect := &Resources{ 2697 Networks: []*NetworkResource{ 2698 { 2699 MBits: 75, 2700 DynamicPorts: []Port{{"http", 0, 80, ""}, {"https", 0, 443, ""}, {"admin", 0, 8080, ""}}, 2701 }, 2702 }, 2703 } 2704 2705 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 2706 t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0]) 2707 } 2708 } 2709 2710 func TestComparableResources_Subtract(t *testing.T) { 2711 r1 := &ComparableResources{ 2712 Flattened: AllocatedTaskResources{ 2713 Cpu: AllocatedCpuResources{ 2714 CpuShares: 2000, 2715 }, 2716 Memory: AllocatedMemoryResources{ 2717 MemoryMB: 2048, 2718 }, 2719 Networks: []*NetworkResource{ 2720 { 2721 CIDR: "10.0.0.0/8", 2722 MBits: 100, 2723 ReservedPorts: []Port{{"ssh", 22, 0, ""}}, 2724 }, 2725 }, 2726 }, 2727 Shared: AllocatedSharedResources{ 2728 DiskMB: 10000, 2729 }, 2730 } 2731 2732 r2 := &ComparableResources{ 2733 Flattened: AllocatedTaskResources{ 2734 Cpu: AllocatedCpuResources{ 2735 CpuShares: 1000, 2736 }, 2737 Memory: AllocatedMemoryResources{ 2738 MemoryMB: 1024, 2739 }, 2740 Networks: []*NetworkResource{ 2741 { 2742 CIDR: "10.0.0.0/8", 2743 MBits: 20, 2744 ReservedPorts: []Port{{"ssh", 22, 0, ""}}, 2745 }, 2746 }, 2747 }, 2748 Shared: AllocatedSharedResources{ 2749 DiskMB: 5000, 2750 }, 2751 } 2752 r1.Subtract(r2) 2753 2754 expect := &ComparableResources{ 2755 Flattened: AllocatedTaskResources{ 2756 Cpu: AllocatedCpuResources{ 2757 CpuShares: 1000, 2758 }, 2759 Memory: AllocatedMemoryResources{ 2760 MemoryMB: 1024, 2761 }, 2762 Networks: []*NetworkResource{ 2763 { 2764 CIDR: "10.0.0.0/8", 2765 MBits: 100, 2766 ReservedPorts: []Port{{"ssh", 22, 0, ""}}, 2767 }, 2768 }, 2769 }, 2770 Shared: AllocatedSharedResources{ 2771 DiskMB: 5000, 2772 }, 2773 } 2774 2775 require := require.New(t) 2776 require.Equal(expect, r1) 2777 } 2778 2779 func TestEncodeDecode(t *testing.T) { 2780 type FooRequest struct { 2781 Foo string 2782 Bar int 2783 Baz bool 2784 } 2785 arg := &FooRequest{ 2786 Foo: "test", 2787 Bar: 42, 2788 Baz: true, 2789 } 2790 buf, err := Encode(1, arg) 2791 if err != nil { 2792 t.Fatalf("err: %v", err) 2793 } 2794 2795 var out FooRequest 2796 err = Decode(buf[1:], &out) 2797 if err != nil { 2798 t.Fatalf("err: %v", err) 2799 } 2800 2801 if !reflect.DeepEqual(arg, &out) { 2802 t.Fatalf("bad: %#v %#v", arg, out) 2803 } 2804 } 2805 2806 func BenchmarkEncodeDecode(b *testing.B) { 2807 job := testJob() 2808 2809 for i := 0; i < b.N; i++ { 2810 buf, err := Encode(1, job) 2811 if err != nil { 2812 b.Fatalf("err: %v", err) 2813 } 2814 2815 var out Job 2816 err = Decode(buf[1:], &out) 2817 if err != nil { 2818 b.Fatalf("err: %v", err) 2819 } 2820 } 2821 } 2822 2823 func TestInvalidServiceCheck(t *testing.T) { 2824 s := Service{ 2825 Name: "service-name", 2826 PortLabel: "bar", 2827 Checks: []*ServiceCheck{ 2828 { 2829 Name: "check-name", 2830 Type: "lol", 2831 }, 2832 }, 2833 } 2834 if err := s.Validate(); err == nil { 2835 t.Fatalf("Service should be invalid (invalid type)") 2836 } 2837 2838 s = Service{ 2839 Name: "service.name", 2840 PortLabel: "bar", 2841 } 2842 if err := s.ValidateName(s.Name); err == nil { 2843 t.Fatalf("Service should be invalid (contains a dot): %v", err) 2844 } 2845 2846 s = Service{ 2847 Name: "-my-service", 2848 PortLabel: "bar", 2849 } 2850 if err := s.Validate(); err == nil { 2851 t.Fatalf("Service should be invalid (begins with a hyphen): %v", err) 2852 } 2853 2854 s = Service{ 2855 Name: "my-service-${NOMAD_META_FOO}", 2856 PortLabel: "bar", 2857 } 2858 if err := s.Validate(); err != nil { 2859 t.Fatalf("Service should be valid: %v", err) 2860 } 2861 2862 s = Service{ 2863 Name: "my_service-${NOMAD_META_FOO}", 2864 PortLabel: "bar", 2865 } 2866 if err := s.Validate(); err == nil { 2867 t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err) 2868 } 2869 2870 s = Service{ 2871 Name: "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456", 2872 PortLabel: "bar", 2873 } 2874 if err := s.ValidateName(s.Name); err == nil { 2875 t.Fatalf("Service should be invalid (too long): %v", err) 2876 } 2877 2878 s = Service{ 2879 Name: "service-name", 2880 Checks: []*ServiceCheck{ 2881 { 2882 Name: "check-tcp", 2883 Type: ServiceCheckTCP, 2884 Interval: 5 * time.Second, 2885 Timeout: 2 * time.Second, 2886 }, 2887 { 2888 Name: "check-http", 2889 Type: ServiceCheckHTTP, 2890 Path: "/foo", 2891 Interval: 5 * time.Second, 2892 Timeout: 2 * time.Second, 2893 }, 2894 }, 2895 } 2896 if err := s.Validate(); err == nil { 2897 t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err) 2898 } 2899 2900 s = Service{ 2901 Name: "service-name", 2902 Checks: []*ServiceCheck{ 2903 { 2904 Name: "check-script", 2905 Type: ServiceCheckScript, 2906 Command: "/bin/date", 2907 Interval: 5 * time.Second, 2908 Timeout: 2 * time.Second, 2909 }, 2910 }, 2911 } 2912 if err := s.Validate(); err != nil { 2913 t.Fatalf("un-expected error: %v", err) 2914 } 2915 2916 s = Service{ 2917 Name: "service-name", 2918 Checks: []*ServiceCheck{ 2919 { 2920 Name: "tcp-check", 2921 Type: ServiceCheckTCP, 2922 Interval: 5 * time.Second, 2923 Timeout: 2 * time.Second, 2924 }, 2925 }, 2926 Connect: &ConsulConnect{ 2927 SidecarService: &ConsulSidecarService{}, 2928 }, 2929 } 2930 require.Error(t, s.Validate()) 2931 } 2932 2933 func TestDistinctCheckID(t *testing.T) { 2934 c1 := ServiceCheck{ 2935 Name: "web-health", 2936 Type: "http", 2937 Path: "/health", 2938 Interval: 2 * time.Second, 2939 Timeout: 3 * time.Second, 2940 } 2941 c2 := ServiceCheck{ 2942 Name: "web-health", 2943 Type: "http", 2944 Path: "/health1", 2945 Interval: 2 * time.Second, 2946 Timeout: 3 * time.Second, 2947 } 2948 2949 c3 := ServiceCheck{ 2950 Name: "web-health", 2951 Type: "http", 2952 Path: "/health", 2953 Interval: 4 * time.Second, 2954 Timeout: 3 * time.Second, 2955 } 2956 serviceID := "123" 2957 c1Hash := c1.Hash(serviceID) 2958 c2Hash := c2.Hash(serviceID) 2959 c3Hash := c3.Hash(serviceID) 2960 2961 if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash { 2962 t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash) 2963 } 2964 2965 } 2966 2967 func TestService_Canonicalize(t *testing.T) { 2968 job := "example" 2969 taskGroup := "cache" 2970 task := "redis" 2971 2972 s := Service{ 2973 Name: "${TASK}-db", 2974 } 2975 2976 s.Canonicalize(job, taskGroup, task) 2977 if s.Name != "redis-db" { 2978 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2979 } 2980 2981 s.Name = "db" 2982 s.Canonicalize(job, taskGroup, task) 2983 if s.Name != "db" { 2984 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2985 } 2986 2987 s.Name = "${JOB}-${TASKGROUP}-${TASK}-db" 2988 s.Canonicalize(job, taskGroup, task) 2989 if s.Name != "example-cache-redis-db" { 2990 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2991 } 2992 2993 s.Name = "${BASE}-db" 2994 s.Canonicalize(job, taskGroup, task) 2995 if s.Name != "example-cache-redis-db" { 2996 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2997 } 2998 2999 } 3000 3001 func TestService_Validate(t *testing.T) { 3002 s := Service{ 3003 Name: "testservice", 3004 } 3005 3006 s.Canonicalize("testjob", "testgroup", "testtask") 3007 3008 // Base service should be valid 3009 require.NoError(t, s.Validate()) 3010 3011 // Native Connect requires task name on service 3012 s.Connect = &ConsulConnect{ 3013 Native: true, 3014 } 3015 require.Error(t, s.Validate()) 3016 3017 // Native Connect should work with task name on service set 3018 s.TaskName = "testtask" 3019 require.NoError(t, s.Validate()) 3020 3021 // Native Connect + Sidecar should be invalid 3022 s.Connect.SidecarService = &ConsulSidecarService{} 3023 require.Error(t, s.Validate()) 3024 } 3025 3026 func TestService_Equals(t *testing.T) { 3027 s := Service{ 3028 Name: "testservice", 3029 } 3030 3031 s.Canonicalize("testjob", "testgroup", "testtask") 3032 3033 o := s.Copy() 3034 3035 // Base service should be equal to copy of itself 3036 require.True(t, s.Equals(o)) 3037 3038 // create a helper to assert a diff and reset the struct 3039 assertDiff := func() { 3040 require.False(t, s.Equals(o)) 3041 o = s.Copy() 3042 require.True(t, s.Equals(o), "bug in copy") 3043 } 3044 3045 // Changing any field should cause inequality 3046 o.Name = "diff" 3047 assertDiff() 3048 3049 o.PortLabel = "diff" 3050 assertDiff() 3051 3052 o.AddressMode = AddressModeDriver 3053 assertDiff() 3054 3055 o.Tags = []string{"diff"} 3056 assertDiff() 3057 3058 o.CanaryTags = []string{"diff"} 3059 assertDiff() 3060 3061 o.Checks = []*ServiceCheck{{Name: "diff"}} 3062 assertDiff() 3063 3064 o.Connect = &ConsulConnect{Native: true} 3065 assertDiff() 3066 3067 o.EnableTagOverride = true 3068 assertDiff() 3069 } 3070 3071 func TestJob_ExpandServiceNames(t *testing.T) { 3072 j := &Job{ 3073 Name: "my-job", 3074 TaskGroups: []*TaskGroup{ 3075 { 3076 Name: "web", 3077 Tasks: []*Task{ 3078 { 3079 Name: "frontend", 3080 Services: []*Service{ 3081 { 3082 Name: "${BASE}-default", 3083 }, 3084 { 3085 Name: "jmx", 3086 }, 3087 }, 3088 }, 3089 }, 3090 }, 3091 { 3092 Name: "admin", 3093 Tasks: []*Task{ 3094 { 3095 Name: "admin-web", 3096 }, 3097 }, 3098 }, 3099 }, 3100 } 3101 3102 j.Canonicalize() 3103 3104 service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name 3105 if service1Name != "my-job-web-frontend-default" { 3106 t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name) 3107 } 3108 3109 service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name 3110 if service2Name != "jmx" { 3111 t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name) 3112 } 3113 3114 } 3115 3116 func TestJob_CombinedTaskMeta(t *testing.T) { 3117 j := &Job{ 3118 Meta: map[string]string{ 3119 "job_test": "job", 3120 "group_test": "job", 3121 "task_test": "job", 3122 }, 3123 TaskGroups: []*TaskGroup{ 3124 { 3125 Name: "group", 3126 Meta: map[string]string{ 3127 "group_test": "group", 3128 "task_test": "group", 3129 }, 3130 Tasks: []*Task{ 3131 { 3132 Name: "task", 3133 Meta: map[string]string{ 3134 "task_test": "task", 3135 }, 3136 }, 3137 }, 3138 }, 3139 }, 3140 } 3141 3142 require := require.New(t) 3143 require.EqualValues(map[string]string{ 3144 "job_test": "job", 3145 "group_test": "group", 3146 "task_test": "task", 3147 }, j.CombinedTaskMeta("group", "task")) 3148 require.EqualValues(map[string]string{ 3149 "job_test": "job", 3150 "group_test": "group", 3151 "task_test": "group", 3152 }, j.CombinedTaskMeta("group", "")) 3153 require.EqualValues(map[string]string{ 3154 "job_test": "job", 3155 "group_test": "job", 3156 "task_test": "job", 3157 }, j.CombinedTaskMeta("", "task")) 3158 3159 } 3160 3161 func TestPeriodicConfig_EnabledInvalid(t *testing.T) { 3162 // Create a config that is enabled but with no interval specified. 3163 p := &PeriodicConfig{Enabled: true} 3164 if err := p.Validate(); err == nil { 3165 t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid") 3166 } 3167 3168 // Create a config that is enabled, with a spec but no type specified. 3169 p = &PeriodicConfig{Enabled: true, Spec: "foo"} 3170 if err := p.Validate(); err == nil { 3171 t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid") 3172 } 3173 3174 // Create a config that is enabled, with a spec type but no spec specified. 3175 p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron} 3176 if err := p.Validate(); err == nil { 3177 t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid") 3178 } 3179 3180 // Create a config that is enabled, with a bad time zone. 3181 p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"} 3182 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") { 3183 t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err) 3184 } 3185 } 3186 3187 func TestPeriodicConfig_InvalidCron(t *testing.T) { 3188 specs := []string{"foo", "* *", "@foo"} 3189 for _, spec := range specs { 3190 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 3191 p.Canonicalize() 3192 if err := p.Validate(); err == nil { 3193 t.Fatal("Invalid cron spec") 3194 } 3195 } 3196 } 3197 3198 func TestPeriodicConfig_ValidCron(t *testing.T) { 3199 specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} 3200 for _, spec := range specs { 3201 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 3202 p.Canonicalize() 3203 if err := p.Validate(); err != nil { 3204 t.Fatal("Passed valid cron") 3205 } 3206 } 3207 } 3208 3209 func TestPeriodicConfig_NextCron(t *testing.T) { 3210 from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) 3211 3212 cases := []struct { 3213 spec string 3214 nextTime time.Time 3215 errorMsg string 3216 }{ 3217 { 3218 spec: "0 0 29 2 * 1980", 3219 nextTime: time.Time{}, 3220 }, 3221 { 3222 spec: "*/5 * * * *", 3223 nextTime: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC), 3224 }, 3225 { 3226 spec: "1 15-0 *", 3227 nextTime: time.Time{}, 3228 errorMsg: "failed parsing cron expression", 3229 }, 3230 } 3231 3232 for i, c := range cases { 3233 t.Run(fmt.Sprintf("case: %d: %s", i, c.spec), func(t *testing.T) { 3234 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: c.spec} 3235 p.Canonicalize() 3236 n, err := p.Next(from) 3237 3238 require.Equal(t, c.nextTime, n) 3239 if c.errorMsg == "" { 3240 require.NoError(t, err) 3241 } else { 3242 require.Error(t, err) 3243 require.Contains(t, err.Error(), c.errorMsg) 3244 } 3245 }) 3246 } 3247 } 3248 3249 func TestPeriodicConfig_ValidTimeZone(t *testing.T) { 3250 zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} 3251 for _, zone := range zones { 3252 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} 3253 p.Canonicalize() 3254 if err := p.Validate(); err != nil { 3255 t.Fatalf("Valid tz errored: %v", err) 3256 } 3257 } 3258 } 3259 3260 func TestPeriodicConfig_DST(t *testing.T) { 3261 require := require.New(t) 3262 3263 // On Sun, Mar 12, 2:00 am 2017: +1 hour UTC 3264 p := &PeriodicConfig{ 3265 Enabled: true, 3266 SpecType: PeriodicSpecCron, 3267 Spec: "0 2 11-13 3 * 2017", 3268 TimeZone: "America/Los_Angeles", 3269 } 3270 p.Canonicalize() 3271 3272 t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location) 3273 t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location) 3274 3275 // E1 is an 8 hour adjustment, E2 is a 7 hour adjustment 3276 e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC) 3277 e2 := time.Date(2017, time.March, 13, 9, 0, 0, 0, time.UTC) 3278 3279 n1, err := p.Next(t1) 3280 require.Nil(err) 3281 3282 n2, err := p.Next(t2) 3283 require.Nil(err) 3284 3285 require.Equal(e1, n1.UTC()) 3286 require.Equal(e2, n2.UTC()) 3287 } 3288 3289 func TestTaskLifecycleConfig_Validate(t *testing.T) { 3290 testCases := []struct { 3291 name string 3292 tlc *TaskLifecycleConfig 3293 err error 3294 }{ 3295 { 3296 name: "prestart completed", 3297 tlc: &TaskLifecycleConfig{ 3298 Hook: "prestart", 3299 Sidecar: false, 3300 }, 3301 err: nil, 3302 }, 3303 { 3304 name: "prestart running", 3305 tlc: &TaskLifecycleConfig{ 3306 Hook: "prestart", 3307 Sidecar: true, 3308 }, 3309 err: nil, 3310 }, 3311 { 3312 name: "no hook", 3313 tlc: &TaskLifecycleConfig{ 3314 Sidecar: true, 3315 }, 3316 err: fmt.Errorf("no lifecycle hook provided"), 3317 }, 3318 } 3319 3320 for _, tc := range testCases { 3321 t.Run(tc.name, func(t *testing.T) { 3322 err := tc.tlc.Validate() 3323 if tc.err != nil { 3324 require.Error(t, err) 3325 require.Contains(t, err.Error(), tc.err.Error()) 3326 } else { 3327 require.Nil(t, err) 3328 } 3329 }) 3330 3331 } 3332 } 3333 3334 func TestRestartPolicy_Validate(t *testing.T) { 3335 // Policy with acceptable restart options passes 3336 p := &RestartPolicy{ 3337 Mode: RestartPolicyModeFail, 3338 Attempts: 0, 3339 Interval: 5 * time.Second, 3340 } 3341 if err := p.Validate(); err != nil { 3342 t.Fatalf("err: %v", err) 3343 } 3344 3345 // Policy with ambiguous restart options fails 3346 p = &RestartPolicy{ 3347 Mode: RestartPolicyModeDelay, 3348 Attempts: 0, 3349 Interval: 5 * time.Second, 3350 } 3351 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") { 3352 t.Fatalf("expect ambiguity error, got: %v", err) 3353 } 3354 3355 // Bad policy mode fails 3356 p = &RestartPolicy{ 3357 Mode: "nope", 3358 Attempts: 1, 3359 Interval: 5 * time.Second, 3360 } 3361 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") { 3362 t.Fatalf("expect mode error, got: %v", err) 3363 } 3364 3365 // Fails when attempts*delay does not fit inside interval 3366 p = &RestartPolicy{ 3367 Mode: RestartPolicyModeDelay, 3368 Attempts: 3, 3369 Delay: 5 * time.Second, 3370 Interval: 5 * time.Second, 3371 } 3372 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") { 3373 t.Fatalf("expect restart interval error, got: %v", err) 3374 } 3375 3376 // Fails when interval is to small 3377 p = &RestartPolicy{ 3378 Mode: RestartPolicyModeDelay, 3379 Attempts: 3, 3380 Delay: 5 * time.Second, 3381 Interval: 2 * time.Second, 3382 } 3383 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") { 3384 t.Fatalf("expect interval too small error, got: %v", err) 3385 } 3386 } 3387 3388 func TestReschedulePolicy_Validate(t *testing.T) { 3389 type testCase struct { 3390 desc string 3391 ReschedulePolicy *ReschedulePolicy 3392 errors []error 3393 } 3394 3395 testCases := []testCase{ 3396 { 3397 desc: "Nil", 3398 }, 3399 { 3400 desc: "Disabled", 3401 ReschedulePolicy: &ReschedulePolicy{ 3402 Attempts: 0, 3403 Interval: 0 * time.Second}, 3404 }, 3405 { 3406 desc: "Disabled", 3407 ReschedulePolicy: &ReschedulePolicy{ 3408 Attempts: -1, 3409 Interval: 5 * time.Minute}, 3410 }, 3411 { 3412 desc: "Valid Linear Delay", 3413 ReschedulePolicy: &ReschedulePolicy{ 3414 Attempts: 1, 3415 Interval: 5 * time.Minute, 3416 Delay: 10 * time.Second, 3417 DelayFunction: "constant"}, 3418 }, 3419 { 3420 desc: "Valid Exponential Delay", 3421 ReschedulePolicy: &ReschedulePolicy{ 3422 Attempts: 5, 3423 Interval: 1 * time.Hour, 3424 Delay: 30 * time.Second, 3425 MaxDelay: 5 * time.Minute, 3426 DelayFunction: "exponential"}, 3427 }, 3428 { 3429 desc: "Valid Fibonacci Delay", 3430 ReschedulePolicy: &ReschedulePolicy{ 3431 Attempts: 5, 3432 Interval: 15 * time.Minute, 3433 Delay: 10 * time.Second, 3434 MaxDelay: 5 * time.Minute, 3435 DelayFunction: "fibonacci"}, 3436 }, 3437 { 3438 desc: "Invalid delay function", 3439 ReschedulePolicy: &ReschedulePolicy{ 3440 Attempts: 1, 3441 Interval: 1 * time.Second, 3442 DelayFunction: "blah"}, 3443 errors: []error{ 3444 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 3445 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3446 fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions), 3447 }, 3448 }, 3449 { 3450 desc: "Invalid delay ceiling", 3451 ReschedulePolicy: &ReschedulePolicy{ 3452 Attempts: 1, 3453 Interval: 8 * time.Second, 3454 DelayFunction: "exponential", 3455 Delay: 15 * time.Second, 3456 MaxDelay: 5 * time.Second}, 3457 errors: []error{ 3458 fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", 3459 15*time.Second, 5*time.Second), 3460 }, 3461 }, 3462 { 3463 desc: "Invalid delay and interval", 3464 ReschedulePolicy: &ReschedulePolicy{ 3465 Attempts: 1, 3466 Interval: 1 * time.Second, 3467 DelayFunction: "constant"}, 3468 errors: []error{ 3469 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 3470 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3471 }, 3472 }, { 3473 // Should suggest 2h40m as the interval 3474 desc: "Invalid Attempts - linear delay", 3475 ReschedulePolicy: &ReschedulePolicy{ 3476 Attempts: 10, 3477 Interval: 1 * time.Hour, 3478 Delay: 20 * time.Minute, 3479 DelayFunction: "constant", 3480 }, 3481 errors: []error{ 3482 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+ 3483 " delay function %q", 3, time.Hour, 20*time.Minute, "constant"), 3484 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 3485 200*time.Minute, 10), 3486 }, 3487 }, 3488 { 3489 // Should suggest 4h40m as the interval 3490 // Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40} 3491 desc: "Invalid Attempts - exponential delay", 3492 ReschedulePolicy: &ReschedulePolicy{ 3493 Attempts: 10, 3494 Interval: 30 * time.Minute, 3495 Delay: 5 * time.Minute, 3496 MaxDelay: 40 * time.Minute, 3497 DelayFunction: "exponential", 3498 }, 3499 errors: []error{ 3500 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 3501 "delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute, 3502 "exponential", 40*time.Minute), 3503 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 3504 280*time.Minute, 10), 3505 }, 3506 }, 3507 { 3508 // Should suggest 8h as the interval 3509 // Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80} 3510 desc: "Invalid Attempts - fibonacci delay", 3511 ReschedulePolicy: &ReschedulePolicy{ 3512 Attempts: 10, 3513 Interval: 1 * time.Hour, 3514 Delay: 20 * time.Minute, 3515 MaxDelay: 80 * time.Minute, 3516 DelayFunction: "fibonacci", 3517 }, 3518 errors: []error{ 3519 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 3520 "delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute, 3521 "fibonacci", 80*time.Minute), 3522 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 3523 480*time.Minute, 10), 3524 }, 3525 }, 3526 { 3527 desc: "Ambiguous Unlimited config, has both attempts and unlimited set", 3528 ReschedulePolicy: &ReschedulePolicy{ 3529 Attempts: 1, 3530 Unlimited: true, 3531 DelayFunction: "exponential", 3532 Delay: 5 * time.Minute, 3533 MaxDelay: 1 * time.Hour, 3534 }, 3535 errors: []error{ 3536 fmt.Errorf("Interval must be a non zero value if Attempts > 0"), 3537 fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true), 3538 }, 3539 }, 3540 { 3541 desc: "Invalid Unlimited config", 3542 ReschedulePolicy: &ReschedulePolicy{ 3543 Attempts: 1, 3544 Interval: 1 * time.Second, 3545 Unlimited: true, 3546 DelayFunction: "exponential", 3547 }, 3548 errors: []error{ 3549 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3550 fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 3551 }, 3552 }, 3553 { 3554 desc: "Valid Unlimited config", 3555 ReschedulePolicy: &ReschedulePolicy{ 3556 Unlimited: true, 3557 DelayFunction: "exponential", 3558 Delay: 5 * time.Second, 3559 MaxDelay: 1 * time.Hour, 3560 }, 3561 }, 3562 } 3563 3564 for _, tc := range testCases { 3565 t.Run(tc.desc, func(t *testing.T) { 3566 require := require.New(t) 3567 gotErr := tc.ReschedulePolicy.Validate() 3568 if tc.errors != nil { 3569 // Validate all errors 3570 for _, err := range tc.errors { 3571 require.Contains(gotErr.Error(), err.Error()) 3572 } 3573 } else { 3574 require.Nil(gotErr) 3575 } 3576 }) 3577 } 3578 } 3579 3580 func TestAllocation_Index(t *testing.T) { 3581 a1 := Allocation{ 3582 Name: "example.cache[1]", 3583 TaskGroup: "cache", 3584 JobID: "example", 3585 Job: &Job{ 3586 ID: "example", 3587 TaskGroups: []*TaskGroup{{Name: "cache"}}}, 3588 } 3589 e1 := uint(1) 3590 a2 := a1.Copy() 3591 a2.Name = "example.cache[713127]" 3592 e2 := uint(713127) 3593 3594 if a1.Index() != e1 || a2.Index() != e2 { 3595 t.Fatalf("Got %d and %d", a1.Index(), a2.Index()) 3596 } 3597 } 3598 3599 func TestTaskArtifact_Validate_Source(t *testing.T) { 3600 valid := &TaskArtifact{GetterSource: "google.com"} 3601 if err := valid.Validate(); err != nil { 3602 t.Fatalf("unexpected error: %v", err) 3603 } 3604 } 3605 3606 func TestTaskArtifact_Validate_Dest(t *testing.T) { 3607 valid := &TaskArtifact{GetterSource: "google.com"} 3608 if err := valid.Validate(); err != nil { 3609 t.Fatalf("unexpected error: %v", err) 3610 } 3611 3612 valid.RelativeDest = "local/" 3613 if err := valid.Validate(); err != nil { 3614 t.Fatalf("unexpected error: %v", err) 3615 } 3616 3617 valid.RelativeDest = "local/.." 3618 if err := valid.Validate(); err != nil { 3619 t.Fatalf("unexpected error: %v", err) 3620 } 3621 3622 valid.RelativeDest = "local/../../.." 3623 if err := valid.Validate(); err == nil { 3624 t.Fatalf("expected error: %v", err) 3625 } 3626 } 3627 3628 // TestTaskArtifact_Hash asserts an artifact's hash changes when any of the 3629 // fields change. 3630 func TestTaskArtifact_Hash(t *testing.T) { 3631 t.Parallel() 3632 3633 cases := []TaskArtifact{ 3634 {}, 3635 { 3636 GetterSource: "a", 3637 }, 3638 { 3639 GetterSource: "b", 3640 }, 3641 { 3642 GetterSource: "b", 3643 GetterOptions: map[string]string{"c": "c"}, 3644 }, 3645 { 3646 GetterSource: "b", 3647 GetterOptions: map[string]string{ 3648 "c": "c", 3649 "d": "d", 3650 }, 3651 }, 3652 { 3653 GetterSource: "b", 3654 GetterOptions: map[string]string{ 3655 "c": "c", 3656 "d": "e", 3657 }, 3658 }, 3659 { 3660 GetterSource: "b", 3661 GetterOptions: map[string]string{ 3662 "c": "c", 3663 "d": "e", 3664 }, 3665 GetterMode: "f", 3666 }, 3667 { 3668 GetterSource: "b", 3669 GetterOptions: map[string]string{ 3670 "c": "c", 3671 "d": "e", 3672 }, 3673 GetterMode: "g", 3674 }, 3675 { 3676 GetterSource: "b", 3677 GetterOptions: map[string]string{ 3678 "c": "c", 3679 "d": "e", 3680 }, 3681 GetterMode: "g", 3682 RelativeDest: "h", 3683 }, 3684 { 3685 GetterSource: "b", 3686 GetterOptions: map[string]string{ 3687 "c": "c", 3688 "d": "e", 3689 }, 3690 GetterMode: "g", 3691 RelativeDest: "i", 3692 }, 3693 } 3694 3695 // Map of hash to source 3696 hashes := make(map[string]TaskArtifact, len(cases)) 3697 for _, tc := range cases { 3698 h := tc.Hash() 3699 3700 // Hash should be deterministic 3701 require.Equal(t, h, tc.Hash()) 3702 3703 // Hash should be unique 3704 if orig, ok := hashes[h]; ok { 3705 require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n", 3706 pretty.Sprint(tc), pretty.Sprint(orig), 3707 ) 3708 } 3709 hashes[h] = tc 3710 } 3711 3712 require.Len(t, hashes, len(cases)) 3713 } 3714 3715 func TestAllocation_ShouldMigrate(t *testing.T) { 3716 alloc := Allocation{ 3717 PreviousAllocation: "123", 3718 TaskGroup: "foo", 3719 Job: &Job{ 3720 TaskGroups: []*TaskGroup{ 3721 { 3722 Name: "foo", 3723 EphemeralDisk: &EphemeralDisk{ 3724 Migrate: true, 3725 Sticky: true, 3726 }, 3727 }, 3728 }, 3729 }, 3730 } 3731 3732 if !alloc.ShouldMigrate() { 3733 t.Fatalf("bad: %v", alloc) 3734 } 3735 3736 alloc1 := Allocation{ 3737 PreviousAllocation: "123", 3738 TaskGroup: "foo", 3739 Job: &Job{ 3740 TaskGroups: []*TaskGroup{ 3741 { 3742 Name: "foo", 3743 EphemeralDisk: &EphemeralDisk{}, 3744 }, 3745 }, 3746 }, 3747 } 3748 3749 if alloc1.ShouldMigrate() { 3750 t.Fatalf("bad: %v", alloc) 3751 } 3752 3753 alloc2 := Allocation{ 3754 PreviousAllocation: "123", 3755 TaskGroup: "foo", 3756 Job: &Job{ 3757 TaskGroups: []*TaskGroup{ 3758 { 3759 Name: "foo", 3760 EphemeralDisk: &EphemeralDisk{ 3761 Sticky: false, 3762 Migrate: true, 3763 }, 3764 }, 3765 }, 3766 }, 3767 } 3768 3769 if alloc2.ShouldMigrate() { 3770 t.Fatalf("bad: %v", alloc) 3771 } 3772 3773 alloc3 := Allocation{ 3774 PreviousAllocation: "123", 3775 TaskGroup: "foo", 3776 Job: &Job{ 3777 TaskGroups: []*TaskGroup{ 3778 { 3779 Name: "foo", 3780 }, 3781 }, 3782 }, 3783 } 3784 3785 if alloc3.ShouldMigrate() { 3786 t.Fatalf("bad: %v", alloc) 3787 } 3788 3789 // No previous 3790 alloc4 := Allocation{ 3791 TaskGroup: "foo", 3792 Job: &Job{ 3793 TaskGroups: []*TaskGroup{ 3794 { 3795 Name: "foo", 3796 EphemeralDisk: &EphemeralDisk{ 3797 Migrate: true, 3798 Sticky: true, 3799 }, 3800 }, 3801 }, 3802 }, 3803 } 3804 3805 if alloc4.ShouldMigrate() { 3806 t.Fatalf("bad: %v", alloc4) 3807 } 3808 } 3809 3810 func TestTaskArtifact_Validate_Checksum(t *testing.T) { 3811 cases := []struct { 3812 Input *TaskArtifact 3813 Err bool 3814 }{ 3815 { 3816 &TaskArtifact{ 3817 GetterSource: "foo.com", 3818 GetterOptions: map[string]string{ 3819 "checksum": "no-type", 3820 }, 3821 }, 3822 true, 3823 }, 3824 { 3825 &TaskArtifact{ 3826 GetterSource: "foo.com", 3827 GetterOptions: map[string]string{ 3828 "checksum": "md5:toosmall", 3829 }, 3830 }, 3831 true, 3832 }, 3833 { 3834 &TaskArtifact{ 3835 GetterSource: "foo.com", 3836 GetterOptions: map[string]string{ 3837 "checksum": "invalid:type", 3838 }, 3839 }, 3840 true, 3841 }, 3842 { 3843 &TaskArtifact{ 3844 GetterSource: "foo.com", 3845 GetterOptions: map[string]string{ 3846 "checksum": "md5:${ARTIFACT_CHECKSUM}", 3847 }, 3848 }, 3849 false, 3850 }, 3851 } 3852 3853 for i, tc := range cases { 3854 err := tc.Input.Validate() 3855 if (err != nil) != tc.Err { 3856 t.Fatalf("case %d: %v", i, err) 3857 continue 3858 } 3859 } 3860 } 3861 3862 func TestPlan_NormalizeAllocations(t *testing.T) { 3863 t.Parallel() 3864 plan := &Plan{ 3865 NodeUpdate: make(map[string][]*Allocation), 3866 NodePreemptions: make(map[string][]*Allocation), 3867 } 3868 stoppedAlloc := MockAlloc() 3869 desiredDesc := "Desired desc" 3870 plan.AppendStoppedAlloc(stoppedAlloc, desiredDesc, AllocClientStatusLost, "followup-eval-id") 3871 preemptedAlloc := MockAlloc() 3872 preemptingAllocID := uuid.Generate() 3873 plan.AppendPreemptedAlloc(preemptedAlloc, preemptingAllocID) 3874 3875 plan.NormalizeAllocations() 3876 3877 actualStoppedAlloc := plan.NodeUpdate[stoppedAlloc.NodeID][0] 3878 expectedStoppedAlloc := &Allocation{ 3879 ID: stoppedAlloc.ID, 3880 DesiredDescription: desiredDesc, 3881 ClientStatus: AllocClientStatusLost, 3882 FollowupEvalID: "followup-eval-id", 3883 } 3884 assert.Equal(t, expectedStoppedAlloc, actualStoppedAlloc) 3885 actualPreemptedAlloc := plan.NodePreemptions[preemptedAlloc.NodeID][0] 3886 expectedPreemptedAlloc := &Allocation{ 3887 ID: preemptedAlloc.ID, 3888 PreemptedByAllocation: preemptingAllocID, 3889 } 3890 assert.Equal(t, expectedPreemptedAlloc, actualPreemptedAlloc) 3891 } 3892 3893 func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 3894 t.Parallel() 3895 plan := &Plan{ 3896 NodeUpdate: make(map[string][]*Allocation), 3897 } 3898 alloc := MockAlloc() 3899 desiredDesc := "Desired desc" 3900 3901 plan.AppendStoppedAlloc(alloc, desiredDesc, AllocClientStatusLost, "") 3902 3903 expectedAlloc := new(Allocation) 3904 *expectedAlloc = *alloc 3905 expectedAlloc.DesiredDescription = desiredDesc 3906 expectedAlloc.DesiredStatus = AllocDesiredStatusStop 3907 expectedAlloc.ClientStatus = AllocClientStatusLost 3908 expectedAlloc.Job = nil 3909 expectedAlloc.AllocStates = []*AllocState{{ 3910 Field: AllocStateFieldClientStatus, 3911 Value: "lost", 3912 }} 3913 3914 // This value is set to time.Now() in AppendStoppedAlloc, so clear it 3915 appendedAlloc := plan.NodeUpdate[alloc.NodeID][0] 3916 appendedAlloc.AllocStates[0].Time = time.Time{} 3917 3918 assert.Equal(t, expectedAlloc, appendedAlloc) 3919 assert.Equal(t, alloc.Job, plan.Job) 3920 } 3921 3922 func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 3923 t.Parallel() 3924 plan := &Plan{ 3925 NodePreemptions: make(map[string][]*Allocation), 3926 } 3927 alloc := MockAlloc() 3928 preemptingAllocID := uuid.Generate() 3929 3930 plan.AppendPreemptedAlloc(alloc, preemptingAllocID) 3931 3932 appendedAlloc := plan.NodePreemptions[alloc.NodeID][0] 3933 expectedAlloc := &Allocation{ 3934 ID: alloc.ID, 3935 PreemptedByAllocation: preemptingAllocID, 3936 JobID: alloc.JobID, 3937 Namespace: alloc.Namespace, 3938 DesiredStatus: AllocDesiredStatusEvict, 3939 DesiredDescription: fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID), 3940 AllocatedResources: alloc.AllocatedResources, 3941 TaskResources: alloc.TaskResources, 3942 SharedResources: alloc.SharedResources, 3943 } 3944 assert.Equal(t, expectedAlloc, appendedAlloc) 3945 } 3946 3947 func TestAllocation_MsgPackTags(t *testing.T) { 3948 t.Parallel() 3949 planType := reflect.TypeOf(Allocation{}) 3950 3951 msgPackTags, _ := planType.FieldByName("_struct") 3952 3953 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3954 } 3955 3956 func TestEvaluation_MsgPackTags(t *testing.T) { 3957 t.Parallel() 3958 planType := reflect.TypeOf(Evaluation{}) 3959 3960 msgPackTags, _ := planType.FieldByName("_struct") 3961 3962 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3963 } 3964 3965 func TestAllocation_Terminated(t *testing.T) { 3966 type desiredState struct { 3967 ClientStatus string 3968 DesiredStatus string 3969 Terminated bool 3970 } 3971 3972 harness := []desiredState{ 3973 { 3974 ClientStatus: AllocClientStatusPending, 3975 DesiredStatus: AllocDesiredStatusStop, 3976 Terminated: false, 3977 }, 3978 { 3979 ClientStatus: AllocClientStatusRunning, 3980 DesiredStatus: AllocDesiredStatusStop, 3981 Terminated: false, 3982 }, 3983 { 3984 ClientStatus: AllocClientStatusFailed, 3985 DesiredStatus: AllocDesiredStatusStop, 3986 Terminated: true, 3987 }, 3988 { 3989 ClientStatus: AllocClientStatusFailed, 3990 DesiredStatus: AllocDesiredStatusRun, 3991 Terminated: true, 3992 }, 3993 } 3994 3995 for _, state := range harness { 3996 alloc := Allocation{} 3997 alloc.DesiredStatus = state.DesiredStatus 3998 alloc.ClientStatus = state.ClientStatus 3999 if alloc.Terminated() != state.Terminated { 4000 t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated()) 4001 } 4002 } 4003 } 4004 4005 func TestAllocation_ShouldReschedule(t *testing.T) { 4006 type testCase struct { 4007 Desc string 4008 FailTime time.Time 4009 ClientStatus string 4010 DesiredStatus string 4011 ReschedulePolicy *ReschedulePolicy 4012 RescheduleTrackers []*RescheduleEvent 4013 ShouldReschedule bool 4014 } 4015 4016 fail := time.Now() 4017 4018 harness := []testCase{ 4019 { 4020 Desc: "Reschedule when desired state is stop", 4021 ClientStatus: AllocClientStatusPending, 4022 DesiredStatus: AllocDesiredStatusStop, 4023 FailTime: fail, 4024 ReschedulePolicy: nil, 4025 ShouldReschedule: false, 4026 }, 4027 { 4028 Desc: "Disabled rescheduling", 4029 ClientStatus: AllocClientStatusFailed, 4030 DesiredStatus: AllocDesiredStatusRun, 4031 FailTime: fail, 4032 ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute}, 4033 ShouldReschedule: false, 4034 }, 4035 { 4036 Desc: "Reschedule when client status is complete", 4037 ClientStatus: AllocClientStatusComplete, 4038 DesiredStatus: AllocDesiredStatusRun, 4039 FailTime: fail, 4040 ReschedulePolicy: nil, 4041 ShouldReschedule: false, 4042 }, 4043 { 4044 Desc: "Reschedule with nil reschedule policy", 4045 ClientStatus: AllocClientStatusFailed, 4046 DesiredStatus: AllocDesiredStatusRun, 4047 FailTime: fail, 4048 ReschedulePolicy: nil, 4049 ShouldReschedule: false, 4050 }, 4051 { 4052 Desc: "Reschedule with unlimited and attempts >0", 4053 ClientStatus: AllocClientStatusFailed, 4054 DesiredStatus: AllocDesiredStatusRun, 4055 FailTime: fail, 4056 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true}, 4057 ShouldReschedule: true, 4058 }, 4059 { 4060 Desc: "Reschedule when client status is complete", 4061 ClientStatus: AllocClientStatusComplete, 4062 DesiredStatus: AllocDesiredStatusRun, 4063 FailTime: fail, 4064 ReschedulePolicy: nil, 4065 ShouldReschedule: false, 4066 }, 4067 { 4068 Desc: "Reschedule with policy when client status complete", 4069 ClientStatus: AllocClientStatusComplete, 4070 DesiredStatus: AllocDesiredStatusRun, 4071 FailTime: fail, 4072 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 4073 ShouldReschedule: false, 4074 }, 4075 { 4076 Desc: "Reschedule with no previous attempts", 4077 ClientStatus: AllocClientStatusFailed, 4078 DesiredStatus: AllocDesiredStatusRun, 4079 FailTime: fail, 4080 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 4081 ShouldReschedule: true, 4082 }, 4083 { 4084 Desc: "Reschedule with leftover attempts", 4085 ClientStatus: AllocClientStatusFailed, 4086 DesiredStatus: AllocDesiredStatusRun, 4087 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 4088 FailTime: fail, 4089 RescheduleTrackers: []*RescheduleEvent{ 4090 { 4091 RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(), 4092 }, 4093 }, 4094 ShouldReschedule: true, 4095 }, 4096 { 4097 Desc: "Reschedule with too old previous attempts", 4098 ClientStatus: AllocClientStatusFailed, 4099 DesiredStatus: AllocDesiredStatusRun, 4100 FailTime: fail, 4101 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute}, 4102 RescheduleTrackers: []*RescheduleEvent{ 4103 { 4104 RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(), 4105 }, 4106 }, 4107 ShouldReschedule: true, 4108 }, 4109 { 4110 Desc: "Reschedule with no leftover attempts", 4111 ClientStatus: AllocClientStatusFailed, 4112 DesiredStatus: AllocDesiredStatusRun, 4113 FailTime: fail, 4114 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 4115 RescheduleTrackers: []*RescheduleEvent{ 4116 { 4117 RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), 4118 }, 4119 { 4120 RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(), 4121 }, 4122 }, 4123 ShouldReschedule: false, 4124 }, 4125 } 4126 4127 for _, state := range harness { 4128 alloc := Allocation{} 4129 alloc.DesiredStatus = state.DesiredStatus 4130 alloc.ClientStatus = state.ClientStatus 4131 alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers} 4132 4133 t.Run(state.Desc, func(t *testing.T) { 4134 if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule { 4135 t.Fatalf("expected %v but got %v", state.ShouldReschedule, got) 4136 } 4137 }) 4138 4139 } 4140 } 4141 4142 func TestAllocation_LastEventTime(t *testing.T) { 4143 type testCase struct { 4144 desc string 4145 taskState map[string]*TaskState 4146 expectedLastEventTime time.Time 4147 } 4148 4149 t1 := time.Now().UTC() 4150 4151 testCases := []testCase{ 4152 { 4153 desc: "nil task state", 4154 expectedLastEventTime: t1, 4155 }, 4156 { 4157 desc: "empty task state", 4158 taskState: make(map[string]*TaskState), 4159 expectedLastEventTime: t1, 4160 }, 4161 { 4162 desc: "Finished At not set", 4163 taskState: map[string]*TaskState{"foo": {State: "start", 4164 StartedAt: t1.Add(-2 * time.Hour)}}, 4165 expectedLastEventTime: t1, 4166 }, 4167 { 4168 desc: "One finished ", 4169 taskState: map[string]*TaskState{"foo": {State: "start", 4170 StartedAt: t1.Add(-2 * time.Hour), 4171 FinishedAt: t1.Add(-1 * time.Hour)}}, 4172 expectedLastEventTime: t1.Add(-1 * time.Hour), 4173 }, 4174 { 4175 desc: "Multiple task groups", 4176 taskState: map[string]*TaskState{"foo": {State: "start", 4177 StartedAt: t1.Add(-2 * time.Hour), 4178 FinishedAt: t1.Add(-1 * time.Hour)}, 4179 "bar": {State: "start", 4180 StartedAt: t1.Add(-2 * time.Hour), 4181 FinishedAt: t1.Add(-40 * time.Minute)}}, 4182 expectedLastEventTime: t1.Add(-40 * time.Minute), 4183 }, 4184 { 4185 desc: "No finishedAt set, one task event, should use modify time", 4186 taskState: map[string]*TaskState{"foo": { 4187 State: "run", 4188 StartedAt: t1.Add(-2 * time.Hour), 4189 Events: []*TaskEvent{ 4190 {Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()}, 4191 }}, 4192 }, 4193 expectedLastEventTime: t1, 4194 }, 4195 } 4196 for _, tc := range testCases { 4197 t.Run(tc.desc, func(t *testing.T) { 4198 alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()} 4199 alloc.TaskStates = tc.taskState 4200 require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime()) 4201 }) 4202 } 4203 } 4204 4205 func TestAllocation_NextDelay(t *testing.T) { 4206 type testCase struct { 4207 desc string 4208 reschedulePolicy *ReschedulePolicy 4209 alloc *Allocation 4210 expectedRescheduleTime time.Time 4211 expectedRescheduleEligible bool 4212 } 4213 now := time.Now() 4214 testCases := []testCase{ 4215 { 4216 desc: "Allocation hasn't failed yet", 4217 reschedulePolicy: &ReschedulePolicy{ 4218 DelayFunction: "constant", 4219 Delay: 5 * time.Second, 4220 }, 4221 alloc: &Allocation{}, 4222 expectedRescheduleTime: time.Time{}, 4223 expectedRescheduleEligible: false, 4224 }, 4225 { 4226 desc: "Allocation has no reschedule policy", 4227 alloc: &Allocation{}, 4228 expectedRescheduleTime: time.Time{}, 4229 expectedRescheduleEligible: false, 4230 }, 4231 { 4232 desc: "Allocation lacks task state", 4233 reschedulePolicy: &ReschedulePolicy{ 4234 DelayFunction: "constant", 4235 Delay: 5 * time.Second, 4236 Unlimited: true, 4237 }, 4238 alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()}, 4239 expectedRescheduleTime: now.UTC().Add(5 * time.Second), 4240 expectedRescheduleEligible: true, 4241 }, 4242 { 4243 desc: "linear delay, unlimited restarts, no reschedule tracker", 4244 reschedulePolicy: &ReschedulePolicy{ 4245 DelayFunction: "constant", 4246 Delay: 5 * time.Second, 4247 Unlimited: true, 4248 }, 4249 alloc: &Allocation{ 4250 ClientStatus: AllocClientStatusFailed, 4251 TaskStates: map[string]*TaskState{"foo": {State: "dead", 4252 StartedAt: now.Add(-1 * time.Hour), 4253 FinishedAt: now.Add(-2 * time.Second)}}, 4254 }, 4255 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4256 expectedRescheduleEligible: true, 4257 }, 4258 { 4259 desc: "linear delay with reschedule tracker", 4260 reschedulePolicy: &ReschedulePolicy{ 4261 DelayFunction: "constant", 4262 Delay: 5 * time.Second, 4263 Interval: 10 * time.Minute, 4264 Attempts: 2, 4265 }, 4266 alloc: &Allocation{ 4267 ClientStatus: AllocClientStatusFailed, 4268 TaskStates: map[string]*TaskState{"foo": {State: "start", 4269 StartedAt: now.Add(-1 * time.Hour), 4270 FinishedAt: now.Add(-2 * time.Second)}}, 4271 RescheduleTracker: &RescheduleTracker{ 4272 Events: []*RescheduleEvent{{ 4273 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 4274 Delay: 5 * time.Second, 4275 }}, 4276 }}, 4277 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4278 expectedRescheduleEligible: true, 4279 }, 4280 { 4281 desc: "linear delay with reschedule tracker, attempts exhausted", 4282 reschedulePolicy: &ReschedulePolicy{ 4283 DelayFunction: "constant", 4284 Delay: 5 * time.Second, 4285 Interval: 10 * time.Minute, 4286 Attempts: 2, 4287 }, 4288 alloc: &Allocation{ 4289 ClientStatus: AllocClientStatusFailed, 4290 TaskStates: map[string]*TaskState{"foo": {State: "start", 4291 StartedAt: now.Add(-1 * time.Hour), 4292 FinishedAt: now.Add(-2 * time.Second)}}, 4293 RescheduleTracker: &RescheduleTracker{ 4294 Events: []*RescheduleEvent{ 4295 { 4296 RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), 4297 Delay: 5 * time.Second, 4298 }, 4299 { 4300 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 4301 Delay: 5 * time.Second, 4302 }, 4303 }, 4304 }}, 4305 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4306 expectedRescheduleEligible: false, 4307 }, 4308 { 4309 desc: "exponential delay - no reschedule tracker", 4310 reschedulePolicy: &ReschedulePolicy{ 4311 DelayFunction: "exponential", 4312 Delay: 5 * time.Second, 4313 MaxDelay: 90 * time.Second, 4314 Unlimited: true, 4315 }, 4316 alloc: &Allocation{ 4317 ClientStatus: AllocClientStatusFailed, 4318 TaskStates: map[string]*TaskState{"foo": {State: "start", 4319 StartedAt: now.Add(-1 * time.Hour), 4320 FinishedAt: now.Add(-2 * time.Second)}}, 4321 }, 4322 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4323 expectedRescheduleEligible: true, 4324 }, 4325 { 4326 desc: "exponential delay with reschedule tracker", 4327 reschedulePolicy: &ReschedulePolicy{ 4328 DelayFunction: "exponential", 4329 Delay: 5 * time.Second, 4330 MaxDelay: 90 * time.Second, 4331 Unlimited: true, 4332 }, 4333 alloc: &Allocation{ 4334 ClientStatus: AllocClientStatusFailed, 4335 TaskStates: map[string]*TaskState{"foo": {State: "start", 4336 StartedAt: now.Add(-1 * time.Hour), 4337 FinishedAt: now.Add(-2 * time.Second)}}, 4338 RescheduleTracker: &RescheduleTracker{ 4339 Events: []*RescheduleEvent{ 4340 { 4341 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4342 Delay: 5 * time.Second, 4343 }, 4344 { 4345 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4346 Delay: 10 * time.Second, 4347 }, 4348 { 4349 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4350 Delay: 20 * time.Second, 4351 }, 4352 }, 4353 }}, 4354 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 4355 expectedRescheduleEligible: true, 4356 }, 4357 { 4358 desc: "exponential delay with delay ceiling reached", 4359 reschedulePolicy: &ReschedulePolicy{ 4360 DelayFunction: "exponential", 4361 Delay: 5 * time.Second, 4362 MaxDelay: 90 * time.Second, 4363 Unlimited: true, 4364 }, 4365 alloc: &Allocation{ 4366 ClientStatus: AllocClientStatusFailed, 4367 TaskStates: map[string]*TaskState{"foo": {State: "start", 4368 StartedAt: now.Add(-1 * time.Hour), 4369 FinishedAt: now.Add(-15 * time.Second)}}, 4370 RescheduleTracker: &RescheduleTracker{ 4371 Events: []*RescheduleEvent{ 4372 { 4373 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4374 Delay: 5 * time.Second, 4375 }, 4376 { 4377 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4378 Delay: 10 * time.Second, 4379 }, 4380 { 4381 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4382 Delay: 20 * time.Second, 4383 }, 4384 { 4385 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4386 Delay: 40 * time.Second, 4387 }, 4388 { 4389 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 4390 Delay: 80 * time.Second, 4391 }, 4392 }, 4393 }}, 4394 expectedRescheduleTime: now.Add(-15 * time.Second).Add(90 * time.Second), 4395 expectedRescheduleEligible: true, 4396 }, 4397 { 4398 // Test case where most recent reschedule ran longer than delay ceiling 4399 desc: "exponential delay, delay ceiling reset condition met", 4400 reschedulePolicy: &ReschedulePolicy{ 4401 DelayFunction: "exponential", 4402 Delay: 5 * time.Second, 4403 MaxDelay: 90 * time.Second, 4404 Unlimited: true, 4405 }, 4406 alloc: &Allocation{ 4407 ClientStatus: AllocClientStatusFailed, 4408 TaskStates: map[string]*TaskState{"foo": {State: "start", 4409 StartedAt: now.Add(-1 * time.Hour), 4410 FinishedAt: now.Add(-15 * time.Minute)}}, 4411 RescheduleTracker: &RescheduleTracker{ 4412 Events: []*RescheduleEvent{ 4413 { 4414 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4415 Delay: 5 * time.Second, 4416 }, 4417 { 4418 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4419 Delay: 10 * time.Second, 4420 }, 4421 { 4422 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4423 Delay: 20 * time.Second, 4424 }, 4425 { 4426 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4427 Delay: 40 * time.Second, 4428 }, 4429 { 4430 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4431 Delay: 80 * time.Second, 4432 }, 4433 { 4434 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4435 Delay: 90 * time.Second, 4436 }, 4437 { 4438 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4439 Delay: 90 * time.Second, 4440 }, 4441 }, 4442 }}, 4443 expectedRescheduleTime: now.Add(-15 * time.Minute).Add(5 * time.Second), 4444 expectedRescheduleEligible: true, 4445 }, 4446 { 4447 desc: "fibonacci delay - no reschedule tracker", 4448 reschedulePolicy: &ReschedulePolicy{ 4449 DelayFunction: "fibonacci", 4450 Delay: 5 * time.Second, 4451 MaxDelay: 90 * time.Second, 4452 Unlimited: true, 4453 }, 4454 alloc: &Allocation{ 4455 ClientStatus: AllocClientStatusFailed, 4456 TaskStates: map[string]*TaskState{"foo": {State: "start", 4457 StartedAt: now.Add(-1 * time.Hour), 4458 FinishedAt: now.Add(-2 * time.Second)}}}, 4459 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 4460 expectedRescheduleEligible: true, 4461 }, 4462 { 4463 desc: "fibonacci delay with reschedule tracker", 4464 reschedulePolicy: &ReschedulePolicy{ 4465 DelayFunction: "fibonacci", 4466 Delay: 5 * time.Second, 4467 MaxDelay: 90 * time.Second, 4468 Unlimited: true, 4469 }, 4470 alloc: &Allocation{ 4471 ClientStatus: AllocClientStatusFailed, 4472 TaskStates: map[string]*TaskState{"foo": {State: "start", 4473 StartedAt: now.Add(-1 * time.Hour), 4474 FinishedAt: now.Add(-2 * time.Second)}}, 4475 RescheduleTracker: &RescheduleTracker{ 4476 Events: []*RescheduleEvent{ 4477 { 4478 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4479 Delay: 5 * time.Second, 4480 }, 4481 { 4482 RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(), 4483 Delay: 5 * time.Second, 4484 }, 4485 }, 4486 }}, 4487 expectedRescheduleTime: now.Add(-2 * time.Second).Add(10 * time.Second), 4488 expectedRescheduleEligible: true, 4489 }, 4490 { 4491 desc: "fibonacci delay with more events", 4492 reschedulePolicy: &ReschedulePolicy{ 4493 DelayFunction: "fibonacci", 4494 Delay: 5 * time.Second, 4495 MaxDelay: 90 * time.Second, 4496 Unlimited: true, 4497 }, 4498 alloc: &Allocation{ 4499 ClientStatus: AllocClientStatusFailed, 4500 TaskStates: map[string]*TaskState{"foo": {State: "start", 4501 StartedAt: now.Add(-1 * time.Hour), 4502 FinishedAt: now.Add(-2 * time.Second)}}, 4503 RescheduleTracker: &RescheduleTracker{ 4504 Events: []*RescheduleEvent{ 4505 { 4506 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4507 Delay: 5 * time.Second, 4508 }, 4509 { 4510 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4511 Delay: 5 * time.Second, 4512 }, 4513 { 4514 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4515 Delay: 10 * time.Second, 4516 }, 4517 { 4518 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4519 Delay: 15 * time.Second, 4520 }, 4521 { 4522 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4523 Delay: 25 * time.Second, 4524 }, 4525 }, 4526 }}, 4527 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 4528 expectedRescheduleEligible: true, 4529 }, 4530 { 4531 desc: "fibonacci delay with delay ceiling reached", 4532 reschedulePolicy: &ReschedulePolicy{ 4533 DelayFunction: "fibonacci", 4534 Delay: 5 * time.Second, 4535 MaxDelay: 50 * time.Second, 4536 Unlimited: true, 4537 }, 4538 alloc: &Allocation{ 4539 ClientStatus: AllocClientStatusFailed, 4540 TaskStates: map[string]*TaskState{"foo": {State: "start", 4541 StartedAt: now.Add(-1 * time.Hour), 4542 FinishedAt: now.Add(-15 * time.Second)}}, 4543 RescheduleTracker: &RescheduleTracker{ 4544 Events: []*RescheduleEvent{ 4545 { 4546 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4547 Delay: 5 * time.Second, 4548 }, 4549 { 4550 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4551 Delay: 5 * time.Second, 4552 }, 4553 { 4554 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4555 Delay: 10 * time.Second, 4556 }, 4557 { 4558 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4559 Delay: 15 * time.Second, 4560 }, 4561 { 4562 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4563 Delay: 25 * time.Second, 4564 }, 4565 { 4566 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 4567 Delay: 40 * time.Second, 4568 }, 4569 }, 4570 }}, 4571 expectedRescheduleTime: now.Add(-15 * time.Second).Add(50 * time.Second), 4572 expectedRescheduleEligible: true, 4573 }, 4574 { 4575 desc: "fibonacci delay with delay reset condition met", 4576 reschedulePolicy: &ReschedulePolicy{ 4577 DelayFunction: "fibonacci", 4578 Delay: 5 * time.Second, 4579 MaxDelay: 50 * time.Second, 4580 Unlimited: true, 4581 }, 4582 alloc: &Allocation{ 4583 ClientStatus: AllocClientStatusFailed, 4584 TaskStates: map[string]*TaskState{"foo": {State: "start", 4585 StartedAt: now.Add(-1 * time.Hour), 4586 FinishedAt: now.Add(-5 * time.Minute)}}, 4587 RescheduleTracker: &RescheduleTracker{ 4588 Events: []*RescheduleEvent{ 4589 { 4590 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4591 Delay: 5 * time.Second, 4592 }, 4593 { 4594 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4595 Delay: 5 * time.Second, 4596 }, 4597 { 4598 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4599 Delay: 10 * time.Second, 4600 }, 4601 { 4602 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4603 Delay: 15 * time.Second, 4604 }, 4605 { 4606 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4607 Delay: 25 * time.Second, 4608 }, 4609 { 4610 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4611 Delay: 40 * time.Second, 4612 }, 4613 }, 4614 }}, 4615 expectedRescheduleTime: now.Add(-5 * time.Minute).Add(5 * time.Second), 4616 expectedRescheduleEligible: true, 4617 }, 4618 { 4619 desc: "fibonacci delay with the most recent event that reset delay value", 4620 reschedulePolicy: &ReschedulePolicy{ 4621 DelayFunction: "fibonacci", 4622 Delay: 5 * time.Second, 4623 MaxDelay: 50 * time.Second, 4624 Unlimited: true, 4625 }, 4626 alloc: &Allocation{ 4627 ClientStatus: AllocClientStatusFailed, 4628 TaskStates: map[string]*TaskState{"foo": {State: "start", 4629 StartedAt: now.Add(-1 * time.Hour), 4630 FinishedAt: now.Add(-5 * time.Second)}}, 4631 RescheduleTracker: &RescheduleTracker{ 4632 Events: []*RescheduleEvent{ 4633 { 4634 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4635 Delay: 5 * time.Second, 4636 }, 4637 { 4638 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4639 Delay: 5 * time.Second, 4640 }, 4641 { 4642 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4643 Delay: 10 * time.Second, 4644 }, 4645 { 4646 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4647 Delay: 15 * time.Second, 4648 }, 4649 { 4650 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4651 Delay: 25 * time.Second, 4652 }, 4653 { 4654 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4655 Delay: 40 * time.Second, 4656 }, 4657 { 4658 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4659 Delay: 50 * time.Second, 4660 }, 4661 { 4662 RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(), 4663 Delay: 5 * time.Second, 4664 }, 4665 }, 4666 }}, 4667 expectedRescheduleTime: now.Add(-5 * time.Second).Add(5 * time.Second), 4668 expectedRescheduleEligible: true, 4669 }, 4670 } 4671 for _, tc := range testCases { 4672 t.Run(tc.desc, func(t *testing.T) { 4673 require := require.New(t) 4674 j := testJob() 4675 if tc.reschedulePolicy != nil { 4676 j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy 4677 } 4678 tc.alloc.Job = j 4679 tc.alloc.TaskGroup = j.TaskGroups[0].Name 4680 reschedTime, allowed := tc.alloc.NextRescheduleTime() 4681 require.Equal(tc.expectedRescheduleEligible, allowed) 4682 require.Equal(tc.expectedRescheduleTime, reschedTime) 4683 }) 4684 } 4685 4686 } 4687 4688 func TestAllocation_WaitClientStop(t *testing.T) { 4689 type testCase struct { 4690 desc string 4691 stop time.Duration 4692 status string 4693 expectedShould bool 4694 expectedRescheduleTime time.Time 4695 } 4696 now := time.Now().UTC() 4697 testCases := []testCase{ 4698 { 4699 desc: "running", 4700 stop: 2 * time.Second, 4701 status: AllocClientStatusRunning, 4702 expectedShould: true, 4703 }, 4704 { 4705 desc: "no stop_after_client_disconnect", 4706 status: AllocClientStatusLost, 4707 expectedShould: false, 4708 }, 4709 { 4710 desc: "stop", 4711 status: AllocClientStatusLost, 4712 stop: 2 * time.Second, 4713 expectedShould: true, 4714 expectedRescheduleTime: now.Add((2 + 5) * time.Second), 4715 }, 4716 } 4717 for _, tc := range testCases { 4718 t.Run(tc.desc, func(t *testing.T) { 4719 j := testJob() 4720 a := &Allocation{ 4721 ClientStatus: tc.status, 4722 Job: j, 4723 TaskStates: map[string]*TaskState{}, 4724 } 4725 4726 if tc.status == AllocClientStatusLost { 4727 a.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost) 4728 } 4729 4730 j.TaskGroups[0].StopAfterClientDisconnect = &tc.stop 4731 a.TaskGroup = j.TaskGroups[0].Name 4732 4733 require.Equal(t, tc.expectedShould, a.ShouldClientStop()) 4734 4735 if !tc.expectedShould || tc.status != AllocClientStatusLost { 4736 return 4737 } 4738 4739 // the reschedTime is close to the expectedRescheduleTime 4740 reschedTime := a.WaitClientStop() 4741 e := reschedTime.Unix() - tc.expectedRescheduleTime.Unix() 4742 require.Less(t, e, int64(2)) 4743 }) 4744 } 4745 } 4746 4747 func TestAllocation_Canonicalize_Old(t *testing.T) { 4748 alloc := MockAlloc() 4749 alloc.AllocatedResources = nil 4750 alloc.TaskResources = map[string]*Resources{ 4751 "web": { 4752 CPU: 500, 4753 MemoryMB: 256, 4754 Networks: []*NetworkResource{ 4755 { 4756 Device: "eth0", 4757 IP: "192.168.0.100", 4758 ReservedPorts: []Port{{Label: "admin", Value: 5000}}, 4759 MBits: 50, 4760 DynamicPorts: []Port{{Label: "http", Value: 9876}}, 4761 }, 4762 }, 4763 }, 4764 } 4765 alloc.SharedResources = &Resources{ 4766 DiskMB: 150, 4767 } 4768 alloc.Canonicalize() 4769 4770 expected := &AllocatedResources{ 4771 Tasks: map[string]*AllocatedTaskResources{ 4772 "web": { 4773 Cpu: AllocatedCpuResources{ 4774 CpuShares: 500, 4775 }, 4776 Memory: AllocatedMemoryResources{ 4777 MemoryMB: 256, 4778 }, 4779 Networks: []*NetworkResource{ 4780 { 4781 Device: "eth0", 4782 IP: "192.168.0.100", 4783 ReservedPorts: []Port{{Label: "admin", Value: 5000}}, 4784 MBits: 50, 4785 DynamicPorts: []Port{{Label: "http", Value: 9876}}, 4786 }, 4787 }, 4788 }, 4789 }, 4790 Shared: AllocatedSharedResources{ 4791 DiskMB: 150, 4792 }, 4793 } 4794 4795 require.Equal(t, expected, alloc.AllocatedResources) 4796 } 4797 4798 // TestAllocation_Canonicalize_New asserts that an alloc with latest 4799 // schema isn't modified with Canonicalize 4800 func TestAllocation_Canonicalize_New(t *testing.T) { 4801 alloc := MockAlloc() 4802 copy := alloc.Copy() 4803 4804 alloc.Canonicalize() 4805 require.Equal(t, copy, alloc) 4806 } 4807 4808 func TestRescheduleTracker_Copy(t *testing.T) { 4809 type testCase struct { 4810 original *RescheduleTracker 4811 expected *RescheduleTracker 4812 } 4813 4814 cases := []testCase{ 4815 {nil, nil}, 4816 {&RescheduleTracker{Events: []*RescheduleEvent{ 4817 {RescheduleTime: 2, 4818 PrevAllocID: "12", 4819 PrevNodeID: "12", 4820 Delay: 30 * time.Second}, 4821 }}, &RescheduleTracker{Events: []*RescheduleEvent{ 4822 {RescheduleTime: 2, 4823 PrevAllocID: "12", 4824 PrevNodeID: "12", 4825 Delay: 30 * time.Second}, 4826 }}}, 4827 } 4828 4829 for _, tc := range cases { 4830 if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) { 4831 t.Fatalf("expected %v but got %v", *tc.expected, *got) 4832 } 4833 } 4834 } 4835 4836 func TestVault_Validate(t *testing.T) { 4837 v := &Vault{ 4838 Env: true, 4839 ChangeMode: VaultChangeModeNoop, 4840 } 4841 4842 if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") { 4843 t.Fatalf("Expected policy list empty error") 4844 } 4845 4846 v.Policies = []string{"foo", "root"} 4847 v.ChangeMode = VaultChangeModeSignal 4848 4849 err := v.Validate() 4850 if err == nil { 4851 t.Fatalf("Expected validation errors") 4852 } 4853 4854 if !strings.Contains(err.Error(), "Signal must") { 4855 t.Fatalf("Expected signal empty error") 4856 } 4857 if !strings.Contains(err.Error(), "root") { 4858 t.Fatalf("Expected root error") 4859 } 4860 } 4861 4862 func TestParameterizedJobConfig_Validate(t *testing.T) { 4863 d := &ParameterizedJobConfig{ 4864 Payload: "foo", 4865 } 4866 4867 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") { 4868 t.Fatalf("Expected unknown payload requirement: %v", err) 4869 } 4870 4871 d.Payload = DispatchPayloadOptional 4872 d.MetaOptional = []string{"foo", "bar"} 4873 d.MetaRequired = []string{"bar", "baz"} 4874 4875 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") { 4876 t.Fatalf("Expected meta not being disjoint error: %v", err) 4877 } 4878 } 4879 4880 func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { 4881 job := testJob() 4882 job.ParameterizedJob = &ParameterizedJobConfig{ 4883 Payload: DispatchPayloadOptional, 4884 } 4885 job.Type = JobTypeSystem 4886 4887 if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") { 4888 t.Fatalf("Expected bad scheduler tpye: %v", err) 4889 } 4890 } 4891 4892 func TestJobConfig_Validate_StopAferClientDisconnect(t *testing.T) { 4893 // Setup a system Job with stop_after_client_disconnect set, which is invalid 4894 job := testJob() 4895 job.Type = JobTypeSystem 4896 stop := 1 * time.Minute 4897 job.TaskGroups[0].StopAfterClientDisconnect = &stop 4898 4899 err := job.Validate() 4900 require.Error(t, err) 4901 require.Contains(t, err.Error(), "stop_after_client_disconnect can only be set in batch and service jobs") 4902 4903 // Modify the job to a batch job with an invalid stop_after_client_disconnect value 4904 job.Type = JobTypeBatch 4905 invalid := -1 * time.Minute 4906 job.TaskGroups[0].StopAfterClientDisconnect = &invalid 4907 4908 err = job.Validate() 4909 require.Error(t, err) 4910 require.Contains(t, err.Error(), "stop_after_client_disconnect must be a positive value") 4911 4912 // Modify the job to a batch job with a valid stop_after_client_disconnect value 4913 job.Type = JobTypeBatch 4914 job.TaskGroups[0].StopAfterClientDisconnect = &stop 4915 err = job.Validate() 4916 require.NoError(t, err) 4917 } 4918 4919 func TestParameterizedJobConfig_Canonicalize(t *testing.T) { 4920 d := &ParameterizedJobConfig{} 4921 d.Canonicalize() 4922 if d.Payload != DispatchPayloadOptional { 4923 t.Fatalf("Canonicalize failed") 4924 } 4925 } 4926 4927 func TestDispatchPayloadConfig_Validate(t *testing.T) { 4928 d := &DispatchPayloadConfig{ 4929 File: "foo", 4930 } 4931 4932 // task/local/haha 4933 if err := d.Validate(); err != nil { 4934 t.Fatalf("bad: %v", err) 4935 } 4936 4937 // task/haha 4938 d.File = "../haha" 4939 if err := d.Validate(); err != nil { 4940 t.Fatalf("bad: %v", err) 4941 } 4942 4943 // ../haha 4944 d.File = "../../../haha" 4945 if err := d.Validate(); err == nil { 4946 t.Fatalf("bad: %v", err) 4947 } 4948 } 4949 4950 func TestScalingPolicy_Canonicalize(t *testing.T) { 4951 cases := []struct { 4952 name string 4953 input *ScalingPolicy 4954 expected *ScalingPolicy 4955 }{ 4956 { 4957 name: "empty policy", 4958 input: &ScalingPolicy{}, 4959 expected: &ScalingPolicy{Type: ScalingPolicyTypeHorizontal}, 4960 }, 4961 { 4962 name: "policy with type", 4963 input: &ScalingPolicy{Type: "other-type"}, 4964 expected: &ScalingPolicy{Type: "other-type"}, 4965 }, 4966 } 4967 4968 for _, c := range cases { 4969 t.Run(c.name, func(t *testing.T) { 4970 require := require.New(t) 4971 4972 c.input.Canonicalize() 4973 require.Equal(c.expected, c.input) 4974 }) 4975 } 4976 } 4977 4978 func TestScalingPolicy_Validate(t *testing.T) { 4979 type testCase struct { 4980 name string 4981 input *ScalingPolicy 4982 expectedErr string 4983 } 4984 4985 cases := []testCase{ 4986 { 4987 name: "full horizontal policy", 4988 input: &ScalingPolicy{ 4989 Policy: map[string]interface{}{ 4990 "key": "value", 4991 }, 4992 Type: ScalingPolicyTypeHorizontal, 4993 Min: 5, 4994 Max: 5, 4995 Enabled: true, 4996 Target: map[string]string{ 4997 ScalingTargetNamespace: "my-namespace", 4998 ScalingTargetJob: "my-job", 4999 ScalingTargetGroup: "my-task-group", 5000 }, 5001 }, 5002 }, 5003 { 5004 name: "missing type", 5005 input: &ScalingPolicy{}, 5006 expectedErr: "missing scaling policy type", 5007 }, 5008 { 5009 name: "invalid type", 5010 input: &ScalingPolicy{ 5011 Type: "not valid", 5012 }, 5013 expectedErr: `scaling policy type "not valid" is not valid`, 5014 }, 5015 { 5016 name: "min < 0", 5017 input: &ScalingPolicy{ 5018 Type: ScalingPolicyTypeHorizontal, 5019 Min: -1, 5020 Max: 5, 5021 }, 5022 expectedErr: "minimum count must be specified and non-negative", 5023 }, 5024 { 5025 name: "max < 0", 5026 input: &ScalingPolicy{ 5027 Type: ScalingPolicyTypeHorizontal, 5028 Min: 5, 5029 Max: -1, 5030 }, 5031 expectedErr: "maximum count must be specified and non-negative", 5032 }, 5033 { 5034 name: "min > max", 5035 input: &ScalingPolicy{ 5036 Type: ScalingPolicyTypeHorizontal, 5037 Min: 10, 5038 Max: 0, 5039 }, 5040 expectedErr: "maximum count must not be less than minimum count", 5041 }, 5042 { 5043 name: "min == max", 5044 input: &ScalingPolicy{ 5045 Type: ScalingPolicyTypeHorizontal, 5046 Min: 10, 5047 Max: 10, 5048 }, 5049 }, 5050 { 5051 name: "min == 0", 5052 input: &ScalingPolicy{ 5053 Type: ScalingPolicyTypeHorizontal, 5054 Min: 0, 5055 Max: 10, 5056 }, 5057 }, 5058 { 5059 name: "max == 0", 5060 input: &ScalingPolicy{ 5061 Type: ScalingPolicyTypeHorizontal, 5062 Min: 0, 5063 Max: 0, 5064 }, 5065 }, 5066 { 5067 name: "horizontal missing namespace", 5068 input: &ScalingPolicy{ 5069 Type: ScalingPolicyTypeHorizontal, 5070 Target: map[string]string{ 5071 ScalingTargetJob: "my-job", 5072 ScalingTargetGroup: "my-group", 5073 }, 5074 }, 5075 expectedErr: "missing target namespace", 5076 }, 5077 { 5078 name: "horizontal missing job", 5079 input: &ScalingPolicy{ 5080 Type: ScalingPolicyTypeHorizontal, 5081 Target: map[string]string{ 5082 ScalingTargetNamespace: "my-namespace", 5083 ScalingTargetGroup: "my-group", 5084 }, 5085 }, 5086 expectedErr: "missing target job", 5087 }, 5088 { 5089 name: "horizontal missing group", 5090 input: &ScalingPolicy{ 5091 Type: ScalingPolicyTypeHorizontal, 5092 Target: map[string]string{ 5093 ScalingTargetNamespace: "my-namespace", 5094 ScalingTargetJob: "my-job", 5095 }, 5096 }, 5097 expectedErr: "missing target group", 5098 }, 5099 } 5100 5101 for _, c := range cases { 5102 t.Run(c.name, func(t *testing.T) { 5103 require := require.New(t) 5104 5105 err := c.input.Validate() 5106 5107 if len(c.expectedErr) > 0 { 5108 require.Error(err, c.expectedErr) 5109 } else { 5110 require.NoError(err) 5111 } 5112 }) 5113 } 5114 } 5115 5116 func TestIsRecoverable(t *testing.T) { 5117 if IsRecoverable(nil) { 5118 t.Errorf("nil should not be recoverable") 5119 } 5120 if IsRecoverable(NewRecoverableError(nil, true)) { 5121 t.Errorf("NewRecoverableError(nil, true) should not be recoverable") 5122 } 5123 if IsRecoverable(fmt.Errorf("i promise im recoverable")) { 5124 t.Errorf("Custom errors should not be recoverable") 5125 } 5126 if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) { 5127 t.Errorf("Explicitly unrecoverable errors should not be recoverable") 5128 } 5129 if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) { 5130 t.Errorf("Explicitly recoverable errors *should* be recoverable") 5131 } 5132 } 5133 5134 func TestACLTokenValidate(t *testing.T) { 5135 tk := &ACLToken{} 5136 5137 // Missing a type 5138 err := tk.Validate() 5139 assert.NotNil(t, err) 5140 if !strings.Contains(err.Error(), "client or management") { 5141 t.Fatalf("bad: %v", err) 5142 } 5143 5144 // Missing policies 5145 tk.Type = ACLClientToken 5146 err = tk.Validate() 5147 assert.NotNil(t, err) 5148 if !strings.Contains(err.Error(), "missing policies") { 5149 t.Fatalf("bad: %v", err) 5150 } 5151 5152 // Invalid policies 5153 tk.Type = ACLManagementToken 5154 tk.Policies = []string{"foo"} 5155 err = tk.Validate() 5156 assert.NotNil(t, err) 5157 if !strings.Contains(err.Error(), "associated with policies") { 5158 t.Fatalf("bad: %v", err) 5159 } 5160 5161 // Name too long policies 5162 tk.Name = "" 5163 for i := 0; i < 8; i++ { 5164 tk.Name += uuid.Generate() 5165 } 5166 tk.Policies = nil 5167 err = tk.Validate() 5168 assert.NotNil(t, err) 5169 if !strings.Contains(err.Error(), "too long") { 5170 t.Fatalf("bad: %v", err) 5171 } 5172 5173 // Make it valid 5174 tk.Name = "foo" 5175 err = tk.Validate() 5176 assert.Nil(t, err) 5177 } 5178 5179 func TestACLTokenPolicySubset(t *testing.T) { 5180 tk := &ACLToken{ 5181 Type: ACLClientToken, 5182 Policies: []string{"foo", "bar", "baz"}, 5183 } 5184 5185 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 5186 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 5187 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 5188 assert.Equal(t, true, tk.PolicySubset([]string{})) 5189 assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"})) 5190 assert.Equal(t, false, tk.PolicySubset([]string{"new"})) 5191 5192 tk = &ACLToken{ 5193 Type: ACLManagementToken, 5194 } 5195 5196 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 5197 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 5198 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 5199 assert.Equal(t, true, tk.PolicySubset([]string{})) 5200 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"})) 5201 assert.Equal(t, true, tk.PolicySubset([]string{"new"})) 5202 } 5203 5204 func TestACLTokenSetHash(t *testing.T) { 5205 tk := &ACLToken{ 5206 Name: "foo", 5207 Type: ACLClientToken, 5208 Policies: []string{"foo", "bar"}, 5209 Global: false, 5210 } 5211 out1 := tk.SetHash() 5212 assert.NotNil(t, out1) 5213 assert.NotNil(t, tk.Hash) 5214 assert.Equal(t, out1, tk.Hash) 5215 5216 tk.Policies = []string{"foo"} 5217 out2 := tk.SetHash() 5218 assert.NotNil(t, out2) 5219 assert.NotNil(t, tk.Hash) 5220 assert.Equal(t, out2, tk.Hash) 5221 assert.NotEqual(t, out1, out2) 5222 } 5223 5224 func TestACLPolicySetHash(t *testing.T) { 5225 ap := &ACLPolicy{ 5226 Name: "foo", 5227 Description: "great policy", 5228 Rules: "node { policy = \"read\" }", 5229 } 5230 out1 := ap.SetHash() 5231 assert.NotNil(t, out1) 5232 assert.NotNil(t, ap.Hash) 5233 assert.Equal(t, out1, ap.Hash) 5234 5235 ap.Rules = "node { policy = \"write\" }" 5236 out2 := ap.SetHash() 5237 assert.NotNil(t, out2) 5238 assert.NotNil(t, ap.Hash) 5239 assert.Equal(t, out2, ap.Hash) 5240 assert.NotEqual(t, out1, out2) 5241 } 5242 5243 func TestTaskEventPopulate(t *testing.T) { 5244 prepopulatedEvent := NewTaskEvent(TaskSetup) 5245 prepopulatedEvent.DisplayMessage = "Hola" 5246 testcases := []struct { 5247 event *TaskEvent 5248 expectedMsg string 5249 }{ 5250 {nil, ""}, 5251 {prepopulatedEvent, "Hola"}, 5252 {NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"}, 5253 {NewTaskEvent(TaskStarted), "Task started by client"}, 5254 {NewTaskEvent(TaskReceived), "Task received by client"}, 5255 {NewTaskEvent(TaskFailedValidation), "Validation of task failed"}, 5256 {NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"}, 5257 {NewTaskEvent(TaskSetupFailure), "Task setup failed"}, 5258 {NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"}, 5259 {NewTaskEvent(TaskDriverFailure), "Failed to start task"}, 5260 {NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"}, 5261 {NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"}, 5262 {NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"}, 5263 {NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"}, 5264 {NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"}, 5265 {NewTaskEvent(TaskKilling), "Sent interrupt"}, 5266 {NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"}, 5267 {NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"}, 5268 {NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"}, 5269 {NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""}, 5270 {NewTaskEvent(TaskKilled), "Task successfully killed"}, 5271 {NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"}, 5272 {NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"}, 5273 {NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"}, 5274 {NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"}, 5275 {NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"}, 5276 {NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"}, 5277 {NewTaskEvent(TaskSignaling), "Task being sent a signal"}, 5278 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"}, 5279 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"}, 5280 {NewTaskEvent(TaskRestartSignal), "Task signaled to restart"}, 5281 {NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"}, 5282 {NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"}, 5283 {NewTaskEvent("Unknown Type, No message"), ""}, 5284 {NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"}, 5285 } 5286 5287 for _, tc := range testcases { 5288 tc.event.PopulateEventDisplayMessage() 5289 if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg { 5290 t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage) 5291 } 5292 } 5293 } 5294 5295 func TestNetworkResourcesEquals(t *testing.T) { 5296 require := require.New(t) 5297 var networkResourcesTest = []struct { 5298 input []*NetworkResource 5299 expected bool 5300 errorMsg string 5301 }{ 5302 { 5303 []*NetworkResource{ 5304 { 5305 IP: "10.0.0.1", 5306 MBits: 50, 5307 ReservedPorts: []Port{{"web", 80, 0, ""}}, 5308 }, 5309 { 5310 IP: "10.0.0.1", 5311 MBits: 50, 5312 ReservedPorts: []Port{{"web", 80, 0, ""}}, 5313 }, 5314 }, 5315 true, 5316 "Equal network resources should return true", 5317 }, 5318 { 5319 []*NetworkResource{ 5320 { 5321 IP: "10.0.0.0", 5322 MBits: 50, 5323 ReservedPorts: []Port{{"web", 80, 0, ""}}, 5324 }, 5325 { 5326 IP: "10.0.0.1", 5327 MBits: 50, 5328 ReservedPorts: []Port{{"web", 80, 0, ""}}, 5329 }, 5330 }, 5331 false, 5332 "Different IP addresses should return false", 5333 }, 5334 { 5335 []*NetworkResource{ 5336 { 5337 IP: "10.0.0.1", 5338 MBits: 40, 5339 ReservedPorts: []Port{{"web", 80, 0, ""}}, 5340 }, 5341 { 5342 IP: "10.0.0.1", 5343 MBits: 50, 5344 ReservedPorts: []Port{{"web", 80, 0, ""}}, 5345 }, 5346 }, 5347 false, 5348 "Different MBits values should return false", 5349 }, 5350 { 5351 []*NetworkResource{ 5352 { 5353 IP: "10.0.0.1", 5354 MBits: 50, 5355 ReservedPorts: []Port{{"web", 80, 0, ""}}, 5356 }, 5357 { 5358 IP: "10.0.0.1", 5359 MBits: 50, 5360 ReservedPorts: []Port{{"web", 80, 0, ""}, {"web", 80, 0, ""}}, 5361 }, 5362 }, 5363 false, 5364 "Different ReservedPorts lengths should return false", 5365 }, 5366 { 5367 []*NetworkResource{ 5368 { 5369 IP: "10.0.0.1", 5370 MBits: 50, 5371 ReservedPorts: []Port{{"web", 80, 0, ""}}, 5372 }, 5373 { 5374 IP: "10.0.0.1", 5375 MBits: 50, 5376 ReservedPorts: []Port{}, 5377 }, 5378 }, 5379 false, 5380 "Empty and non empty ReservedPorts values should return false", 5381 }, 5382 { 5383 []*NetworkResource{ 5384 { 5385 IP: "10.0.0.1", 5386 MBits: 50, 5387 ReservedPorts: []Port{{"web", 80, 0, ""}}, 5388 }, 5389 { 5390 IP: "10.0.0.1", 5391 MBits: 50, 5392 ReservedPorts: []Port{{"notweb", 80, 0, ""}}, 5393 }, 5394 }, 5395 false, 5396 "Different valued ReservedPorts values should return false", 5397 }, 5398 { 5399 []*NetworkResource{ 5400 { 5401 IP: "10.0.0.1", 5402 MBits: 50, 5403 DynamicPorts: []Port{{"web", 80, 0, ""}}, 5404 }, 5405 { 5406 IP: "10.0.0.1", 5407 MBits: 50, 5408 DynamicPorts: []Port{{"web", 80, 0, ""}, {"web", 80, 0, ""}}, 5409 }, 5410 }, 5411 false, 5412 "Different DynamicPorts lengths should return false", 5413 }, 5414 { 5415 []*NetworkResource{ 5416 { 5417 IP: "10.0.0.1", 5418 MBits: 50, 5419 DynamicPorts: []Port{{"web", 80, 0, ""}}, 5420 }, 5421 { 5422 IP: "10.0.0.1", 5423 MBits: 50, 5424 DynamicPorts: []Port{}, 5425 }, 5426 }, 5427 false, 5428 "Empty and non empty DynamicPorts values should return false", 5429 }, 5430 { 5431 []*NetworkResource{ 5432 { 5433 IP: "10.0.0.1", 5434 MBits: 50, 5435 DynamicPorts: []Port{{"web", 80, 0, ""}}, 5436 }, 5437 { 5438 IP: "10.0.0.1", 5439 MBits: 50, 5440 DynamicPorts: []Port{{"notweb", 80, 0, ""}}, 5441 }, 5442 }, 5443 false, 5444 "Different valued DynamicPorts values should return false", 5445 }, 5446 } 5447 for _, testCase := range networkResourcesTest { 5448 first := testCase.input[0] 5449 second := testCase.input[1] 5450 require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg) 5451 } 5452 } 5453 5454 func TestNode_Canonicalize(t *testing.T) { 5455 t.Parallel() 5456 require := require.New(t) 5457 5458 // Make sure the eligiblity is set properly 5459 node := &Node{} 5460 node.Canonicalize() 5461 require.Equal(NodeSchedulingEligible, node.SchedulingEligibility) 5462 5463 node = &Node{ 5464 Drain: true, 5465 } 5466 node.Canonicalize() 5467 require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility) 5468 } 5469 5470 func TestNode_Copy(t *testing.T) { 5471 t.Parallel() 5472 require := require.New(t) 5473 5474 node := &Node{ 5475 ID: uuid.Generate(), 5476 SecretID: uuid.Generate(), 5477 Datacenter: "dc1", 5478 Name: "foobar", 5479 Attributes: map[string]string{ 5480 "kernel.name": "linux", 5481 "arch": "x86", 5482 "nomad.version": "0.5.0", 5483 "driver.exec": "1", 5484 "driver.mock_driver": "1", 5485 }, 5486 Resources: &Resources{ 5487 CPU: 4000, 5488 MemoryMB: 8192, 5489 DiskMB: 100 * 1024, 5490 Networks: []*NetworkResource{ 5491 { 5492 Device: "eth0", 5493 CIDR: "192.168.0.100/32", 5494 MBits: 1000, 5495 }, 5496 }, 5497 }, 5498 Reserved: &Resources{ 5499 CPU: 100, 5500 MemoryMB: 256, 5501 DiskMB: 4 * 1024, 5502 Networks: []*NetworkResource{ 5503 { 5504 Device: "eth0", 5505 IP: "192.168.0.100", 5506 ReservedPorts: []Port{{Label: "ssh", Value: 22}}, 5507 MBits: 1, 5508 }, 5509 }, 5510 }, 5511 NodeResources: &NodeResources{ 5512 Cpu: NodeCpuResources{ 5513 CpuShares: 4000, 5514 }, 5515 Memory: NodeMemoryResources{ 5516 MemoryMB: 8192, 5517 }, 5518 Disk: NodeDiskResources{ 5519 DiskMB: 100 * 1024, 5520 }, 5521 Networks: []*NetworkResource{ 5522 { 5523 Device: "eth0", 5524 CIDR: "192.168.0.100/32", 5525 MBits: 1000, 5526 }, 5527 }, 5528 }, 5529 ReservedResources: &NodeReservedResources{ 5530 Cpu: NodeReservedCpuResources{ 5531 CpuShares: 100, 5532 }, 5533 Memory: NodeReservedMemoryResources{ 5534 MemoryMB: 256, 5535 }, 5536 Disk: NodeReservedDiskResources{ 5537 DiskMB: 4 * 1024, 5538 }, 5539 Networks: NodeReservedNetworkResources{ 5540 ReservedHostPorts: "22", 5541 }, 5542 }, 5543 Links: map[string]string{ 5544 "consul": "foobar.dc1", 5545 }, 5546 Meta: map[string]string{ 5547 "pci-dss": "true", 5548 "database": "mysql", 5549 "version": "5.6", 5550 }, 5551 NodeClass: "linux-medium-pci", 5552 Status: NodeStatusReady, 5553 SchedulingEligibility: NodeSchedulingEligible, 5554 Drivers: map[string]*DriverInfo{ 5555 "mock_driver": { 5556 Attributes: map[string]string{"running": "1"}, 5557 Detected: true, 5558 Healthy: true, 5559 HealthDescription: "Currently active", 5560 UpdateTime: time.Now(), 5561 }, 5562 }, 5563 } 5564 node.ComputeClass() 5565 5566 node2 := node.Copy() 5567 5568 require.Equal(node.Attributes, node2.Attributes) 5569 require.Equal(node.Resources, node2.Resources) 5570 require.Equal(node.Reserved, node2.Reserved) 5571 require.Equal(node.Links, node2.Links) 5572 require.Equal(node.Meta, node2.Meta) 5573 require.Equal(node.Events, node2.Events) 5574 require.Equal(node.DrainStrategy, node2.DrainStrategy) 5575 require.Equal(node.Drivers, node2.Drivers) 5576 } 5577 5578 func TestSpread_Validate(t *testing.T) { 5579 type tc struct { 5580 spread *Spread 5581 err error 5582 name string 5583 } 5584 5585 testCases := []tc{ 5586 { 5587 spread: &Spread{}, 5588 err: fmt.Errorf("Missing spread attribute"), 5589 name: "empty spread", 5590 }, 5591 { 5592 spread: &Spread{ 5593 Attribute: "${node.datacenter}", 5594 Weight: -1, 5595 }, 5596 err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), 5597 name: "Invalid weight", 5598 }, 5599 { 5600 spread: &Spread{ 5601 Attribute: "${node.datacenter}", 5602 Weight: 110, 5603 }, 5604 err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), 5605 name: "Invalid weight", 5606 }, 5607 { 5608 spread: &Spread{ 5609 Attribute: "${node.datacenter}", 5610 Weight: 50, 5611 SpreadTarget: []*SpreadTarget{ 5612 { 5613 Value: "dc1", 5614 Percent: 25, 5615 }, 5616 { 5617 Value: "dc2", 5618 Percent: 150, 5619 }, 5620 }, 5621 }, 5622 err: fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"), 5623 name: "Invalid percentages", 5624 }, 5625 { 5626 spread: &Spread{ 5627 Attribute: "${node.datacenter}", 5628 Weight: 50, 5629 SpreadTarget: []*SpreadTarget{ 5630 { 5631 Value: "dc1", 5632 Percent: 75, 5633 }, 5634 { 5635 Value: "dc2", 5636 Percent: 75, 5637 }, 5638 }, 5639 }, 5640 err: fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150), 5641 name: "Invalid percentages", 5642 }, 5643 { 5644 spread: &Spread{ 5645 Attribute: "${node.datacenter}", 5646 Weight: 50, 5647 SpreadTarget: []*SpreadTarget{ 5648 { 5649 Value: "dc1", 5650 Percent: 25, 5651 }, 5652 { 5653 Value: "dc1", 5654 Percent: 50, 5655 }, 5656 }, 5657 }, 5658 err: fmt.Errorf("Spread target value \"dc1\" already defined"), 5659 name: "No spread targets", 5660 }, 5661 { 5662 spread: &Spread{ 5663 Attribute: "${node.datacenter}", 5664 Weight: 50, 5665 SpreadTarget: []*SpreadTarget{ 5666 { 5667 Value: "dc1", 5668 Percent: 25, 5669 }, 5670 { 5671 Value: "dc2", 5672 Percent: 50, 5673 }, 5674 }, 5675 }, 5676 err: nil, 5677 name: "Valid spread", 5678 }, 5679 } 5680 5681 for _, tc := range testCases { 5682 t.Run(tc.name, func(t *testing.T) { 5683 err := tc.spread.Validate() 5684 if tc.err != nil { 5685 require.NotNil(t, err) 5686 require.Contains(t, err.Error(), tc.err.Error()) 5687 } else { 5688 require.Nil(t, err) 5689 } 5690 }) 5691 } 5692 } 5693 5694 func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) { 5695 require := require.New(t) 5696 cases := []struct { 5697 Input string 5698 Parsed []uint64 5699 Err bool 5700 }{ 5701 { 5702 "1,2,3", 5703 []uint64{1, 2, 3}, 5704 false, 5705 }, 5706 { 5707 "3,1,2,1,2,3,1-3", 5708 []uint64{1, 2, 3}, 5709 false, 5710 }, 5711 { 5712 "3-1", 5713 nil, 5714 true, 5715 }, 5716 { 5717 "1-3,2-4", 5718 []uint64{1, 2, 3, 4}, 5719 false, 5720 }, 5721 { 5722 "1-3,4,5-5,6,7,8-10", 5723 []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 5724 false, 5725 }, 5726 } 5727 5728 for i, tc := range cases { 5729 r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input} 5730 out, err := r.ParseReservedHostPorts() 5731 if (err != nil) != tc.Err { 5732 t.Fatalf("test case %d: %v", i, err) 5733 continue 5734 } 5735 5736 require.Equal(out, tc.Parsed) 5737 } 5738 } 5739 5740 func TestMultiregion_CopyCanonicalize(t *testing.T) { 5741 require := require.New(t) 5742 5743 emptyOld := &Multiregion{} 5744 expected := &Multiregion{ 5745 Strategy: &MultiregionStrategy{}, 5746 Regions: []*MultiregionRegion{}, 5747 } 5748 5749 old := emptyOld.Copy() 5750 old.Canonicalize() 5751 require.Equal(old, expected) 5752 require.False(old.Diff(expected)) 5753 5754 nonEmptyOld := &Multiregion{ 5755 Strategy: &MultiregionStrategy{ 5756 MaxParallel: 2, 5757 OnFailure: "fail_all", 5758 }, 5759 Regions: []*MultiregionRegion{ 5760 { 5761 Name: "west", 5762 Count: 2, 5763 Datacenters: []string{"west-1", "west-2"}, 5764 Meta: map[string]string{}, 5765 }, 5766 { 5767 Name: "east", 5768 Count: 1, 5769 Datacenters: []string{"east-1"}, 5770 Meta: map[string]string{}, 5771 }, 5772 }, 5773 } 5774 5775 old = nonEmptyOld.Copy() 5776 old.Canonicalize() 5777 require.Equal(old, nonEmptyOld) 5778 require.False(old.Diff(nonEmptyOld)) 5779 } 5780 5781 func TestNodeResources_Merge(t *testing.T) { 5782 res := &NodeResources{ 5783 Cpu: NodeCpuResources{ 5784 CpuShares: int64(32000), 5785 }, 5786 Memory: NodeMemoryResources{ 5787 MemoryMB: int64(64000), 5788 }, 5789 Networks: Networks{ 5790 { 5791 Device: "foo", 5792 }, 5793 }, 5794 } 5795 5796 res.Merge(&NodeResources{ 5797 Memory: NodeMemoryResources{ 5798 MemoryMB: int64(100000), 5799 }, 5800 Networks: Networks{ 5801 { 5802 Mode: "foo/bar", 5803 }, 5804 }, 5805 }) 5806 5807 require.Exactly(t, &NodeResources{ 5808 Cpu: NodeCpuResources{ 5809 CpuShares: int64(32000), 5810 }, 5811 Memory: NodeMemoryResources{ 5812 MemoryMB: int64(100000), 5813 }, 5814 Networks: Networks{ 5815 { 5816 Device: "foo", 5817 }, 5818 { 5819 Mode: "foo/bar", 5820 }, 5821 }, 5822 }, res) 5823 } 5824 5825 func TestAllocatedResources_Canonicalize(t *testing.T) { 5826 cases := map[string]struct { 5827 input *AllocatedResources 5828 expected *AllocatedResources 5829 }{ 5830 "base": { 5831 input: &AllocatedResources{ 5832 Tasks: map[string]*AllocatedTaskResources{ 5833 "task": { 5834 Networks: Networks{ 5835 { 5836 IP: "127.0.0.1", 5837 DynamicPorts: []Port{{"admin", 8080, 0, "default"}}, 5838 }, 5839 }, 5840 }, 5841 }, 5842 }, 5843 expected: &AllocatedResources{ 5844 Tasks: map[string]*AllocatedTaskResources{ 5845 "task": { 5846 Networks: Networks{ 5847 { 5848 IP: "127.0.0.1", 5849 DynamicPorts: []Port{{"admin", 8080, 0, "default"}}, 5850 }, 5851 }, 5852 }, 5853 }, 5854 Shared: AllocatedSharedResources{ 5855 Ports: AllocatedPorts{ 5856 { 5857 Label: "admin", 5858 Value: 8080, 5859 To: 0, 5860 HostIP: "127.0.0.1", 5861 }, 5862 }, 5863 }, 5864 }, 5865 }, 5866 "base with existing": { 5867 input: &AllocatedResources{ 5868 Tasks: map[string]*AllocatedTaskResources{ 5869 "task": { 5870 Networks: Networks{ 5871 { 5872 IP: "127.0.0.1", 5873 DynamicPorts: []Port{{"admin", 8080, 0, "default"}}, 5874 }, 5875 }, 5876 }, 5877 }, 5878 Shared: AllocatedSharedResources{ 5879 Ports: AllocatedPorts{ 5880 { 5881 Label: "http", 5882 Value: 80, 5883 To: 8080, 5884 HostIP: "127.0.0.1", 5885 }, 5886 }, 5887 }, 5888 }, 5889 expected: &AllocatedResources{ 5890 Tasks: map[string]*AllocatedTaskResources{ 5891 "task": { 5892 Networks: Networks{ 5893 { 5894 IP: "127.0.0.1", 5895 DynamicPorts: []Port{{"admin", 8080, 0, "default"}}, 5896 }, 5897 }, 5898 }, 5899 }, 5900 Shared: AllocatedSharedResources{ 5901 Ports: AllocatedPorts{ 5902 { 5903 Label: "http", 5904 Value: 80, 5905 To: 8080, 5906 HostIP: "127.0.0.1", 5907 }, 5908 { 5909 Label: "admin", 5910 Value: 8080, 5911 To: 0, 5912 HostIP: "127.0.0.1", 5913 }, 5914 }, 5915 }, 5916 }, 5917 }, 5918 } 5919 for name, tc := range cases { 5920 tc.input.Canonicalize() 5921 require.Exactly(t, tc.expected, tc.input, "case %s did not match", name) 5922 } 5923 } 5924 5925 func TestAllocatedSharedResources_Canonicalize(t *testing.T) { 5926 a := &AllocatedSharedResources{ 5927 Networks: []*NetworkResource{ 5928 { 5929 IP: "127.0.0.1", 5930 DynamicPorts: []Port{ 5931 { 5932 Label: "http", 5933 Value: 22222, 5934 To: 8080, 5935 }, 5936 }, 5937 ReservedPorts: []Port{ 5938 { 5939 Label: "redis", 5940 Value: 6783, 5941 To: 6783, 5942 }, 5943 }, 5944 }, 5945 }, 5946 } 5947 5948 a.Canonicalize() 5949 require.Exactly(t, AllocatedPorts{ 5950 { 5951 Label: "http", 5952 Value: 22222, 5953 To: 8080, 5954 HostIP: "127.0.0.1", 5955 }, 5956 { 5957 Label: "redis", 5958 Value: 6783, 5959 To: 6783, 5960 HostIP: "127.0.0.1", 5961 }, 5962 }, a.Ports) 5963 } 5964 5965 func TestTaskGroup_validateScriptChecksInGroupServices(t *testing.T) { 5966 t.Run("service task not set", func(t *testing.T) { 5967 tg := &TaskGroup{ 5968 Name: "group1", 5969 Services: []*Service{{ 5970 Name: "service1", 5971 TaskName: "", // unset 5972 Checks: []*ServiceCheck{{ 5973 Name: "check1", 5974 Type: "script", 5975 TaskName: "", // unset 5976 }, { 5977 Name: "check2", 5978 Type: "ttl", // not script 5979 }, { 5980 Name: "check3", 5981 Type: "script", 5982 TaskName: "", // unset 5983 }}, 5984 }, { 5985 Name: "service2", 5986 Checks: []*ServiceCheck{{ 5987 Type: "script", 5988 TaskName: "task1", // set 5989 }}, 5990 }, { 5991 Name: "service3", 5992 TaskName: "", // unset 5993 Checks: []*ServiceCheck{{ 5994 Name: "check1", 5995 Type: "script", 5996 TaskName: "", // unset 5997 }}, 5998 }}, 5999 } 6000 6001 errStr := tg.validateScriptChecksInGroupServices().Error() 6002 require.Contains(t, errStr, "Service [group1]->service1 or Check check1 must specify task parameter") 6003 require.Contains(t, errStr, "Service [group1]->service1 or Check check3 must specify task parameter") 6004 require.Contains(t, errStr, "Service [group1]->service3 or Check check1 must specify task parameter") 6005 }) 6006 6007 t.Run("service task set", func(t *testing.T) { 6008 tgOK := &TaskGroup{ 6009 Name: "group1", 6010 Services: []*Service{{ 6011 Name: "service1", 6012 TaskName: "task1", 6013 Checks: []*ServiceCheck{{ 6014 Name: "check1", 6015 Type: "script", 6016 }, { 6017 Name: "check2", 6018 Type: "ttl", 6019 }, { 6020 Name: "check3", 6021 Type: "script", 6022 }}, 6023 }}, 6024 } 6025 6026 mErrOK := tgOK.validateScriptChecksInGroupServices() 6027 require.Nil(t, mErrOK) 6028 }) 6029 } 6030 6031 func requireErrors(t *testing.T, err error, expected ...string) { 6032 t.Helper() 6033 require.Error(t, err) 6034 mErr, ok := err.(*multierror.Error) 6035 require.True(t, ok) 6036 6037 var found []string 6038 for _, e := range expected { 6039 for _, actual := range mErr.Errors { 6040 if strings.Contains(actual.Error(), e) { 6041 found = append(found, e) 6042 break 6043 } 6044 } 6045 } 6046 6047 require.Equal(t, expected, found) 6048 }