github.com/manicqin/nomad@v0.9.5/nomad/structs/structs_test.go (about) 1 package structs 2 3 import ( 4 "fmt" 5 "os" 6 "reflect" 7 "strings" 8 "testing" 9 "time" 10 11 "github.com/hashicorp/consul/api" 12 "github.com/hashicorp/go-multierror" 13 "github.com/hashicorp/nomad/helper/uuid" 14 "github.com/kr/pretty" 15 "github.com/stretchr/testify/assert" 16 "github.com/stretchr/testify/require" 17 ) 18 19 func TestJob_Validate(t *testing.T) { 20 j := &Job{} 21 err := j.Validate() 22 mErr := err.(*multierror.Error) 23 if !strings.Contains(mErr.Errors[0].Error(), "job region") { 24 t.Fatalf("err: %s", err) 25 } 26 if !strings.Contains(mErr.Errors[1].Error(), "job ID") { 27 t.Fatalf("err: %s", err) 28 } 29 if !strings.Contains(mErr.Errors[2].Error(), "job name") { 30 t.Fatalf("err: %s", err) 31 } 32 if !strings.Contains(mErr.Errors[3].Error(), "namespace") { 33 t.Fatalf("err: %s", err) 34 } 35 if !strings.Contains(mErr.Errors[4].Error(), "job type") { 36 t.Fatalf("err: %s", err) 37 } 38 if !strings.Contains(mErr.Errors[5].Error(), "priority") { 39 t.Fatalf("err: %s", err) 40 } 41 if !strings.Contains(mErr.Errors[6].Error(), "datacenters") { 42 t.Fatalf("err: %s", err) 43 } 44 if !strings.Contains(mErr.Errors[7].Error(), "task groups") { 45 t.Fatalf("err: %s", err) 46 } 47 48 j = &Job{ 49 Type: "invalid-job-type", 50 } 51 err = j.Validate() 52 if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) { 53 t.Errorf("expected %s but found: %v", expected, err) 54 } 55 56 j = &Job{ 57 Type: JobTypeService, 58 Periodic: &PeriodicConfig{ 59 Enabled: true, 60 }, 61 } 62 err = j.Validate() 63 mErr = err.(*multierror.Error) 64 if !strings.Contains(mErr.Error(), "Periodic") { 65 t.Fatalf("err: %s", err) 66 } 67 68 j = &Job{ 69 Region: "global", 70 ID: uuid.Generate(), 71 Namespace: "test", 72 Name: "my-job", 73 Type: JobTypeService, 74 Priority: 50, 75 Datacenters: []string{"dc1"}, 76 TaskGroups: []*TaskGroup{ 77 { 78 Name: "web", 79 RestartPolicy: &RestartPolicy{ 80 Interval: 5 * time.Minute, 81 Delay: 10 * time.Second, 82 Attempts: 10, 83 }, 84 }, 85 { 86 Name: "web", 87 RestartPolicy: &RestartPolicy{ 88 Interval: 5 * time.Minute, 89 Delay: 10 * time.Second, 90 Attempts: 10, 91 }, 92 }, 93 { 94 RestartPolicy: &RestartPolicy{ 95 Interval: 5 * time.Minute, 96 Delay: 10 * time.Second, 97 Attempts: 10, 98 }, 99 }, 100 }, 101 } 102 err = j.Validate() 103 mErr = err.(*multierror.Error) 104 if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") { 105 t.Fatalf("err: %s", err) 106 } 107 if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") { 108 t.Fatalf("err: %s", err) 109 } 110 if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") { 111 t.Fatalf("err: %s", err) 112 } 113 114 // test for empty datacenters 115 j = &Job{ 116 Datacenters: []string{""}, 117 } 118 err = j.Validate() 119 mErr = err.(*multierror.Error) 120 if !strings.Contains(mErr.Error(), "datacenter must be non-empty string") { 121 t.Fatalf("err: %s", err) 122 } 123 } 124 125 func TestJob_Warnings(t *testing.T) { 126 cases := []struct { 127 Name string 128 Job *Job 129 Expected []string 130 }{ 131 { 132 Name: "Higher counts for update stanza", 133 Expected: []string{"max parallel count is greater"}, 134 Job: &Job{ 135 Type: JobTypeService, 136 TaskGroups: []*TaskGroup{ 137 { 138 Name: "foo", 139 Count: 2, 140 Update: &UpdateStrategy{ 141 MaxParallel: 10, 142 }, 143 }, 144 }, 145 }, 146 }, 147 { 148 Name: "AutoPromote mixed TaskGroups", 149 Expected: []string{"auto_promote must be true for all groups"}, 150 Job: &Job{ 151 Type: JobTypeService, 152 TaskGroups: []*TaskGroup{ 153 { 154 Update: &UpdateStrategy{ 155 AutoPromote: true, 156 }, 157 }, 158 { 159 Update: &UpdateStrategy{ 160 AutoPromote: false, 161 }, 162 }, 163 }, 164 }, 165 }, 166 } 167 168 for _, c := range cases { 169 t.Run(c.Name, func(t *testing.T) { 170 warnings := c.Job.Warnings() 171 if warnings == nil { 172 if len(c.Expected) == 0 { 173 return 174 } else { 175 t.Fatal("Got no warnings when they were expected") 176 } 177 } 178 179 a := warnings.Error() 180 for _, e := range c.Expected { 181 if !strings.Contains(a, e) { 182 t.Fatalf("Got warnings %q; didn't contain %q", a, e) 183 } 184 } 185 }) 186 } 187 } 188 189 func TestJob_SpecChanged(t *testing.T) { 190 // Get a base test job 191 base := testJob() 192 193 // Only modify the indexes/mutable state of the job 194 mutatedBase := base.Copy() 195 mutatedBase.Status = "foo" 196 mutatedBase.ModifyIndex = base.ModifyIndex + 100 197 198 // changed contains a spec change that should be detected 199 change := base.Copy() 200 change.Priority = 99 201 202 cases := []struct { 203 Name string 204 Original *Job 205 New *Job 206 Changed bool 207 }{ 208 { 209 Name: "Same job except mutable indexes", 210 Changed: false, 211 Original: base, 212 New: mutatedBase, 213 }, 214 { 215 Name: "Different", 216 Changed: true, 217 Original: base, 218 New: change, 219 }, 220 } 221 222 for _, c := range cases { 223 t.Run(c.Name, func(t *testing.T) { 224 if actual := c.Original.SpecChanged(c.New); actual != c.Changed { 225 t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed) 226 } 227 }) 228 } 229 } 230 231 func testJob() *Job { 232 return &Job{ 233 Region: "global", 234 ID: uuid.Generate(), 235 Namespace: "test", 236 Name: "my-job", 237 Type: JobTypeService, 238 Priority: 50, 239 AllAtOnce: false, 240 Datacenters: []string{"dc1"}, 241 Constraints: []*Constraint{ 242 { 243 LTarget: "$attr.kernel.name", 244 RTarget: "linux", 245 Operand: "=", 246 }, 247 }, 248 Periodic: &PeriodicConfig{ 249 Enabled: false, 250 }, 251 TaskGroups: []*TaskGroup{ 252 { 253 Name: "web", 254 Count: 10, 255 EphemeralDisk: DefaultEphemeralDisk(), 256 RestartPolicy: &RestartPolicy{ 257 Mode: RestartPolicyModeFail, 258 Attempts: 3, 259 Interval: 10 * time.Minute, 260 Delay: 1 * time.Minute, 261 }, 262 ReschedulePolicy: &ReschedulePolicy{ 263 Interval: 5 * time.Minute, 264 Attempts: 10, 265 Delay: 5 * time.Second, 266 DelayFunction: "constant", 267 }, 268 Tasks: []*Task{ 269 { 270 Name: "web", 271 Driver: "exec", 272 Config: map[string]interface{}{ 273 "command": "/bin/date", 274 }, 275 Env: map[string]string{ 276 "FOO": "bar", 277 }, 278 Artifacts: []*TaskArtifact{ 279 { 280 GetterSource: "http://foo.com", 281 }, 282 }, 283 Services: []*Service{ 284 { 285 Name: "${TASK}-frontend", 286 PortLabel: "http", 287 }, 288 }, 289 Resources: &Resources{ 290 CPU: 500, 291 MemoryMB: 256, 292 Networks: []*NetworkResource{ 293 { 294 MBits: 50, 295 DynamicPorts: []Port{{Label: "http"}}, 296 }, 297 }, 298 }, 299 LogConfig: &LogConfig{ 300 MaxFiles: 10, 301 MaxFileSizeMB: 1, 302 }, 303 }, 304 }, 305 Meta: map[string]string{ 306 "elb_check_type": "http", 307 "elb_check_interval": "30s", 308 "elb_check_min": "3", 309 }, 310 }, 311 }, 312 Meta: map[string]string{ 313 "owner": "armon", 314 }, 315 } 316 } 317 318 func TestJob_Copy(t *testing.T) { 319 j := testJob() 320 c := j.Copy() 321 if !reflect.DeepEqual(j, c) { 322 t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j) 323 } 324 } 325 326 func TestJob_IsPeriodic(t *testing.T) { 327 j := &Job{ 328 Type: JobTypeService, 329 Periodic: &PeriodicConfig{ 330 Enabled: true, 331 }, 332 } 333 if !j.IsPeriodic() { 334 t.Fatalf("IsPeriodic() returned false on periodic job") 335 } 336 337 j = &Job{ 338 Type: JobTypeService, 339 } 340 if j.IsPeriodic() { 341 t.Fatalf("IsPeriodic() returned true on non-periodic job") 342 } 343 } 344 345 func TestJob_IsPeriodicActive(t *testing.T) { 346 cases := []struct { 347 job *Job 348 active bool 349 }{ 350 { 351 job: &Job{ 352 Type: JobTypeService, 353 Periodic: &PeriodicConfig{ 354 Enabled: true, 355 }, 356 }, 357 active: true, 358 }, 359 { 360 job: &Job{ 361 Type: JobTypeService, 362 Periodic: &PeriodicConfig{ 363 Enabled: false, 364 }, 365 }, 366 active: false, 367 }, 368 { 369 job: &Job{ 370 Type: JobTypeService, 371 Periodic: &PeriodicConfig{ 372 Enabled: true, 373 }, 374 Stop: true, 375 }, 376 active: false, 377 }, 378 { 379 job: &Job{ 380 Type: JobTypeService, 381 Periodic: &PeriodicConfig{ 382 Enabled: false, 383 }, 384 ParameterizedJob: &ParameterizedJobConfig{}, 385 }, 386 active: false, 387 }, 388 } 389 390 for i, c := range cases { 391 if act := c.job.IsPeriodicActive(); act != c.active { 392 t.Fatalf("case %d failed: got %v; want %v", i, act, c.active) 393 } 394 } 395 } 396 397 func TestJob_SystemJob_Validate(t *testing.T) { 398 j := testJob() 399 j.Type = JobTypeSystem 400 j.TaskGroups[0].ReschedulePolicy = nil 401 j.Canonicalize() 402 403 err := j.Validate() 404 if err == nil || !strings.Contains(err.Error(), "exceed") { 405 t.Fatalf("expect error due to count") 406 } 407 408 j.TaskGroups[0].Count = 0 409 if err := j.Validate(); err != nil { 410 t.Fatalf("unexpected err: %v", err) 411 } 412 413 j.TaskGroups[0].Count = 1 414 if err := j.Validate(); err != nil { 415 t.Fatalf("unexpected err: %v", err) 416 } 417 418 // Add affinities at job, task group and task level, that should fail validation 419 420 j.Affinities = []*Affinity{{ 421 Operand: "=", 422 LTarget: "${node.datacenter}", 423 RTarget: "dc1", 424 }} 425 j.TaskGroups[0].Affinities = []*Affinity{{ 426 Operand: "=", 427 LTarget: "${meta.rack}", 428 RTarget: "r1", 429 }} 430 j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{ 431 Operand: "=", 432 LTarget: "${meta.rack}", 433 RTarget: "r1", 434 }} 435 err = j.Validate() 436 require.NotNil(t, err) 437 require.Contains(t, err.Error(), "System jobs may not have an affinity stanza") 438 439 // Add spread at job and task group level, that should fail validation 440 j.Spreads = []*Spread{{ 441 Attribute: "${node.datacenter}", 442 Weight: 100, 443 }} 444 j.TaskGroups[0].Spreads = []*Spread{{ 445 Attribute: "${node.datacenter}", 446 Weight: 100, 447 }} 448 449 err = j.Validate() 450 require.NotNil(t, err) 451 require.Contains(t, err.Error(), "System jobs may not have a spread stanza") 452 453 } 454 455 func TestJob_VaultPolicies(t *testing.T) { 456 j0 := &Job{} 457 e0 := make(map[string]map[string]*Vault, 0) 458 459 vj1 := &Vault{ 460 Policies: []string{ 461 "p1", 462 "p2", 463 }, 464 } 465 vj2 := &Vault{ 466 Policies: []string{ 467 "p3", 468 "p4", 469 }, 470 } 471 vj3 := &Vault{ 472 Policies: []string{ 473 "p5", 474 }, 475 } 476 j1 := &Job{ 477 TaskGroups: []*TaskGroup{ 478 { 479 Name: "foo", 480 Tasks: []*Task{ 481 { 482 Name: "t1", 483 }, 484 { 485 Name: "t2", 486 Vault: vj1, 487 }, 488 }, 489 }, 490 { 491 Name: "bar", 492 Tasks: []*Task{ 493 { 494 Name: "t3", 495 Vault: vj2, 496 }, 497 { 498 Name: "t4", 499 Vault: vj3, 500 }, 501 }, 502 }, 503 }, 504 } 505 506 e1 := map[string]map[string]*Vault{ 507 "foo": { 508 "t2": vj1, 509 }, 510 "bar": { 511 "t3": vj2, 512 "t4": vj3, 513 }, 514 } 515 516 cases := []struct { 517 Job *Job 518 Expected map[string]map[string]*Vault 519 }{ 520 { 521 Job: j0, 522 Expected: e0, 523 }, 524 { 525 Job: j1, 526 Expected: e1, 527 }, 528 } 529 530 for i, c := range cases { 531 got := c.Job.VaultPolicies() 532 if !reflect.DeepEqual(got, c.Expected) { 533 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 534 } 535 } 536 } 537 538 func TestJob_RequiredSignals(t *testing.T) { 539 j0 := &Job{} 540 e0 := make(map[string]map[string][]string, 0) 541 542 vj1 := &Vault{ 543 Policies: []string{"p1"}, 544 ChangeMode: VaultChangeModeNoop, 545 } 546 vj2 := &Vault{ 547 Policies: []string{"p1"}, 548 ChangeMode: VaultChangeModeSignal, 549 ChangeSignal: "SIGUSR1", 550 } 551 tj1 := &Template{ 552 SourcePath: "foo", 553 DestPath: "bar", 554 ChangeMode: TemplateChangeModeNoop, 555 } 556 tj2 := &Template{ 557 SourcePath: "foo", 558 DestPath: "bar", 559 ChangeMode: TemplateChangeModeSignal, 560 ChangeSignal: "SIGUSR2", 561 } 562 j1 := &Job{ 563 TaskGroups: []*TaskGroup{ 564 { 565 Name: "foo", 566 Tasks: []*Task{ 567 { 568 Name: "t1", 569 }, 570 { 571 Name: "t2", 572 Vault: vj2, 573 Templates: []*Template{tj2}, 574 }, 575 }, 576 }, 577 { 578 Name: "bar", 579 Tasks: []*Task{ 580 { 581 Name: "t3", 582 Vault: vj1, 583 Templates: []*Template{tj1}, 584 }, 585 { 586 Name: "t4", 587 Vault: vj2, 588 }, 589 }, 590 }, 591 }, 592 } 593 594 e1 := map[string]map[string][]string{ 595 "foo": { 596 "t2": {"SIGUSR1", "SIGUSR2"}, 597 }, 598 "bar": { 599 "t4": {"SIGUSR1"}, 600 }, 601 } 602 603 j2 := &Job{ 604 TaskGroups: []*TaskGroup{ 605 { 606 Name: "foo", 607 Tasks: []*Task{ 608 { 609 Name: "t1", 610 KillSignal: "SIGQUIT", 611 }, 612 }, 613 }, 614 }, 615 } 616 617 e2 := map[string]map[string][]string{ 618 "foo": { 619 "t1": {"SIGQUIT"}, 620 }, 621 } 622 623 cases := []struct { 624 Job *Job 625 Expected map[string]map[string][]string 626 }{ 627 { 628 Job: j0, 629 Expected: e0, 630 }, 631 { 632 Job: j1, 633 Expected: e1, 634 }, 635 { 636 Job: j2, 637 Expected: e2, 638 }, 639 } 640 641 for i, c := range cases { 642 got := c.Job.RequiredSignals() 643 if !reflect.DeepEqual(got, c.Expected) { 644 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 645 } 646 } 647 } 648 649 // test new Equal comparisons for components of Jobs 650 func TestJob_PartEqual(t *testing.T) { 651 ns := &Networks{} 652 require.True(t, ns.Equals(&Networks{})) 653 654 ns = &Networks{ 655 &NetworkResource{Device: "eth0"}, 656 } 657 require.True(t, ns.Equals(&Networks{ 658 &NetworkResource{Device: "eth0"}, 659 })) 660 661 ns = &Networks{ 662 &NetworkResource{Device: "eth0"}, 663 &NetworkResource{Device: "eth1"}, 664 &NetworkResource{Device: "eth2"}, 665 } 666 require.True(t, ns.Equals(&Networks{ 667 &NetworkResource{Device: "eth2"}, 668 &NetworkResource{Device: "eth0"}, 669 &NetworkResource{Device: "eth1"}, 670 })) 671 672 cs := &Constraints{ 673 &Constraint{"left0", "right0", "=", ""}, 674 &Constraint{"left1", "right1", "=", ""}, 675 &Constraint{"left2", "right2", "=", ""}, 676 } 677 require.True(t, cs.Equals(&Constraints{ 678 &Constraint{"left0", "right0", "=", ""}, 679 &Constraint{"left2", "right2", "=", ""}, 680 &Constraint{"left1", "right1", "=", ""}, 681 })) 682 683 as := &Affinities{ 684 &Affinity{"left0", "right0", "=", 0, ""}, 685 &Affinity{"left1", "right1", "=", 0, ""}, 686 &Affinity{"left2", "right2", "=", 0, ""}, 687 } 688 require.True(t, as.Equals(&Affinities{ 689 &Affinity{"left0", "right0", "=", 0, ""}, 690 &Affinity{"left2", "right2", "=", 0, ""}, 691 &Affinity{"left1", "right1", "=", 0, ""}, 692 })) 693 } 694 695 func TestTaskGroup_Validate(t *testing.T) { 696 j := testJob() 697 tg := &TaskGroup{ 698 Count: -1, 699 RestartPolicy: &RestartPolicy{ 700 Interval: 5 * time.Minute, 701 Delay: 10 * time.Second, 702 Attempts: 10, 703 Mode: RestartPolicyModeDelay, 704 }, 705 ReschedulePolicy: &ReschedulePolicy{ 706 Interval: 5 * time.Minute, 707 Attempts: 5, 708 Delay: 5 * time.Second, 709 }, 710 } 711 err := tg.Validate(j) 712 mErr := err.(*multierror.Error) 713 if !strings.Contains(mErr.Errors[0].Error(), "group name") { 714 t.Fatalf("err: %s", err) 715 } 716 if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") { 717 t.Fatalf("err: %s", err) 718 } 719 if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") { 720 t.Fatalf("err: %s", err) 721 } 722 723 tg = &TaskGroup{ 724 Tasks: []*Task{ 725 { 726 Name: "task-a", 727 Resources: &Resources{ 728 Networks: []*NetworkResource{ 729 { 730 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 731 }, 732 }, 733 }, 734 }, 735 { 736 Name: "task-b", 737 Resources: &Resources{ 738 Networks: []*NetworkResource{ 739 { 740 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 741 }, 742 }, 743 }, 744 }, 745 }, 746 } 747 err = tg.Validate(&Job{}) 748 expected := `Static port 123 already reserved by task-a:foo` 749 if !strings.Contains(err.Error(), expected) { 750 t.Errorf("expected %s but found: %v", expected, err) 751 } 752 753 tg = &TaskGroup{ 754 Tasks: []*Task{ 755 { 756 Name: "task-a", 757 Resources: &Resources{ 758 Networks: []*NetworkResource{ 759 { 760 ReservedPorts: []Port{ 761 {Label: "foo", Value: 123}, 762 {Label: "bar", Value: 123}, 763 }, 764 }, 765 }, 766 }, 767 }, 768 }, 769 } 770 err = tg.Validate(&Job{}) 771 expected = `Static port 123 already reserved by task-a:foo` 772 if !strings.Contains(err.Error(), expected) { 773 t.Errorf("expected %s but found: %v", expected, err) 774 } 775 776 tg = &TaskGroup{ 777 Name: "web", 778 Count: 1, 779 Tasks: []*Task{ 780 {Name: "web", Leader: true}, 781 {Name: "web", Leader: true}, 782 {}, 783 }, 784 RestartPolicy: &RestartPolicy{ 785 Interval: 5 * time.Minute, 786 Delay: 10 * time.Second, 787 Attempts: 10, 788 Mode: RestartPolicyModeDelay, 789 }, 790 ReschedulePolicy: &ReschedulePolicy{ 791 Interval: 5 * time.Minute, 792 Attempts: 10, 793 Delay: 5 * time.Second, 794 DelayFunction: "constant", 795 }, 796 } 797 798 err = tg.Validate(j) 799 mErr = err.(*multierror.Error) 800 if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") { 801 t.Fatalf("err: %s", err) 802 } 803 if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") { 804 t.Fatalf("err: %s", err) 805 } 806 if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") { 807 t.Fatalf("err: %s", err) 808 } 809 if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") { 810 t.Fatalf("err: %s", err) 811 } 812 if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") { 813 t.Fatalf("err: %s", err) 814 } 815 816 tg = &TaskGroup{ 817 Name: "web", 818 Count: 1, 819 Tasks: []*Task{ 820 {Name: "web", Leader: true}, 821 }, 822 Update: DefaultUpdateStrategy.Copy(), 823 } 824 j.Type = JobTypeBatch 825 err = tg.Validate(j) 826 if !strings.Contains(err.Error(), "does not allow update block") { 827 t.Fatalf("err: %s", err) 828 } 829 830 tg = &TaskGroup{ 831 Count: -1, 832 RestartPolicy: &RestartPolicy{ 833 Interval: 5 * time.Minute, 834 Delay: 10 * time.Second, 835 Attempts: 10, 836 Mode: RestartPolicyModeDelay, 837 }, 838 ReschedulePolicy: &ReschedulePolicy{ 839 Interval: 5 * time.Minute, 840 Attempts: 5, 841 Delay: 5 * time.Second, 842 }, 843 } 844 j.Type = JobTypeSystem 845 err = tg.Validate(j) 846 if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") { 847 t.Fatalf("err: %s", err) 848 } 849 850 tg = &TaskGroup{ 851 Networks: []*NetworkResource{ 852 { 853 DynamicPorts: []Port{{"http", 0, 80}}, 854 }, 855 }, 856 Tasks: []*Task{ 857 { 858 Resources: &Resources{ 859 Networks: []*NetworkResource{ 860 { 861 DynamicPorts: []Port{{"http", 0, 80}}, 862 }, 863 }, 864 }, 865 }, 866 }, 867 } 868 err = tg.Validate(j) 869 require.Contains(t, err.Error(), "Port label http already in use") 870 require.Contains(t, err.Error(), "Port mapped to 80 already in use") 871 872 tg = &TaskGroup{ 873 Volumes: map[string]*VolumeRequest{ 874 "foo": { 875 Type: "nothost", 876 Source: "foo", 877 }, 878 }, 879 Tasks: []*Task{ 880 { 881 Name: "task-a", 882 Resources: &Resources{}, 883 }, 884 }, 885 } 886 err = tg.Validate(&Job{}) 887 require.Contains(t, err.Error(), `Volume foo has unrecognised type nothost`) 888 889 tg = &TaskGroup{ 890 Volumes: map[string]*VolumeRequest{ 891 "foo": { 892 Type: "host", 893 }, 894 }, 895 Tasks: []*Task{ 896 { 897 Name: "task-a", 898 Resources: &Resources{}, 899 }, 900 }, 901 } 902 err = tg.Validate(&Job{}) 903 require.Contains(t, err.Error(), `Volume foo has an empty source`) 904 905 tg = &TaskGroup{ 906 Volumes: map[string]*VolumeRequest{ 907 "foo": { 908 Type: "host", 909 }, 910 }, 911 Tasks: []*Task{ 912 { 913 Name: "task-a", 914 Resources: &Resources{}, 915 VolumeMounts: []*VolumeMount{ 916 { 917 Volume: "", 918 }, 919 }, 920 }, 921 { 922 Name: "task-b", 923 Resources: &Resources{}, 924 VolumeMounts: []*VolumeMount{ 925 { 926 Volume: "foob", 927 }, 928 }, 929 }, 930 }, 931 } 932 err = tg.Validate(&Job{}) 933 expected = `Task task-a has a volume mount (0) referencing an empty volume` 934 require.Contains(t, err.Error(), expected) 935 936 expected = `Task task-b has a volume mount (0) referencing undefined volume foob` 937 require.Contains(t, err.Error(), expected) 938 939 taskA := &Task{Name: "task-a"} 940 tg = &TaskGroup{ 941 Name: "group-a", 942 Services: []*Service{ 943 { 944 Name: "service-a", 945 Checks: []*ServiceCheck{ 946 { 947 Name: "check-a", 948 Type: "tcp", 949 TaskName: "task-b", 950 PortLabel: "http", 951 Interval: time.Duration(1 * time.Second), 952 Timeout: time.Duration(1 * time.Second), 953 }, 954 }, 955 }, 956 }, 957 Tasks: []*Task{taskA}, 958 } 959 err = tg.Validate(&Job{}) 960 expected = `Check check-a invalid: refers to non-existent task task-b` 961 require.Contains(t, err.Error(), expected) 962 963 expected = `Check check-a invalid: only script and gRPC checks should have tasks` 964 require.Contains(t, err.Error(), expected) 965 966 } 967 968 func TestTask_Validate(t *testing.T) { 969 task := &Task{} 970 ephemeralDisk := DefaultEphemeralDisk() 971 err := task.Validate(ephemeralDisk, JobTypeBatch, nil) 972 mErr := err.(*multierror.Error) 973 if !strings.Contains(mErr.Errors[0].Error(), "task name") { 974 t.Fatalf("err: %s", err) 975 } 976 if !strings.Contains(mErr.Errors[1].Error(), "task driver") { 977 t.Fatalf("err: %s", err) 978 } 979 if !strings.Contains(mErr.Errors[2].Error(), "task resources") { 980 t.Fatalf("err: %s", err) 981 } 982 983 task = &Task{Name: "web/foo"} 984 err = task.Validate(ephemeralDisk, JobTypeBatch, nil) 985 mErr = err.(*multierror.Error) 986 if !strings.Contains(mErr.Errors[0].Error(), "slashes") { 987 t.Fatalf("err: %s", err) 988 } 989 990 task = &Task{ 991 Name: "web", 992 Driver: "docker", 993 Resources: &Resources{ 994 CPU: 100, 995 MemoryMB: 100, 996 }, 997 LogConfig: DefaultLogConfig(), 998 } 999 ephemeralDisk.SizeMB = 200 1000 err = task.Validate(ephemeralDisk, JobTypeBatch, nil) 1001 if err != nil { 1002 t.Fatalf("err: %s", err) 1003 } 1004 1005 task.Constraints = append(task.Constraints, 1006 &Constraint{ 1007 Operand: ConstraintDistinctHosts, 1008 }, 1009 &Constraint{ 1010 Operand: ConstraintDistinctProperty, 1011 LTarget: "${meta.rack}", 1012 }) 1013 1014 err = task.Validate(ephemeralDisk, JobTypeBatch, nil) 1015 mErr = err.(*multierror.Error) 1016 if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") { 1017 t.Fatalf("err: %s", err) 1018 } 1019 if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") { 1020 t.Fatalf("err: %s", err) 1021 } 1022 } 1023 1024 func TestTask_Validate_Services(t *testing.T) { 1025 s1 := &Service{ 1026 Name: "service-name", 1027 PortLabel: "bar", 1028 Checks: []*ServiceCheck{ 1029 { 1030 Name: "check-name", 1031 Type: ServiceCheckTCP, 1032 Interval: 0 * time.Second, 1033 }, 1034 { 1035 Name: "check-name", 1036 Type: ServiceCheckTCP, 1037 Timeout: 2 * time.Second, 1038 }, 1039 { 1040 Name: "check-name", 1041 Type: ServiceCheckTCP, 1042 Interval: 1 * time.Second, 1043 }, 1044 }, 1045 } 1046 1047 s2 := &Service{ 1048 Name: "service-name", 1049 PortLabel: "bar", 1050 } 1051 1052 s3 := &Service{ 1053 Name: "service-A", 1054 PortLabel: "a", 1055 } 1056 s4 := &Service{ 1057 Name: "service-A", 1058 PortLabel: "b", 1059 } 1060 1061 ephemeralDisk := DefaultEphemeralDisk() 1062 ephemeralDisk.SizeMB = 200 1063 task := &Task{ 1064 Name: "web", 1065 Driver: "docker", 1066 Resources: &Resources{ 1067 CPU: 100, 1068 MemoryMB: 100, 1069 }, 1070 Services: []*Service{s1, s2}, 1071 } 1072 1073 task1 := &Task{ 1074 Name: "web", 1075 Driver: "docker", 1076 Resources: DefaultResources(), 1077 Services: []*Service{s3, s4}, 1078 LogConfig: DefaultLogConfig(), 1079 } 1080 task1.Resources.Networks = []*NetworkResource{ 1081 { 1082 MBits: 10, 1083 DynamicPorts: []Port{ 1084 { 1085 Label: "a", 1086 Value: 1000, 1087 }, 1088 { 1089 Label: "b", 1090 Value: 2000, 1091 }, 1092 }, 1093 }, 1094 } 1095 1096 err := task.Validate(ephemeralDisk, JobTypeService, nil) 1097 if err == nil { 1098 t.Fatal("expected an error") 1099 } 1100 1101 if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") { 1102 t.Fatalf("err: %v", err) 1103 } 1104 1105 if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") { 1106 t.Fatalf("err: %v", err) 1107 } 1108 1109 if !strings.Contains(err.Error(), "missing required value interval") { 1110 t.Fatalf("err: %v", err) 1111 } 1112 1113 if !strings.Contains(err.Error(), "cannot be less than") { 1114 t.Fatalf("err: %v", err) 1115 } 1116 1117 if err = task1.Validate(ephemeralDisk, JobTypeService, nil); err != nil { 1118 t.Fatalf("err : %v", err) 1119 } 1120 } 1121 1122 func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { 1123 ephemeralDisk := DefaultEphemeralDisk() 1124 getTask := func(s *Service) *Task { 1125 task := &Task{ 1126 Name: "web", 1127 Driver: "docker", 1128 Resources: DefaultResources(), 1129 Services: []*Service{s}, 1130 LogConfig: DefaultLogConfig(), 1131 } 1132 task.Resources.Networks = []*NetworkResource{ 1133 { 1134 MBits: 10, 1135 DynamicPorts: []Port{ 1136 { 1137 Label: "http", 1138 Value: 80, 1139 }, 1140 }, 1141 }, 1142 } 1143 return task 1144 } 1145 1146 cases := []*Service{ 1147 { 1148 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1149 Name: "DriverModeWithLabel", 1150 PortLabel: "http", 1151 AddressMode: AddressModeDriver, 1152 }, 1153 { 1154 Name: "DriverModeWithPort", 1155 PortLabel: "80", 1156 AddressMode: AddressModeDriver, 1157 }, 1158 { 1159 Name: "HostModeWithLabel", 1160 PortLabel: "http", 1161 AddressMode: AddressModeHost, 1162 }, 1163 { 1164 Name: "HostModeWithoutLabel", 1165 AddressMode: AddressModeHost, 1166 }, 1167 { 1168 Name: "DriverModeWithoutLabel", 1169 AddressMode: AddressModeDriver, 1170 }, 1171 } 1172 1173 for _, service := range cases { 1174 task := getTask(service) 1175 t.Run(service.Name, func(t *testing.T) { 1176 if err := task.Validate(ephemeralDisk, JobTypeService, nil); err != nil { 1177 t.Fatalf("unexpected err: %v", err) 1178 } 1179 }) 1180 } 1181 } 1182 1183 func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { 1184 ephemeralDisk := DefaultEphemeralDisk() 1185 getTask := func(s *Service) *Task { 1186 task := &Task{ 1187 Name: "web", 1188 Driver: "docker", 1189 Resources: DefaultResources(), 1190 Services: []*Service{s}, 1191 LogConfig: DefaultLogConfig(), 1192 } 1193 task.Resources.Networks = []*NetworkResource{ 1194 { 1195 MBits: 10, 1196 DynamicPorts: []Port{ 1197 { 1198 Label: "http", 1199 Value: 80, 1200 }, 1201 }, 1202 }, 1203 } 1204 return task 1205 } 1206 1207 cases := []*Service{ 1208 { 1209 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1210 Name: "DriverModeWithLabel", 1211 PortLabel: "asdf", 1212 AddressMode: AddressModeDriver, 1213 }, 1214 { 1215 Name: "HostModeWithLabel", 1216 PortLabel: "asdf", 1217 AddressMode: AddressModeHost, 1218 }, 1219 { 1220 Name: "HostModeWithPort", 1221 PortLabel: "80", 1222 AddressMode: AddressModeHost, 1223 }, 1224 } 1225 1226 for _, service := range cases { 1227 task := getTask(service) 1228 t.Run(service.Name, func(t *testing.T) { 1229 err := task.Validate(ephemeralDisk, JobTypeService, nil) 1230 if err == nil { 1231 t.Fatalf("expected an error") 1232 } 1233 //t.Logf("err: %v", err) 1234 }) 1235 } 1236 } 1237 1238 func TestTask_Validate_Service_Check(t *testing.T) { 1239 1240 invalidCheck := ServiceCheck{ 1241 Name: "check-name", 1242 Command: "/bin/true", 1243 Type: ServiceCheckScript, 1244 Interval: 10 * time.Second, 1245 } 1246 1247 err := invalidCheck.validate() 1248 if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") { 1249 t.Fatalf("expected a timeout validation error but received: %q", err) 1250 } 1251 1252 check1 := ServiceCheck{ 1253 Name: "check-name", 1254 Type: ServiceCheckTCP, 1255 Interval: 10 * time.Second, 1256 Timeout: 2 * time.Second, 1257 } 1258 1259 if err := check1.validate(); err != nil { 1260 t.Fatalf("err: %v", err) 1261 } 1262 1263 check1.InitialStatus = "foo" 1264 err = check1.validate() 1265 if err == nil { 1266 t.Fatal("Expected an error") 1267 } 1268 1269 if !strings.Contains(err.Error(), "invalid initial check state (foo)") { 1270 t.Fatalf("err: %v", err) 1271 } 1272 1273 check1.InitialStatus = api.HealthCritical 1274 err = check1.validate() 1275 if err != nil { 1276 t.Fatalf("err: %v", err) 1277 } 1278 1279 check1.InitialStatus = api.HealthPassing 1280 err = check1.validate() 1281 if err != nil { 1282 t.Fatalf("err: %v", err) 1283 } 1284 1285 check1.InitialStatus = "" 1286 err = check1.validate() 1287 if err != nil { 1288 t.Fatalf("err: %v", err) 1289 } 1290 1291 check2 := ServiceCheck{ 1292 Name: "check-name-2", 1293 Type: ServiceCheckHTTP, 1294 Interval: 10 * time.Second, 1295 Timeout: 2 * time.Second, 1296 Path: "/foo/bar", 1297 } 1298 1299 err = check2.validate() 1300 if err != nil { 1301 t.Fatalf("err: %v", err) 1302 } 1303 1304 check2.Path = "" 1305 err = check2.validate() 1306 if err == nil { 1307 t.Fatal("Expected an error") 1308 } 1309 if !strings.Contains(err.Error(), "valid http path") { 1310 t.Fatalf("err: %v", err) 1311 } 1312 1313 check2.Path = "http://www.example.com" 1314 err = check2.validate() 1315 if err == nil { 1316 t.Fatal("Expected an error") 1317 } 1318 if !strings.Contains(err.Error(), "relative http path") { 1319 t.Fatalf("err: %v", err) 1320 } 1321 } 1322 1323 // TestTask_Validate_Service_Check_AddressMode asserts that checks do not 1324 // inherit address mode but do inherit ports. 1325 func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { 1326 getTask := func(s *Service) *Task { 1327 return &Task{ 1328 Resources: &Resources{ 1329 Networks: []*NetworkResource{ 1330 { 1331 DynamicPorts: []Port{ 1332 { 1333 Label: "http", 1334 Value: 9999, 1335 }, 1336 }, 1337 }, 1338 }, 1339 }, 1340 Services: []*Service{s}, 1341 } 1342 } 1343 1344 cases := []struct { 1345 Service *Service 1346 ErrContains string 1347 }{ 1348 { 1349 Service: &Service{ 1350 Name: "invalid-driver", 1351 PortLabel: "80", 1352 AddressMode: "host", 1353 }, 1354 ErrContains: `port label "80" referenced`, 1355 }, 1356 { 1357 Service: &Service{ 1358 Name: "http-driver-fail-1", 1359 PortLabel: "80", 1360 AddressMode: "driver", 1361 Checks: []*ServiceCheck{ 1362 { 1363 Name: "invalid-check-1", 1364 Type: "tcp", 1365 Interval: time.Second, 1366 Timeout: time.Second, 1367 }, 1368 }, 1369 }, 1370 ErrContains: `check "invalid-check-1" cannot use a numeric port`, 1371 }, 1372 { 1373 Service: &Service{ 1374 Name: "http-driver-fail-2", 1375 PortLabel: "80", 1376 AddressMode: "driver", 1377 Checks: []*ServiceCheck{ 1378 { 1379 Name: "invalid-check-2", 1380 Type: "tcp", 1381 PortLabel: "80", 1382 Interval: time.Second, 1383 Timeout: time.Second, 1384 }, 1385 }, 1386 }, 1387 ErrContains: `check "invalid-check-2" cannot use a numeric port`, 1388 }, 1389 { 1390 Service: &Service{ 1391 Name: "http-driver-fail-3", 1392 PortLabel: "80", 1393 AddressMode: "driver", 1394 Checks: []*ServiceCheck{ 1395 { 1396 Name: "invalid-check-3", 1397 Type: "tcp", 1398 PortLabel: "missing-port-label", 1399 Interval: time.Second, 1400 Timeout: time.Second, 1401 }, 1402 }, 1403 }, 1404 ErrContains: `port label "missing-port-label" referenced`, 1405 }, 1406 { 1407 Service: &Service{ 1408 Name: "http-driver-passes", 1409 PortLabel: "80", 1410 AddressMode: "driver", 1411 Checks: []*ServiceCheck{ 1412 { 1413 Name: "valid-script-check", 1414 Type: "script", 1415 Command: "ok", 1416 Interval: time.Second, 1417 Timeout: time.Second, 1418 }, 1419 { 1420 Name: "valid-host-check", 1421 Type: "tcp", 1422 PortLabel: "http", 1423 Interval: time.Second, 1424 Timeout: time.Second, 1425 }, 1426 { 1427 Name: "valid-driver-check", 1428 Type: "tcp", 1429 AddressMode: "driver", 1430 Interval: time.Second, 1431 Timeout: time.Second, 1432 }, 1433 }, 1434 }, 1435 }, 1436 { 1437 Service: &Service{ 1438 Name: "empty-address-3673-passes-1", 1439 Checks: []*ServiceCheck{ 1440 { 1441 Name: "valid-port-label", 1442 Type: "tcp", 1443 PortLabel: "http", 1444 Interval: time.Second, 1445 Timeout: time.Second, 1446 }, 1447 { 1448 Name: "empty-is-ok", 1449 Type: "script", 1450 Command: "ok", 1451 Interval: time.Second, 1452 Timeout: time.Second, 1453 }, 1454 }, 1455 }, 1456 }, 1457 { 1458 Service: &Service{ 1459 Name: "empty-address-3673-passes-2", 1460 }, 1461 }, 1462 { 1463 Service: &Service{ 1464 Name: "empty-address-3673-fails", 1465 Checks: []*ServiceCheck{ 1466 { 1467 Name: "empty-is-not-ok", 1468 Type: "tcp", 1469 Interval: time.Second, 1470 Timeout: time.Second, 1471 }, 1472 }, 1473 }, 1474 ErrContains: `invalid: check requires a port but neither check nor service`, 1475 }, 1476 } 1477 1478 for _, tc := range cases { 1479 tc := tc 1480 task := getTask(tc.Service) 1481 t.Run(tc.Service.Name, func(t *testing.T) { 1482 err := validateServices(task) 1483 if err == nil && tc.ErrContains == "" { 1484 // Ok! 1485 return 1486 } 1487 if err == nil { 1488 t.Fatalf("no error returned. expected: %s", tc.ErrContains) 1489 } 1490 if !strings.Contains(err.Error(), tc.ErrContains) { 1491 t.Fatalf("expected %q but found: %v", tc.ErrContains, err) 1492 } 1493 }) 1494 } 1495 } 1496 1497 func TestTask_Validate_Service_Check_GRPC(t *testing.T) { 1498 t.Parallel() 1499 // Bad (no port) 1500 invalidGRPC := &ServiceCheck{ 1501 Type: ServiceCheckGRPC, 1502 Interval: time.Second, 1503 Timeout: time.Second, 1504 } 1505 service := &Service{ 1506 Name: "test", 1507 Checks: []*ServiceCheck{invalidGRPC}, 1508 } 1509 1510 assert.Error(t, service.Validate()) 1511 1512 // Good 1513 service.Checks[0] = &ServiceCheck{ 1514 Type: ServiceCheckGRPC, 1515 Interval: time.Second, 1516 Timeout: time.Second, 1517 PortLabel: "some-port-label", 1518 } 1519 1520 assert.NoError(t, service.Validate()) 1521 } 1522 1523 func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { 1524 t.Parallel() 1525 invalidCheckRestart := &CheckRestart{ 1526 Limit: -1, 1527 Grace: -1, 1528 } 1529 1530 err := invalidCheckRestart.Validate() 1531 assert.NotNil(t, err, "invalidateCheckRestart.Validate()") 1532 assert.Len(t, err.(*multierror.Error).Errors, 2) 1533 1534 validCheckRestart := &CheckRestart{} 1535 assert.Nil(t, validCheckRestart.Validate()) 1536 1537 validCheckRestart.Limit = 1 1538 validCheckRestart.Grace = 1 1539 assert.Nil(t, validCheckRestart.Validate()) 1540 } 1541 1542 func TestTask_Validate_ConnectProxyKind(t *testing.T) { 1543 ephemeralDisk := DefaultEphemeralDisk() 1544 getTask := func(kind TaskKind, leader bool) *Task { 1545 task := &Task{ 1546 Name: "web", 1547 Driver: "docker", 1548 Resources: DefaultResources(), 1549 LogConfig: DefaultLogConfig(), 1550 Kind: kind, 1551 Leader: leader, 1552 } 1553 task.Resources.Networks = []*NetworkResource{ 1554 { 1555 MBits: 10, 1556 DynamicPorts: []Port{ 1557 { 1558 Label: "http", 1559 Value: 80, 1560 }, 1561 }, 1562 }, 1563 } 1564 return task 1565 } 1566 1567 cases := []struct { 1568 Desc string 1569 Kind TaskKind 1570 Leader bool 1571 Service *Service 1572 TgService []*Service 1573 ErrContains string 1574 }{ 1575 { 1576 Desc: "Not connect", 1577 Kind: "test", 1578 }, 1579 { 1580 Desc: "Invalid because of service in task definition", 1581 Kind: "connect-proxy:redis", 1582 Service: &Service{ 1583 Name: "redis", 1584 }, 1585 ErrContains: "Connect proxy task must not have a service stanza", 1586 }, 1587 { 1588 Desc: "Leader should not be set", 1589 Kind: "connect-proxy:redis", 1590 Leader: true, 1591 Service: &Service{ 1592 Name: "redis", 1593 }, 1594 ErrContains: "Connect proxy task must not have leader set", 1595 }, 1596 { 1597 Desc: "Service name invalid", 1598 Kind: "connect-proxy:redis:test", 1599 Service: &Service{ 1600 Name: "redis", 1601 }, 1602 ErrContains: `No Connect services in task group with Connect proxy ("redis:test")`, 1603 }, 1604 { 1605 Desc: "Service name not found in group", 1606 Kind: "connect-proxy:redis", 1607 ErrContains: `No Connect services in task group with Connect proxy ("redis")`, 1608 }, 1609 { 1610 Desc: "Connect stanza not configured in group", 1611 Kind: "connect-proxy:redis", 1612 TgService: []*Service{{ 1613 Name: "redis", 1614 }}, 1615 ErrContains: `No Connect services in task group with Connect proxy ("redis")`, 1616 }, 1617 { 1618 Desc: "Valid connect proxy kind", 1619 Kind: "connect-proxy:redis", 1620 TgService: []*Service{{ 1621 Name: "redis", 1622 Connect: &ConsulConnect{ 1623 SidecarService: &ConsulSidecarService{ 1624 Port: "db", 1625 }, 1626 }, 1627 }}, 1628 }, 1629 } 1630 1631 for _, tc := range cases { 1632 tc := tc 1633 task := getTask(tc.Kind, tc.Leader) 1634 if tc.Service != nil { 1635 task.Services = []*Service{tc.Service} 1636 } 1637 t.Run(tc.Desc, func(t *testing.T) { 1638 err := task.Validate(ephemeralDisk, "service", tc.TgService) 1639 if err == nil && tc.ErrContains == "" { 1640 // Ok! 1641 return 1642 } 1643 require.Errorf(t, err, "no error returned. expected: %s", tc.ErrContains) 1644 require.Containsf(t, err.Error(), tc.ErrContains, "expected %q but found: %v", tc.ErrContains, err) 1645 }) 1646 } 1647 1648 } 1649 func TestTask_Validate_LogConfig(t *testing.T) { 1650 task := &Task{ 1651 LogConfig: DefaultLogConfig(), 1652 } 1653 ephemeralDisk := &EphemeralDisk{ 1654 SizeMB: 1, 1655 } 1656 1657 err := task.Validate(ephemeralDisk, JobTypeService, nil) 1658 mErr := err.(*multierror.Error) 1659 if !strings.Contains(mErr.Errors[3].Error(), "log storage") { 1660 t.Fatalf("err: %s", err) 1661 } 1662 } 1663 1664 func TestTask_Validate_Template(t *testing.T) { 1665 1666 bad := &Template{} 1667 task := &Task{ 1668 Templates: []*Template{bad}, 1669 } 1670 ephemeralDisk := &EphemeralDisk{ 1671 SizeMB: 1, 1672 } 1673 1674 err := task.Validate(ephemeralDisk, JobTypeService, nil) 1675 if !strings.Contains(err.Error(), "Template 1 validation failed") { 1676 t.Fatalf("err: %s", err) 1677 } 1678 1679 // Have two templates that share the same destination 1680 good := &Template{ 1681 SourcePath: "foo", 1682 DestPath: "local/foo", 1683 ChangeMode: "noop", 1684 } 1685 1686 task.Templates = []*Template{good, good} 1687 err = task.Validate(ephemeralDisk, JobTypeService, nil) 1688 if !strings.Contains(err.Error(), "same destination as") { 1689 t.Fatalf("err: %s", err) 1690 } 1691 1692 // Env templates can't use signals 1693 task.Templates = []*Template{ 1694 { 1695 Envvars: true, 1696 ChangeMode: "signal", 1697 }, 1698 } 1699 1700 err = task.Validate(ephemeralDisk, JobTypeService, nil) 1701 if err == nil { 1702 t.Fatalf("expected error from Template.Validate") 1703 } 1704 if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) { 1705 t.Errorf("expected to find %q but found %v", expected, err) 1706 } 1707 } 1708 1709 func TestTemplate_Validate(t *testing.T) { 1710 cases := []struct { 1711 Tmpl *Template 1712 Fail bool 1713 ContainsErrs []string 1714 }{ 1715 { 1716 Tmpl: &Template{}, 1717 Fail: true, 1718 ContainsErrs: []string{ 1719 "specify a source path", 1720 "specify a destination", 1721 TemplateChangeModeInvalidError.Error(), 1722 }, 1723 }, 1724 { 1725 Tmpl: &Template{ 1726 Splay: -100, 1727 }, 1728 Fail: true, 1729 ContainsErrs: []string{ 1730 "positive splay", 1731 }, 1732 }, 1733 { 1734 Tmpl: &Template{ 1735 ChangeMode: "foo", 1736 }, 1737 Fail: true, 1738 ContainsErrs: []string{ 1739 TemplateChangeModeInvalidError.Error(), 1740 }, 1741 }, 1742 { 1743 Tmpl: &Template{ 1744 ChangeMode: "signal", 1745 }, 1746 Fail: true, 1747 ContainsErrs: []string{ 1748 "specify signal value", 1749 }, 1750 }, 1751 { 1752 Tmpl: &Template{ 1753 SourcePath: "foo", 1754 DestPath: "../../root", 1755 ChangeMode: "noop", 1756 }, 1757 Fail: true, 1758 ContainsErrs: []string{ 1759 "destination escapes", 1760 }, 1761 }, 1762 { 1763 Tmpl: &Template{ 1764 SourcePath: "foo", 1765 DestPath: "local/foo", 1766 ChangeMode: "noop", 1767 }, 1768 Fail: false, 1769 }, 1770 { 1771 Tmpl: &Template{ 1772 SourcePath: "foo", 1773 DestPath: "local/foo", 1774 ChangeMode: "noop", 1775 Perms: "0444", 1776 }, 1777 Fail: false, 1778 }, 1779 { 1780 Tmpl: &Template{ 1781 SourcePath: "foo", 1782 DestPath: "local/foo", 1783 ChangeMode: "noop", 1784 Perms: "zza", 1785 }, 1786 Fail: true, 1787 ContainsErrs: []string{ 1788 "as octal", 1789 }, 1790 }, 1791 } 1792 1793 for i, c := range cases { 1794 err := c.Tmpl.Validate() 1795 if err != nil { 1796 if !c.Fail { 1797 t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err) 1798 } 1799 1800 e := err.Error() 1801 for _, exp := range c.ContainsErrs { 1802 if !strings.Contains(e, exp) { 1803 t.Fatalf("Cased %d: should have contained error %q: %q", i+1, exp, e) 1804 } 1805 } 1806 } else if c.Fail { 1807 t.Fatalf("Case %d: should have failed: %v", i+1, err) 1808 } 1809 } 1810 } 1811 1812 func TestConstraint_Validate(t *testing.T) { 1813 c := &Constraint{} 1814 err := c.Validate() 1815 mErr := err.(*multierror.Error) 1816 if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") { 1817 t.Fatalf("err: %s", err) 1818 } 1819 1820 c = &Constraint{ 1821 LTarget: "$attr.kernel.name", 1822 RTarget: "linux", 1823 Operand: "=", 1824 } 1825 err = c.Validate() 1826 require.NoError(t, err) 1827 1828 // Perform additional regexp validation 1829 c.Operand = ConstraintRegex 1830 c.RTarget = "(foo" 1831 err = c.Validate() 1832 mErr = err.(*multierror.Error) 1833 if !strings.Contains(mErr.Errors[0].Error(), "missing closing") { 1834 t.Fatalf("err: %s", err) 1835 } 1836 1837 // Perform version validation 1838 c.Operand = ConstraintVersion 1839 c.RTarget = "~> foo" 1840 err = c.Validate() 1841 mErr = err.(*multierror.Error) 1842 if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") { 1843 t.Fatalf("err: %s", err) 1844 } 1845 1846 // Perform semver validation 1847 c.Operand = ConstraintSemver 1848 err = c.Validate() 1849 require.Error(t, err) 1850 require.Contains(t, err.Error(), "Malformed constraint") 1851 1852 c.RTarget = ">= 0.6.1" 1853 require.NoError(t, c.Validate()) 1854 1855 // Perform distinct_property validation 1856 c.Operand = ConstraintDistinctProperty 1857 c.RTarget = "0" 1858 err = c.Validate() 1859 mErr = err.(*multierror.Error) 1860 if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") { 1861 t.Fatalf("err: %s", err) 1862 } 1863 1864 c.RTarget = "-1" 1865 err = c.Validate() 1866 mErr = err.(*multierror.Error) 1867 if !strings.Contains(mErr.Errors[0].Error(), "to uint64") { 1868 t.Fatalf("err: %s", err) 1869 } 1870 1871 // Perform distinct_hosts validation 1872 c.Operand = ConstraintDistinctHosts 1873 c.LTarget = "" 1874 c.RTarget = "" 1875 if err := c.Validate(); err != nil { 1876 t.Fatalf("expected valid constraint: %v", err) 1877 } 1878 1879 // Perform set_contains* validation 1880 c.RTarget = "" 1881 for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} { 1882 c.Operand = o 1883 err = c.Validate() 1884 mErr = err.(*multierror.Error) 1885 if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") { 1886 t.Fatalf("err: %s", err) 1887 } 1888 } 1889 1890 // Perform LTarget validation 1891 c.Operand = ConstraintRegex 1892 c.RTarget = "foo" 1893 c.LTarget = "" 1894 err = c.Validate() 1895 mErr = err.(*multierror.Error) 1896 if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") { 1897 t.Fatalf("err: %s", err) 1898 } 1899 1900 // Perform constraint type validation 1901 c.Operand = "foo" 1902 err = c.Validate() 1903 mErr = err.(*multierror.Error) 1904 if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") { 1905 t.Fatalf("err: %s", err) 1906 } 1907 } 1908 1909 func TestAffinity_Validate(t *testing.T) { 1910 1911 type tc struct { 1912 affinity *Affinity 1913 err error 1914 name string 1915 } 1916 1917 testCases := []tc{ 1918 { 1919 affinity: &Affinity{}, 1920 err: fmt.Errorf("Missing affinity operand"), 1921 }, 1922 { 1923 affinity: &Affinity{ 1924 Operand: "foo", 1925 LTarget: "${meta.node_class}", 1926 Weight: 10, 1927 }, 1928 err: fmt.Errorf("Unknown affinity operator \"foo\""), 1929 }, 1930 { 1931 affinity: &Affinity{ 1932 Operand: "=", 1933 LTarget: "${meta.node_class}", 1934 Weight: 10, 1935 }, 1936 err: fmt.Errorf("Operator \"=\" requires an RTarget"), 1937 }, 1938 { 1939 affinity: &Affinity{ 1940 Operand: "=", 1941 LTarget: "${meta.node_class}", 1942 RTarget: "c4", 1943 Weight: 0, 1944 }, 1945 err: fmt.Errorf("Affinity weight cannot be zero"), 1946 }, 1947 { 1948 affinity: &Affinity{ 1949 Operand: "=", 1950 LTarget: "${meta.node_class}", 1951 RTarget: "c4", 1952 Weight: 110, 1953 }, 1954 err: fmt.Errorf("Affinity weight must be within the range [-100,100]"), 1955 }, 1956 { 1957 affinity: &Affinity{ 1958 Operand: "=", 1959 LTarget: "${node.class}", 1960 Weight: 10, 1961 }, 1962 err: fmt.Errorf("Operator \"=\" requires an RTarget"), 1963 }, 1964 { 1965 affinity: &Affinity{ 1966 Operand: "version", 1967 LTarget: "${meta.os}", 1968 RTarget: ">>2.0", 1969 Weight: 110, 1970 }, 1971 err: fmt.Errorf("Version affinity is invalid"), 1972 }, 1973 { 1974 affinity: &Affinity{ 1975 Operand: "regexp", 1976 LTarget: "${meta.os}", 1977 RTarget: "\\K2.0", 1978 Weight: 100, 1979 }, 1980 err: fmt.Errorf("Regular expression failed to compile"), 1981 }, 1982 } 1983 1984 for _, tc := range testCases { 1985 t.Run(tc.name, func(t *testing.T) { 1986 err := tc.affinity.Validate() 1987 if tc.err != nil { 1988 require.NotNil(t, err) 1989 require.Contains(t, err.Error(), tc.err.Error()) 1990 } else { 1991 require.Nil(t, err) 1992 } 1993 }) 1994 } 1995 } 1996 1997 func TestUpdateStrategy_Validate(t *testing.T) { 1998 u := &UpdateStrategy{ 1999 MaxParallel: -1, 2000 HealthCheck: "foo", 2001 MinHealthyTime: -10, 2002 HealthyDeadline: -15, 2003 ProgressDeadline: -25, 2004 AutoRevert: false, 2005 Canary: -1, 2006 } 2007 2008 err := u.Validate() 2009 mErr := err.(*multierror.Error) 2010 if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") { 2011 t.Fatalf("err: %s", err) 2012 } 2013 if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than zero") { 2014 t.Fatalf("err: %s", err) 2015 } 2016 if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") { 2017 t.Fatalf("err: %s", err) 2018 } 2019 if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") { 2020 t.Fatalf("err: %s", err) 2021 } 2022 if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") { 2023 t.Fatalf("err: %s", err) 2024 } 2025 if !strings.Contains(mErr.Errors[5].Error(), "Progress deadline must be zero or greater") { 2026 t.Fatalf("err: %s", err) 2027 } 2028 if !strings.Contains(mErr.Errors[6].Error(), "Minimum healthy time must be less than healthy deadline") { 2029 t.Fatalf("err: %s", err) 2030 } 2031 if !strings.Contains(mErr.Errors[7].Error(), "Healthy deadline must be less than progress deadline") { 2032 t.Fatalf("err: %s", err) 2033 } 2034 } 2035 2036 func TestResource_NetIndex(t *testing.T) { 2037 r := &Resources{ 2038 Networks: []*NetworkResource{ 2039 {Device: "eth0"}, 2040 {Device: "lo0"}, 2041 {Device: ""}, 2042 }, 2043 } 2044 if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 { 2045 t.Fatalf("Bad: %d", idx) 2046 } 2047 if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 { 2048 t.Fatalf("Bad: %d", idx) 2049 } 2050 if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 { 2051 t.Fatalf("Bad: %d", idx) 2052 } 2053 } 2054 2055 func TestResource_Superset(t *testing.T) { 2056 r1 := &Resources{ 2057 CPU: 2000, 2058 MemoryMB: 2048, 2059 DiskMB: 10000, 2060 } 2061 r2 := &Resources{ 2062 CPU: 2000, 2063 MemoryMB: 1024, 2064 DiskMB: 5000, 2065 } 2066 2067 if s, _ := r1.Superset(r1); !s { 2068 t.Fatalf("bad") 2069 } 2070 if s, _ := r1.Superset(r2); !s { 2071 t.Fatalf("bad") 2072 } 2073 if s, _ := r2.Superset(r1); s { 2074 t.Fatalf("bad") 2075 } 2076 if s, _ := r2.Superset(r2); !s { 2077 t.Fatalf("bad") 2078 } 2079 } 2080 2081 func TestResource_Add(t *testing.T) { 2082 r1 := &Resources{ 2083 CPU: 2000, 2084 MemoryMB: 2048, 2085 DiskMB: 10000, 2086 Networks: []*NetworkResource{ 2087 { 2088 CIDR: "10.0.0.0/8", 2089 MBits: 100, 2090 ReservedPorts: []Port{{"ssh", 22, 0}}, 2091 }, 2092 }, 2093 } 2094 r2 := &Resources{ 2095 CPU: 2000, 2096 MemoryMB: 1024, 2097 DiskMB: 5000, 2098 Networks: []*NetworkResource{ 2099 { 2100 IP: "10.0.0.1", 2101 MBits: 50, 2102 ReservedPorts: []Port{{"web", 80, 0}}, 2103 }, 2104 }, 2105 } 2106 2107 err := r1.Add(r2) 2108 if err != nil { 2109 t.Fatalf("Err: %v", err) 2110 } 2111 2112 expect := &Resources{ 2113 CPU: 3000, 2114 MemoryMB: 3072, 2115 DiskMB: 15000, 2116 Networks: []*NetworkResource{ 2117 { 2118 CIDR: "10.0.0.0/8", 2119 MBits: 150, 2120 ReservedPorts: []Port{{"ssh", 22, 0}, {"web", 80, 0}}, 2121 }, 2122 }, 2123 } 2124 2125 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 2126 t.Fatalf("bad: %#v %#v", expect, r1) 2127 } 2128 } 2129 2130 func TestResource_Add_Network(t *testing.T) { 2131 r1 := &Resources{} 2132 r2 := &Resources{ 2133 Networks: []*NetworkResource{ 2134 { 2135 MBits: 50, 2136 DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}}, 2137 }, 2138 }, 2139 } 2140 r3 := &Resources{ 2141 Networks: []*NetworkResource{ 2142 { 2143 MBits: 25, 2144 DynamicPorts: []Port{{"admin", 0, 8080}}, 2145 }, 2146 }, 2147 } 2148 2149 err := r1.Add(r2) 2150 if err != nil { 2151 t.Fatalf("Err: %v", err) 2152 } 2153 err = r1.Add(r3) 2154 if err != nil { 2155 t.Fatalf("Err: %v", err) 2156 } 2157 2158 expect := &Resources{ 2159 Networks: []*NetworkResource{ 2160 { 2161 MBits: 75, 2162 DynamicPorts: []Port{{"http", 0, 80}, {"https", 0, 443}, {"admin", 0, 8080}}, 2163 }, 2164 }, 2165 } 2166 2167 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 2168 t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0]) 2169 } 2170 } 2171 2172 func TestComparableResources_Subtract(t *testing.T) { 2173 r1 := &ComparableResources{ 2174 Flattened: AllocatedTaskResources{ 2175 Cpu: AllocatedCpuResources{ 2176 CpuShares: 2000, 2177 }, 2178 Memory: AllocatedMemoryResources{ 2179 MemoryMB: 2048, 2180 }, 2181 Networks: []*NetworkResource{ 2182 { 2183 CIDR: "10.0.0.0/8", 2184 MBits: 100, 2185 ReservedPorts: []Port{{"ssh", 22, 0}}, 2186 }, 2187 }, 2188 }, 2189 Shared: AllocatedSharedResources{ 2190 DiskMB: 10000, 2191 }, 2192 } 2193 2194 r2 := &ComparableResources{ 2195 Flattened: AllocatedTaskResources{ 2196 Cpu: AllocatedCpuResources{ 2197 CpuShares: 1000, 2198 }, 2199 Memory: AllocatedMemoryResources{ 2200 MemoryMB: 1024, 2201 }, 2202 Networks: []*NetworkResource{ 2203 { 2204 CIDR: "10.0.0.0/8", 2205 MBits: 20, 2206 ReservedPorts: []Port{{"ssh", 22, 0}}, 2207 }, 2208 }, 2209 }, 2210 Shared: AllocatedSharedResources{ 2211 DiskMB: 5000, 2212 }, 2213 } 2214 r1.Subtract(r2) 2215 2216 expect := &ComparableResources{ 2217 Flattened: AllocatedTaskResources{ 2218 Cpu: AllocatedCpuResources{ 2219 CpuShares: 1000, 2220 }, 2221 Memory: AllocatedMemoryResources{ 2222 MemoryMB: 1024, 2223 }, 2224 Networks: []*NetworkResource{ 2225 { 2226 CIDR: "10.0.0.0/8", 2227 MBits: 100, 2228 ReservedPorts: []Port{{"ssh", 22, 0}}, 2229 }, 2230 }, 2231 }, 2232 Shared: AllocatedSharedResources{ 2233 DiskMB: 5000, 2234 }, 2235 } 2236 2237 require := require.New(t) 2238 require.Equal(expect, r1) 2239 } 2240 2241 func TestEncodeDecode(t *testing.T) { 2242 type FooRequest struct { 2243 Foo string 2244 Bar int 2245 Baz bool 2246 } 2247 arg := &FooRequest{ 2248 Foo: "test", 2249 Bar: 42, 2250 Baz: true, 2251 } 2252 buf, err := Encode(1, arg) 2253 if err != nil { 2254 t.Fatalf("err: %v", err) 2255 } 2256 2257 var out FooRequest 2258 err = Decode(buf[1:], &out) 2259 if err != nil { 2260 t.Fatalf("err: %v", err) 2261 } 2262 2263 if !reflect.DeepEqual(arg, &out) { 2264 t.Fatalf("bad: %#v %#v", arg, out) 2265 } 2266 } 2267 2268 func BenchmarkEncodeDecode(b *testing.B) { 2269 job := testJob() 2270 2271 for i := 0; i < b.N; i++ { 2272 buf, err := Encode(1, job) 2273 if err != nil { 2274 b.Fatalf("err: %v", err) 2275 } 2276 2277 var out Job 2278 err = Decode(buf[1:], &out) 2279 if err != nil { 2280 b.Fatalf("err: %v", err) 2281 } 2282 } 2283 } 2284 2285 func TestInvalidServiceCheck(t *testing.T) { 2286 s := Service{ 2287 Name: "service-name", 2288 PortLabel: "bar", 2289 Checks: []*ServiceCheck{ 2290 { 2291 Name: "check-name", 2292 Type: "lol", 2293 }, 2294 }, 2295 } 2296 if err := s.Validate(); err == nil { 2297 t.Fatalf("Service should be invalid (invalid type)") 2298 } 2299 2300 s = Service{ 2301 Name: "service.name", 2302 PortLabel: "bar", 2303 } 2304 if err := s.ValidateName(s.Name); err == nil { 2305 t.Fatalf("Service should be invalid (contains a dot): %v", err) 2306 } 2307 2308 s = Service{ 2309 Name: "-my-service", 2310 PortLabel: "bar", 2311 } 2312 if err := s.Validate(); err == nil { 2313 t.Fatalf("Service should be invalid (begins with a hyphen): %v", err) 2314 } 2315 2316 s = Service{ 2317 Name: "my-service-${NOMAD_META_FOO}", 2318 PortLabel: "bar", 2319 } 2320 if err := s.Validate(); err != nil { 2321 t.Fatalf("Service should be valid: %v", err) 2322 } 2323 2324 s = Service{ 2325 Name: "my_service-${NOMAD_META_FOO}", 2326 PortLabel: "bar", 2327 } 2328 if err := s.Validate(); err == nil { 2329 t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err) 2330 } 2331 2332 s = Service{ 2333 Name: "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456", 2334 PortLabel: "bar", 2335 } 2336 if err := s.ValidateName(s.Name); err == nil { 2337 t.Fatalf("Service should be invalid (too long): %v", err) 2338 } 2339 2340 s = Service{ 2341 Name: "service-name", 2342 Checks: []*ServiceCheck{ 2343 { 2344 Name: "check-tcp", 2345 Type: ServiceCheckTCP, 2346 Interval: 5 * time.Second, 2347 Timeout: 2 * time.Second, 2348 }, 2349 { 2350 Name: "check-http", 2351 Type: ServiceCheckHTTP, 2352 Path: "/foo", 2353 Interval: 5 * time.Second, 2354 Timeout: 2 * time.Second, 2355 }, 2356 }, 2357 } 2358 if err := s.Validate(); err == nil { 2359 t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err) 2360 } 2361 2362 s = Service{ 2363 Name: "service-name", 2364 Checks: []*ServiceCheck{ 2365 { 2366 Name: "check-script", 2367 Type: ServiceCheckScript, 2368 Command: "/bin/date", 2369 Interval: 5 * time.Second, 2370 Timeout: 2 * time.Second, 2371 }, 2372 }, 2373 } 2374 if err := s.Validate(); err != nil { 2375 t.Fatalf("un-expected error: %v", err) 2376 } 2377 2378 s = Service{ 2379 Name: "service-name", 2380 Checks: []*ServiceCheck{ 2381 { 2382 Name: "tcp-check", 2383 Type: ServiceCheckTCP, 2384 Interval: 5 * time.Second, 2385 Timeout: 2 * time.Second, 2386 }, 2387 }, 2388 Connect: &ConsulConnect{ 2389 SidecarService: &ConsulSidecarService{}, 2390 }, 2391 } 2392 require.Error(t, s.Validate()) 2393 } 2394 2395 func TestDistinctCheckID(t *testing.T) { 2396 c1 := ServiceCheck{ 2397 Name: "web-health", 2398 Type: "http", 2399 Path: "/health", 2400 Interval: 2 * time.Second, 2401 Timeout: 3 * time.Second, 2402 } 2403 c2 := ServiceCheck{ 2404 Name: "web-health", 2405 Type: "http", 2406 Path: "/health1", 2407 Interval: 2 * time.Second, 2408 Timeout: 3 * time.Second, 2409 } 2410 2411 c3 := ServiceCheck{ 2412 Name: "web-health", 2413 Type: "http", 2414 Path: "/health", 2415 Interval: 4 * time.Second, 2416 Timeout: 3 * time.Second, 2417 } 2418 serviceID := "123" 2419 c1Hash := c1.Hash(serviceID) 2420 c2Hash := c2.Hash(serviceID) 2421 c3Hash := c3.Hash(serviceID) 2422 2423 if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash { 2424 t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash) 2425 } 2426 2427 } 2428 2429 func TestService_Canonicalize(t *testing.T) { 2430 job := "example" 2431 taskGroup := "cache" 2432 task := "redis" 2433 2434 s := Service{ 2435 Name: "${TASK}-db", 2436 } 2437 2438 s.Canonicalize(job, taskGroup, task) 2439 if s.Name != "redis-db" { 2440 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2441 } 2442 2443 s.Name = "db" 2444 s.Canonicalize(job, taskGroup, task) 2445 if s.Name != "db" { 2446 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2447 } 2448 2449 s.Name = "${JOB}-${TASKGROUP}-${TASK}-db" 2450 s.Canonicalize(job, taskGroup, task) 2451 if s.Name != "example-cache-redis-db" { 2452 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2453 } 2454 2455 s.Name = "${BASE}-db" 2456 s.Canonicalize(job, taskGroup, task) 2457 if s.Name != "example-cache-redis-db" { 2458 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2459 } 2460 2461 } 2462 2463 func TestService_Validate(t *testing.T) { 2464 s := Service{ 2465 Name: "testservice", 2466 } 2467 2468 s.Canonicalize("testjob", "testgroup", "testtask") 2469 2470 // Base service should be valid 2471 require.NoError(t, s.Validate()) 2472 2473 // Native Connect should be valid 2474 s.Connect = &ConsulConnect{ 2475 Native: true, 2476 } 2477 require.NoError(t, s.Validate()) 2478 2479 // Native Connect + Sidecar should be invalid 2480 s.Connect.SidecarService = &ConsulSidecarService{} 2481 require.Error(t, s.Validate()) 2482 } 2483 2484 func TestService_Equals(t *testing.T) { 2485 s := Service{ 2486 Name: "testservice", 2487 } 2488 2489 s.Canonicalize("testjob", "testgroup", "testtask") 2490 2491 o := s.Copy() 2492 2493 // Base service should be equal to copy of itself 2494 require.True(t, s.Equals(o)) 2495 2496 // create a helper to assert a diff and reset the struct 2497 assertDiff := func() { 2498 require.False(t, s.Equals(o)) 2499 o = s.Copy() 2500 require.True(t, s.Equals(o), "bug in copy") 2501 } 2502 2503 // Changing any field should cause inequality 2504 o.Name = "diff" 2505 assertDiff() 2506 2507 o.PortLabel = "diff" 2508 assertDiff() 2509 2510 o.AddressMode = AddressModeDriver 2511 assertDiff() 2512 2513 o.Tags = []string{"diff"} 2514 assertDiff() 2515 2516 o.CanaryTags = []string{"diff"} 2517 assertDiff() 2518 2519 o.Checks = []*ServiceCheck{{Name: "diff"}} 2520 assertDiff() 2521 2522 o.Connect = &ConsulConnect{Native: true} 2523 assertDiff() 2524 } 2525 2526 func TestJob_ExpandServiceNames(t *testing.T) { 2527 j := &Job{ 2528 Name: "my-job", 2529 TaskGroups: []*TaskGroup{ 2530 { 2531 Name: "web", 2532 Tasks: []*Task{ 2533 { 2534 Name: "frontend", 2535 Services: []*Service{ 2536 { 2537 Name: "${BASE}-default", 2538 }, 2539 { 2540 Name: "jmx", 2541 }, 2542 }, 2543 }, 2544 }, 2545 }, 2546 { 2547 Name: "admin", 2548 Tasks: []*Task{ 2549 { 2550 Name: "admin-web", 2551 }, 2552 }, 2553 }, 2554 }, 2555 } 2556 2557 j.Canonicalize() 2558 2559 service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name 2560 if service1Name != "my-job-web-frontend-default" { 2561 t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name) 2562 } 2563 2564 service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name 2565 if service2Name != "jmx" { 2566 t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name) 2567 } 2568 2569 } 2570 2571 func TestJob_CombinedTaskMeta(t *testing.T) { 2572 j := &Job{ 2573 Meta: map[string]string{ 2574 "job_test": "job", 2575 "group_test": "job", 2576 "task_test": "job", 2577 }, 2578 TaskGroups: []*TaskGroup{ 2579 { 2580 Name: "group", 2581 Meta: map[string]string{ 2582 "group_test": "group", 2583 "task_test": "group", 2584 }, 2585 Tasks: []*Task{ 2586 { 2587 Name: "task", 2588 Meta: map[string]string{ 2589 "task_test": "task", 2590 }, 2591 }, 2592 }, 2593 }, 2594 }, 2595 } 2596 2597 require := require.New(t) 2598 require.EqualValues(map[string]string{ 2599 "job_test": "job", 2600 "group_test": "group", 2601 "task_test": "task", 2602 }, j.CombinedTaskMeta("group", "task")) 2603 require.EqualValues(map[string]string{ 2604 "job_test": "job", 2605 "group_test": "group", 2606 "task_test": "group", 2607 }, j.CombinedTaskMeta("group", "")) 2608 require.EqualValues(map[string]string{ 2609 "job_test": "job", 2610 "group_test": "job", 2611 "task_test": "job", 2612 }, j.CombinedTaskMeta("", "task")) 2613 2614 } 2615 2616 func TestPeriodicConfig_EnabledInvalid(t *testing.T) { 2617 // Create a config that is enabled but with no interval specified. 2618 p := &PeriodicConfig{Enabled: true} 2619 if err := p.Validate(); err == nil { 2620 t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid") 2621 } 2622 2623 // Create a config that is enabled, with a spec but no type specified. 2624 p = &PeriodicConfig{Enabled: true, Spec: "foo"} 2625 if err := p.Validate(); err == nil { 2626 t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid") 2627 } 2628 2629 // Create a config that is enabled, with a spec type but no spec specified. 2630 p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron} 2631 if err := p.Validate(); err == nil { 2632 t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid") 2633 } 2634 2635 // Create a config that is enabled, with a bad time zone. 2636 p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"} 2637 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") { 2638 t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err) 2639 } 2640 } 2641 2642 func TestPeriodicConfig_InvalidCron(t *testing.T) { 2643 specs := []string{"foo", "* *", "@foo"} 2644 for _, spec := range specs { 2645 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2646 p.Canonicalize() 2647 if err := p.Validate(); err == nil { 2648 t.Fatal("Invalid cron spec") 2649 } 2650 } 2651 } 2652 2653 func TestPeriodicConfig_ValidCron(t *testing.T) { 2654 specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} 2655 for _, spec := range specs { 2656 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2657 p.Canonicalize() 2658 if err := p.Validate(); err != nil { 2659 t.Fatal("Passed valid cron") 2660 } 2661 } 2662 } 2663 2664 func TestPeriodicConfig_NextCron(t *testing.T) { 2665 require := require.New(t) 2666 2667 type testExpectation struct { 2668 Time time.Time 2669 HasError bool 2670 ErrorMsg string 2671 } 2672 2673 from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) 2674 specs := []string{"0 0 29 2 * 1980", 2675 "*/5 * * * *", 2676 "1 15-0 * * 1-5"} 2677 expected := []*testExpectation{ 2678 { 2679 Time: time.Time{}, 2680 HasError: false, 2681 }, 2682 { 2683 Time: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC), 2684 HasError: false, 2685 }, 2686 { 2687 Time: time.Time{}, 2688 HasError: true, 2689 ErrorMsg: "failed parsing cron expression", 2690 }, 2691 } 2692 2693 for i, spec := range specs { 2694 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2695 p.Canonicalize() 2696 n, err := p.Next(from) 2697 nextExpected := expected[i] 2698 2699 require.Equal(nextExpected.Time, n) 2700 require.Equal(err != nil, nextExpected.HasError) 2701 if err != nil { 2702 require.True(strings.Contains(err.Error(), nextExpected.ErrorMsg)) 2703 } 2704 } 2705 } 2706 2707 func TestPeriodicConfig_ValidTimeZone(t *testing.T) { 2708 zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} 2709 for _, zone := range zones { 2710 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} 2711 p.Canonicalize() 2712 if err := p.Validate(); err != nil { 2713 t.Fatalf("Valid tz errored: %v", err) 2714 } 2715 } 2716 } 2717 2718 func TestPeriodicConfig_DST(t *testing.T) { 2719 require := require.New(t) 2720 2721 // On Sun, Mar 12, 2:00 am 2017: +1 hour UTC 2722 p := &PeriodicConfig{ 2723 Enabled: true, 2724 SpecType: PeriodicSpecCron, 2725 Spec: "0 2 11-12 3 * 2017", 2726 TimeZone: "America/Los_Angeles", 2727 } 2728 p.Canonicalize() 2729 2730 t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location) 2731 t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location) 2732 2733 // E1 is an 8 hour adjustment, E2 is a 7 hour adjustment 2734 e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC) 2735 e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC) 2736 2737 n1, err := p.Next(t1) 2738 require.Nil(err) 2739 2740 n2, err := p.Next(t2) 2741 require.Nil(err) 2742 2743 require.Equal(e1, n1.UTC()) 2744 require.Equal(e2, n2.UTC()) 2745 } 2746 2747 func TestRestartPolicy_Validate(t *testing.T) { 2748 // Policy with acceptable restart options passes 2749 p := &RestartPolicy{ 2750 Mode: RestartPolicyModeFail, 2751 Attempts: 0, 2752 Interval: 5 * time.Second, 2753 } 2754 if err := p.Validate(); err != nil { 2755 t.Fatalf("err: %v", err) 2756 } 2757 2758 // Policy with ambiguous restart options fails 2759 p = &RestartPolicy{ 2760 Mode: RestartPolicyModeDelay, 2761 Attempts: 0, 2762 Interval: 5 * time.Second, 2763 } 2764 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") { 2765 t.Fatalf("expect ambiguity error, got: %v", err) 2766 } 2767 2768 // Bad policy mode fails 2769 p = &RestartPolicy{ 2770 Mode: "nope", 2771 Attempts: 1, 2772 Interval: 5 * time.Second, 2773 } 2774 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") { 2775 t.Fatalf("expect mode error, got: %v", err) 2776 } 2777 2778 // Fails when attempts*delay does not fit inside interval 2779 p = &RestartPolicy{ 2780 Mode: RestartPolicyModeDelay, 2781 Attempts: 3, 2782 Delay: 5 * time.Second, 2783 Interval: 5 * time.Second, 2784 } 2785 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") { 2786 t.Fatalf("expect restart interval error, got: %v", err) 2787 } 2788 2789 // Fails when interval is to small 2790 p = &RestartPolicy{ 2791 Mode: RestartPolicyModeDelay, 2792 Attempts: 3, 2793 Delay: 5 * time.Second, 2794 Interval: 2 * time.Second, 2795 } 2796 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") { 2797 t.Fatalf("expect interval too small error, got: %v", err) 2798 } 2799 } 2800 2801 func TestReschedulePolicy_Validate(t *testing.T) { 2802 type testCase struct { 2803 desc string 2804 ReschedulePolicy *ReschedulePolicy 2805 errors []error 2806 } 2807 2808 testCases := []testCase{ 2809 { 2810 desc: "Nil", 2811 }, 2812 { 2813 desc: "Disabled", 2814 ReschedulePolicy: &ReschedulePolicy{ 2815 Attempts: 0, 2816 Interval: 0 * time.Second}, 2817 }, 2818 { 2819 desc: "Disabled", 2820 ReschedulePolicy: &ReschedulePolicy{ 2821 Attempts: -1, 2822 Interval: 5 * time.Minute}, 2823 }, 2824 { 2825 desc: "Valid Linear Delay", 2826 ReschedulePolicy: &ReschedulePolicy{ 2827 Attempts: 1, 2828 Interval: 5 * time.Minute, 2829 Delay: 10 * time.Second, 2830 DelayFunction: "constant"}, 2831 }, 2832 { 2833 desc: "Valid Exponential Delay", 2834 ReschedulePolicy: &ReschedulePolicy{ 2835 Attempts: 5, 2836 Interval: 1 * time.Hour, 2837 Delay: 30 * time.Second, 2838 MaxDelay: 5 * time.Minute, 2839 DelayFunction: "exponential"}, 2840 }, 2841 { 2842 desc: "Valid Fibonacci Delay", 2843 ReschedulePolicy: &ReschedulePolicy{ 2844 Attempts: 5, 2845 Interval: 15 * time.Minute, 2846 Delay: 10 * time.Second, 2847 MaxDelay: 5 * time.Minute, 2848 DelayFunction: "fibonacci"}, 2849 }, 2850 { 2851 desc: "Invalid delay function", 2852 ReschedulePolicy: &ReschedulePolicy{ 2853 Attempts: 1, 2854 Interval: 1 * time.Second, 2855 DelayFunction: "blah"}, 2856 errors: []error{ 2857 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 2858 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2859 fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions), 2860 }, 2861 }, 2862 { 2863 desc: "Invalid delay ceiling", 2864 ReschedulePolicy: &ReschedulePolicy{ 2865 Attempts: 1, 2866 Interval: 8 * time.Second, 2867 DelayFunction: "exponential", 2868 Delay: 15 * time.Second, 2869 MaxDelay: 5 * time.Second}, 2870 errors: []error{ 2871 fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", 2872 15*time.Second, 5*time.Second), 2873 }, 2874 }, 2875 { 2876 desc: "Invalid delay and interval", 2877 ReschedulePolicy: &ReschedulePolicy{ 2878 Attempts: 1, 2879 Interval: 1 * time.Second, 2880 DelayFunction: "constant"}, 2881 errors: []error{ 2882 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 2883 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2884 }, 2885 }, { 2886 // Should suggest 2h40m as the interval 2887 desc: "Invalid Attempts - linear delay", 2888 ReschedulePolicy: &ReschedulePolicy{ 2889 Attempts: 10, 2890 Interval: 1 * time.Hour, 2891 Delay: 20 * time.Minute, 2892 DelayFunction: "constant", 2893 }, 2894 errors: []error{ 2895 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+ 2896 " delay function %q", 3, time.Hour, 20*time.Minute, "constant"), 2897 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 2898 200*time.Minute, 10), 2899 }, 2900 }, 2901 { 2902 // Should suggest 4h40m as the interval 2903 // Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40} 2904 desc: "Invalid Attempts - exponential delay", 2905 ReschedulePolicy: &ReschedulePolicy{ 2906 Attempts: 10, 2907 Interval: 30 * time.Minute, 2908 Delay: 5 * time.Minute, 2909 MaxDelay: 40 * time.Minute, 2910 DelayFunction: "exponential", 2911 }, 2912 errors: []error{ 2913 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 2914 "delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute, 2915 "exponential", 40*time.Minute), 2916 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 2917 280*time.Minute, 10), 2918 }, 2919 }, 2920 { 2921 // Should suggest 8h as the interval 2922 // Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80} 2923 desc: "Invalid Attempts - fibonacci delay", 2924 ReschedulePolicy: &ReschedulePolicy{ 2925 Attempts: 10, 2926 Interval: 1 * time.Hour, 2927 Delay: 20 * time.Minute, 2928 MaxDelay: 80 * time.Minute, 2929 DelayFunction: "fibonacci", 2930 }, 2931 errors: []error{ 2932 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 2933 "delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute, 2934 "fibonacci", 80*time.Minute), 2935 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 2936 480*time.Minute, 10), 2937 }, 2938 }, 2939 { 2940 desc: "Ambiguous Unlimited config, has both attempts and unlimited set", 2941 ReschedulePolicy: &ReschedulePolicy{ 2942 Attempts: 1, 2943 Unlimited: true, 2944 DelayFunction: "exponential", 2945 Delay: 5 * time.Minute, 2946 MaxDelay: 1 * time.Hour, 2947 }, 2948 errors: []error{ 2949 fmt.Errorf("Interval must be a non zero value if Attempts > 0"), 2950 fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true), 2951 }, 2952 }, 2953 { 2954 desc: "Invalid Unlimited config", 2955 ReschedulePolicy: &ReschedulePolicy{ 2956 Attempts: 1, 2957 Interval: 1 * time.Second, 2958 Unlimited: true, 2959 DelayFunction: "exponential", 2960 }, 2961 errors: []error{ 2962 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2963 fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2964 }, 2965 }, 2966 { 2967 desc: "Valid Unlimited config", 2968 ReschedulePolicy: &ReschedulePolicy{ 2969 Unlimited: true, 2970 DelayFunction: "exponential", 2971 Delay: 5 * time.Second, 2972 MaxDelay: 1 * time.Hour, 2973 }, 2974 }, 2975 } 2976 2977 for _, tc := range testCases { 2978 t.Run(tc.desc, func(t *testing.T) { 2979 require := require.New(t) 2980 gotErr := tc.ReschedulePolicy.Validate() 2981 if tc.errors != nil { 2982 // Validate all errors 2983 for _, err := range tc.errors { 2984 require.Contains(gotErr.Error(), err.Error()) 2985 } 2986 } else { 2987 require.Nil(gotErr) 2988 } 2989 }) 2990 } 2991 } 2992 2993 func TestAllocation_Index(t *testing.T) { 2994 a1 := Allocation{ 2995 Name: "example.cache[1]", 2996 TaskGroup: "cache", 2997 JobID: "example", 2998 Job: &Job{ 2999 ID: "example", 3000 TaskGroups: []*TaskGroup{{Name: "cache"}}}, 3001 } 3002 e1 := uint(1) 3003 a2 := a1.Copy() 3004 a2.Name = "example.cache[713127]" 3005 e2 := uint(713127) 3006 3007 if a1.Index() != e1 || a2.Index() != e2 { 3008 t.Fatalf("Got %d and %d", a1.Index(), a2.Index()) 3009 } 3010 } 3011 3012 func TestTaskArtifact_Validate_Source(t *testing.T) { 3013 valid := &TaskArtifact{GetterSource: "google.com"} 3014 if err := valid.Validate(); err != nil { 3015 t.Fatalf("unexpected error: %v", err) 3016 } 3017 } 3018 3019 func TestTaskArtifact_Validate_Dest(t *testing.T) { 3020 valid := &TaskArtifact{GetterSource: "google.com"} 3021 if err := valid.Validate(); err != nil { 3022 t.Fatalf("unexpected error: %v", err) 3023 } 3024 3025 valid.RelativeDest = "local/" 3026 if err := valid.Validate(); err != nil { 3027 t.Fatalf("unexpected error: %v", err) 3028 } 3029 3030 valid.RelativeDest = "local/.." 3031 if err := valid.Validate(); err != nil { 3032 t.Fatalf("unexpected error: %v", err) 3033 } 3034 3035 valid.RelativeDest = "local/../../.." 3036 if err := valid.Validate(); err == nil { 3037 t.Fatalf("expected error: %v", err) 3038 } 3039 } 3040 3041 // TestTaskArtifact_Hash asserts an artifact's hash changes when any of the 3042 // fields change. 3043 func TestTaskArtifact_Hash(t *testing.T) { 3044 t.Parallel() 3045 3046 cases := []TaskArtifact{ 3047 {}, 3048 { 3049 GetterSource: "a", 3050 }, 3051 { 3052 GetterSource: "b", 3053 }, 3054 { 3055 GetterSource: "b", 3056 GetterOptions: map[string]string{"c": "c"}, 3057 }, 3058 { 3059 GetterSource: "b", 3060 GetterOptions: map[string]string{ 3061 "c": "c", 3062 "d": "d", 3063 }, 3064 }, 3065 { 3066 GetterSource: "b", 3067 GetterOptions: map[string]string{ 3068 "c": "c", 3069 "d": "e", 3070 }, 3071 }, 3072 { 3073 GetterSource: "b", 3074 GetterOptions: map[string]string{ 3075 "c": "c", 3076 "d": "e", 3077 }, 3078 GetterMode: "f", 3079 }, 3080 { 3081 GetterSource: "b", 3082 GetterOptions: map[string]string{ 3083 "c": "c", 3084 "d": "e", 3085 }, 3086 GetterMode: "g", 3087 }, 3088 { 3089 GetterSource: "b", 3090 GetterOptions: map[string]string{ 3091 "c": "c", 3092 "d": "e", 3093 }, 3094 GetterMode: "g", 3095 RelativeDest: "h", 3096 }, 3097 { 3098 GetterSource: "b", 3099 GetterOptions: map[string]string{ 3100 "c": "c", 3101 "d": "e", 3102 }, 3103 GetterMode: "g", 3104 RelativeDest: "i", 3105 }, 3106 } 3107 3108 // Map of hash to source 3109 hashes := make(map[string]TaskArtifact, len(cases)) 3110 for _, tc := range cases { 3111 h := tc.Hash() 3112 3113 // Hash should be deterministic 3114 require.Equal(t, h, tc.Hash()) 3115 3116 // Hash should be unique 3117 if orig, ok := hashes[h]; ok { 3118 require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n", 3119 pretty.Sprint(tc), pretty.Sprint(orig), 3120 ) 3121 } 3122 hashes[h] = tc 3123 } 3124 3125 require.Len(t, hashes, len(cases)) 3126 } 3127 3128 func TestAllocation_ShouldMigrate(t *testing.T) { 3129 alloc := Allocation{ 3130 PreviousAllocation: "123", 3131 TaskGroup: "foo", 3132 Job: &Job{ 3133 TaskGroups: []*TaskGroup{ 3134 { 3135 Name: "foo", 3136 EphemeralDisk: &EphemeralDisk{ 3137 Migrate: true, 3138 Sticky: true, 3139 }, 3140 }, 3141 }, 3142 }, 3143 } 3144 3145 if !alloc.ShouldMigrate() { 3146 t.Fatalf("bad: %v", alloc) 3147 } 3148 3149 alloc1 := Allocation{ 3150 PreviousAllocation: "123", 3151 TaskGroup: "foo", 3152 Job: &Job{ 3153 TaskGroups: []*TaskGroup{ 3154 { 3155 Name: "foo", 3156 EphemeralDisk: &EphemeralDisk{}, 3157 }, 3158 }, 3159 }, 3160 } 3161 3162 if alloc1.ShouldMigrate() { 3163 t.Fatalf("bad: %v", alloc) 3164 } 3165 3166 alloc2 := Allocation{ 3167 PreviousAllocation: "123", 3168 TaskGroup: "foo", 3169 Job: &Job{ 3170 TaskGroups: []*TaskGroup{ 3171 { 3172 Name: "foo", 3173 EphemeralDisk: &EphemeralDisk{ 3174 Sticky: false, 3175 Migrate: true, 3176 }, 3177 }, 3178 }, 3179 }, 3180 } 3181 3182 if alloc2.ShouldMigrate() { 3183 t.Fatalf("bad: %v", alloc) 3184 } 3185 3186 alloc3 := Allocation{ 3187 PreviousAllocation: "123", 3188 TaskGroup: "foo", 3189 Job: &Job{ 3190 TaskGroups: []*TaskGroup{ 3191 { 3192 Name: "foo", 3193 }, 3194 }, 3195 }, 3196 } 3197 3198 if alloc3.ShouldMigrate() { 3199 t.Fatalf("bad: %v", alloc) 3200 } 3201 3202 // No previous 3203 alloc4 := Allocation{ 3204 TaskGroup: "foo", 3205 Job: &Job{ 3206 TaskGroups: []*TaskGroup{ 3207 { 3208 Name: "foo", 3209 EphemeralDisk: &EphemeralDisk{ 3210 Migrate: true, 3211 Sticky: true, 3212 }, 3213 }, 3214 }, 3215 }, 3216 } 3217 3218 if alloc4.ShouldMigrate() { 3219 t.Fatalf("bad: %v", alloc4) 3220 } 3221 } 3222 3223 func TestTaskArtifact_Validate_Checksum(t *testing.T) { 3224 cases := []struct { 3225 Input *TaskArtifact 3226 Err bool 3227 }{ 3228 { 3229 &TaskArtifact{ 3230 GetterSource: "foo.com", 3231 GetterOptions: map[string]string{ 3232 "checksum": "no-type", 3233 }, 3234 }, 3235 true, 3236 }, 3237 { 3238 &TaskArtifact{ 3239 GetterSource: "foo.com", 3240 GetterOptions: map[string]string{ 3241 "checksum": "md5:toosmall", 3242 }, 3243 }, 3244 true, 3245 }, 3246 { 3247 &TaskArtifact{ 3248 GetterSource: "foo.com", 3249 GetterOptions: map[string]string{ 3250 "checksum": "invalid:type", 3251 }, 3252 }, 3253 true, 3254 }, 3255 { 3256 &TaskArtifact{ 3257 GetterSource: "foo.com", 3258 GetterOptions: map[string]string{ 3259 "checksum": "md5:${ARTIFACT_CHECKSUM}", 3260 }, 3261 }, 3262 false, 3263 }, 3264 } 3265 3266 for i, tc := range cases { 3267 err := tc.Input.Validate() 3268 if (err != nil) != tc.Err { 3269 t.Fatalf("case %d: %v", i, err) 3270 continue 3271 } 3272 } 3273 } 3274 3275 func TestPlan_NormalizeAllocations(t *testing.T) { 3276 t.Parallel() 3277 plan := &Plan{ 3278 NodeUpdate: make(map[string][]*Allocation), 3279 NodePreemptions: make(map[string][]*Allocation), 3280 } 3281 stoppedAlloc := MockAlloc() 3282 desiredDesc := "Desired desc" 3283 plan.AppendStoppedAlloc(stoppedAlloc, desiredDesc, AllocClientStatusLost) 3284 preemptedAlloc := MockAlloc() 3285 preemptingAllocID := uuid.Generate() 3286 plan.AppendPreemptedAlloc(preemptedAlloc, preemptingAllocID) 3287 3288 plan.NormalizeAllocations() 3289 3290 actualStoppedAlloc := plan.NodeUpdate[stoppedAlloc.NodeID][0] 3291 expectedStoppedAlloc := &Allocation{ 3292 ID: stoppedAlloc.ID, 3293 DesiredDescription: desiredDesc, 3294 ClientStatus: AllocClientStatusLost, 3295 } 3296 assert.Equal(t, expectedStoppedAlloc, actualStoppedAlloc) 3297 actualPreemptedAlloc := plan.NodePreemptions[preemptedAlloc.NodeID][0] 3298 expectedPreemptedAlloc := &Allocation{ 3299 ID: preemptedAlloc.ID, 3300 PreemptedByAllocation: preemptingAllocID, 3301 } 3302 assert.Equal(t, expectedPreemptedAlloc, actualPreemptedAlloc) 3303 } 3304 3305 func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 3306 t.Parallel() 3307 plan := &Plan{ 3308 NodeUpdate: make(map[string][]*Allocation), 3309 } 3310 alloc := MockAlloc() 3311 desiredDesc := "Desired desc" 3312 3313 plan.AppendStoppedAlloc(alloc, desiredDesc, AllocClientStatusLost) 3314 3315 appendedAlloc := plan.NodeUpdate[alloc.NodeID][0] 3316 expectedAlloc := new(Allocation) 3317 *expectedAlloc = *alloc 3318 expectedAlloc.DesiredDescription = desiredDesc 3319 expectedAlloc.DesiredStatus = AllocDesiredStatusStop 3320 expectedAlloc.ClientStatus = AllocClientStatusLost 3321 expectedAlloc.Job = nil 3322 assert.Equal(t, expectedAlloc, appendedAlloc) 3323 assert.Equal(t, alloc.Job, plan.Job) 3324 } 3325 3326 func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) { 3327 t.Parallel() 3328 plan := &Plan{ 3329 NodePreemptions: make(map[string][]*Allocation), 3330 } 3331 alloc := MockAlloc() 3332 preemptingAllocID := uuid.Generate() 3333 3334 plan.AppendPreemptedAlloc(alloc, preemptingAllocID) 3335 3336 appendedAlloc := plan.NodePreemptions[alloc.NodeID][0] 3337 expectedAlloc := &Allocation{ 3338 ID: alloc.ID, 3339 PreemptedByAllocation: preemptingAllocID, 3340 JobID: alloc.JobID, 3341 Namespace: alloc.Namespace, 3342 DesiredStatus: AllocDesiredStatusEvict, 3343 DesiredDescription: fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID), 3344 AllocatedResources: alloc.AllocatedResources, 3345 TaskResources: alloc.TaskResources, 3346 SharedResources: alloc.SharedResources, 3347 } 3348 assert.Equal(t, expectedAlloc, appendedAlloc) 3349 } 3350 3351 func TestAllocation_MsgPackTags(t *testing.T) { 3352 t.Parallel() 3353 planType := reflect.TypeOf(Allocation{}) 3354 3355 msgPackTags, _ := planType.FieldByName("_struct") 3356 3357 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3358 } 3359 3360 func TestEvaluation_MsgPackTags(t *testing.T) { 3361 t.Parallel() 3362 planType := reflect.TypeOf(Evaluation{}) 3363 3364 msgPackTags, _ := planType.FieldByName("_struct") 3365 3366 assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`)) 3367 } 3368 3369 func TestAllocation_Terminated(t *testing.T) { 3370 type desiredState struct { 3371 ClientStatus string 3372 DesiredStatus string 3373 Terminated bool 3374 } 3375 3376 harness := []desiredState{ 3377 { 3378 ClientStatus: AllocClientStatusPending, 3379 DesiredStatus: AllocDesiredStatusStop, 3380 Terminated: false, 3381 }, 3382 { 3383 ClientStatus: AllocClientStatusRunning, 3384 DesiredStatus: AllocDesiredStatusStop, 3385 Terminated: false, 3386 }, 3387 { 3388 ClientStatus: AllocClientStatusFailed, 3389 DesiredStatus: AllocDesiredStatusStop, 3390 Terminated: true, 3391 }, 3392 { 3393 ClientStatus: AllocClientStatusFailed, 3394 DesiredStatus: AllocDesiredStatusRun, 3395 Terminated: true, 3396 }, 3397 } 3398 3399 for _, state := range harness { 3400 alloc := Allocation{} 3401 alloc.DesiredStatus = state.DesiredStatus 3402 alloc.ClientStatus = state.ClientStatus 3403 if alloc.Terminated() != state.Terminated { 3404 t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated()) 3405 } 3406 } 3407 } 3408 3409 func TestAllocation_ShouldReschedule(t *testing.T) { 3410 type testCase struct { 3411 Desc string 3412 FailTime time.Time 3413 ClientStatus string 3414 DesiredStatus string 3415 ReschedulePolicy *ReschedulePolicy 3416 RescheduleTrackers []*RescheduleEvent 3417 ShouldReschedule bool 3418 } 3419 3420 fail := time.Now() 3421 3422 harness := []testCase{ 3423 { 3424 Desc: "Reschedule when desired state is stop", 3425 ClientStatus: AllocClientStatusPending, 3426 DesiredStatus: AllocDesiredStatusStop, 3427 FailTime: fail, 3428 ReschedulePolicy: nil, 3429 ShouldReschedule: false, 3430 }, 3431 { 3432 Desc: "Disabled rescheduling", 3433 ClientStatus: AllocClientStatusFailed, 3434 DesiredStatus: AllocDesiredStatusRun, 3435 FailTime: fail, 3436 ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute}, 3437 ShouldReschedule: false, 3438 }, 3439 { 3440 Desc: "Reschedule when client status is complete", 3441 ClientStatus: AllocClientStatusComplete, 3442 DesiredStatus: AllocDesiredStatusRun, 3443 FailTime: fail, 3444 ReschedulePolicy: nil, 3445 ShouldReschedule: false, 3446 }, 3447 { 3448 Desc: "Reschedule with nil reschedule policy", 3449 ClientStatus: AllocClientStatusFailed, 3450 DesiredStatus: AllocDesiredStatusRun, 3451 FailTime: fail, 3452 ReschedulePolicy: nil, 3453 ShouldReschedule: false, 3454 }, 3455 { 3456 Desc: "Reschedule with unlimited and attempts >0", 3457 ClientStatus: AllocClientStatusFailed, 3458 DesiredStatus: AllocDesiredStatusRun, 3459 FailTime: fail, 3460 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true}, 3461 ShouldReschedule: true, 3462 }, 3463 { 3464 Desc: "Reschedule when client status is complete", 3465 ClientStatus: AllocClientStatusComplete, 3466 DesiredStatus: AllocDesiredStatusRun, 3467 FailTime: fail, 3468 ReschedulePolicy: nil, 3469 ShouldReschedule: false, 3470 }, 3471 { 3472 Desc: "Reschedule with policy when client status complete", 3473 ClientStatus: AllocClientStatusComplete, 3474 DesiredStatus: AllocDesiredStatusRun, 3475 FailTime: fail, 3476 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 3477 ShouldReschedule: false, 3478 }, 3479 { 3480 Desc: "Reschedule with no previous attempts", 3481 ClientStatus: AllocClientStatusFailed, 3482 DesiredStatus: AllocDesiredStatusRun, 3483 FailTime: fail, 3484 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 3485 ShouldReschedule: true, 3486 }, 3487 { 3488 Desc: "Reschedule with leftover attempts", 3489 ClientStatus: AllocClientStatusFailed, 3490 DesiredStatus: AllocDesiredStatusRun, 3491 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 3492 FailTime: fail, 3493 RescheduleTrackers: []*RescheduleEvent{ 3494 { 3495 RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(), 3496 }, 3497 }, 3498 ShouldReschedule: true, 3499 }, 3500 { 3501 Desc: "Reschedule with too old previous attempts", 3502 ClientStatus: AllocClientStatusFailed, 3503 DesiredStatus: AllocDesiredStatusRun, 3504 FailTime: fail, 3505 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute}, 3506 RescheduleTrackers: []*RescheduleEvent{ 3507 { 3508 RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(), 3509 }, 3510 }, 3511 ShouldReschedule: true, 3512 }, 3513 { 3514 Desc: "Reschedule with no leftover attempts", 3515 ClientStatus: AllocClientStatusFailed, 3516 DesiredStatus: AllocDesiredStatusRun, 3517 FailTime: fail, 3518 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 3519 RescheduleTrackers: []*RescheduleEvent{ 3520 { 3521 RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), 3522 }, 3523 { 3524 RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(), 3525 }, 3526 }, 3527 ShouldReschedule: false, 3528 }, 3529 } 3530 3531 for _, state := range harness { 3532 alloc := Allocation{} 3533 alloc.DesiredStatus = state.DesiredStatus 3534 alloc.ClientStatus = state.ClientStatus 3535 alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers} 3536 3537 t.Run(state.Desc, func(t *testing.T) { 3538 if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule { 3539 t.Fatalf("expected %v but got %v", state.ShouldReschedule, got) 3540 } 3541 }) 3542 3543 } 3544 } 3545 3546 func TestAllocation_LastEventTime(t *testing.T) { 3547 type testCase struct { 3548 desc string 3549 taskState map[string]*TaskState 3550 expectedLastEventTime time.Time 3551 } 3552 3553 t1 := time.Now().UTC() 3554 3555 testCases := []testCase{ 3556 { 3557 desc: "nil task state", 3558 expectedLastEventTime: t1, 3559 }, 3560 { 3561 desc: "empty task state", 3562 taskState: make(map[string]*TaskState), 3563 expectedLastEventTime: t1, 3564 }, 3565 { 3566 desc: "Finished At not set", 3567 taskState: map[string]*TaskState{"foo": {State: "start", 3568 StartedAt: t1.Add(-2 * time.Hour)}}, 3569 expectedLastEventTime: t1, 3570 }, 3571 { 3572 desc: "One finished ", 3573 taskState: map[string]*TaskState{"foo": {State: "start", 3574 StartedAt: t1.Add(-2 * time.Hour), 3575 FinishedAt: t1.Add(-1 * time.Hour)}}, 3576 expectedLastEventTime: t1.Add(-1 * time.Hour), 3577 }, 3578 { 3579 desc: "Multiple task groups", 3580 taskState: map[string]*TaskState{"foo": {State: "start", 3581 StartedAt: t1.Add(-2 * time.Hour), 3582 FinishedAt: t1.Add(-1 * time.Hour)}, 3583 "bar": {State: "start", 3584 StartedAt: t1.Add(-2 * time.Hour), 3585 FinishedAt: t1.Add(-40 * time.Minute)}}, 3586 expectedLastEventTime: t1.Add(-40 * time.Minute), 3587 }, 3588 { 3589 desc: "No finishedAt set, one task event, should use modify time", 3590 taskState: map[string]*TaskState{"foo": { 3591 State: "run", 3592 StartedAt: t1.Add(-2 * time.Hour), 3593 Events: []*TaskEvent{ 3594 {Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()}, 3595 }}, 3596 }, 3597 expectedLastEventTime: t1, 3598 }, 3599 } 3600 for _, tc := range testCases { 3601 t.Run(tc.desc, func(t *testing.T) { 3602 alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()} 3603 alloc.TaskStates = tc.taskState 3604 require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime()) 3605 }) 3606 } 3607 } 3608 3609 func TestAllocation_NextDelay(t *testing.T) { 3610 type testCase struct { 3611 desc string 3612 reschedulePolicy *ReschedulePolicy 3613 alloc *Allocation 3614 expectedRescheduleTime time.Time 3615 expectedRescheduleEligible bool 3616 } 3617 now := time.Now() 3618 testCases := []testCase{ 3619 { 3620 desc: "Allocation hasn't failed yet", 3621 reschedulePolicy: &ReschedulePolicy{ 3622 DelayFunction: "constant", 3623 Delay: 5 * time.Second, 3624 }, 3625 alloc: &Allocation{}, 3626 expectedRescheduleTime: time.Time{}, 3627 expectedRescheduleEligible: false, 3628 }, 3629 { 3630 desc: "Allocation has no reschedule policy", 3631 alloc: &Allocation{}, 3632 expectedRescheduleTime: time.Time{}, 3633 expectedRescheduleEligible: false, 3634 }, 3635 { 3636 desc: "Allocation lacks task state", 3637 reschedulePolicy: &ReschedulePolicy{ 3638 DelayFunction: "constant", 3639 Delay: 5 * time.Second, 3640 Unlimited: true, 3641 }, 3642 alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()}, 3643 expectedRescheduleTime: now.UTC().Add(5 * time.Second), 3644 expectedRescheduleEligible: true, 3645 }, 3646 { 3647 desc: "linear delay, unlimited restarts, no reschedule tracker", 3648 reschedulePolicy: &ReschedulePolicy{ 3649 DelayFunction: "constant", 3650 Delay: 5 * time.Second, 3651 Unlimited: true, 3652 }, 3653 alloc: &Allocation{ 3654 ClientStatus: AllocClientStatusFailed, 3655 TaskStates: map[string]*TaskState{"foo": {State: "dead", 3656 StartedAt: now.Add(-1 * time.Hour), 3657 FinishedAt: now.Add(-2 * time.Second)}}, 3658 }, 3659 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3660 expectedRescheduleEligible: true, 3661 }, 3662 { 3663 desc: "linear delay with reschedule tracker", 3664 reschedulePolicy: &ReschedulePolicy{ 3665 DelayFunction: "constant", 3666 Delay: 5 * time.Second, 3667 Interval: 10 * time.Minute, 3668 Attempts: 2, 3669 }, 3670 alloc: &Allocation{ 3671 ClientStatus: AllocClientStatusFailed, 3672 TaskStates: map[string]*TaskState{"foo": {State: "start", 3673 StartedAt: now.Add(-1 * time.Hour), 3674 FinishedAt: now.Add(-2 * time.Second)}}, 3675 RescheduleTracker: &RescheduleTracker{ 3676 Events: []*RescheduleEvent{{ 3677 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 3678 Delay: 5 * time.Second, 3679 }}, 3680 }}, 3681 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3682 expectedRescheduleEligible: true, 3683 }, 3684 { 3685 desc: "linear delay with reschedule tracker, attempts exhausted", 3686 reschedulePolicy: &ReschedulePolicy{ 3687 DelayFunction: "constant", 3688 Delay: 5 * time.Second, 3689 Interval: 10 * time.Minute, 3690 Attempts: 2, 3691 }, 3692 alloc: &Allocation{ 3693 ClientStatus: AllocClientStatusFailed, 3694 TaskStates: map[string]*TaskState{"foo": {State: "start", 3695 StartedAt: now.Add(-1 * time.Hour), 3696 FinishedAt: now.Add(-2 * time.Second)}}, 3697 RescheduleTracker: &RescheduleTracker{ 3698 Events: []*RescheduleEvent{ 3699 { 3700 RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), 3701 Delay: 5 * time.Second, 3702 }, 3703 { 3704 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 3705 Delay: 5 * time.Second, 3706 }, 3707 }, 3708 }}, 3709 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3710 expectedRescheduleEligible: false, 3711 }, 3712 { 3713 desc: "exponential delay - no reschedule tracker", 3714 reschedulePolicy: &ReschedulePolicy{ 3715 DelayFunction: "exponential", 3716 Delay: 5 * time.Second, 3717 MaxDelay: 90 * time.Second, 3718 Unlimited: true, 3719 }, 3720 alloc: &Allocation{ 3721 ClientStatus: AllocClientStatusFailed, 3722 TaskStates: map[string]*TaskState{"foo": {State: "start", 3723 StartedAt: now.Add(-1 * time.Hour), 3724 FinishedAt: now.Add(-2 * time.Second)}}, 3725 }, 3726 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3727 expectedRescheduleEligible: true, 3728 }, 3729 { 3730 desc: "exponential delay with reschedule tracker", 3731 reschedulePolicy: &ReschedulePolicy{ 3732 DelayFunction: "exponential", 3733 Delay: 5 * time.Second, 3734 MaxDelay: 90 * time.Second, 3735 Unlimited: true, 3736 }, 3737 alloc: &Allocation{ 3738 ClientStatus: AllocClientStatusFailed, 3739 TaskStates: map[string]*TaskState{"foo": {State: "start", 3740 StartedAt: now.Add(-1 * time.Hour), 3741 FinishedAt: now.Add(-2 * time.Second)}}, 3742 RescheduleTracker: &RescheduleTracker{ 3743 Events: []*RescheduleEvent{ 3744 { 3745 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3746 Delay: 5 * time.Second, 3747 }, 3748 { 3749 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3750 Delay: 10 * time.Second, 3751 }, 3752 { 3753 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3754 Delay: 20 * time.Second, 3755 }, 3756 }, 3757 }}, 3758 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 3759 expectedRescheduleEligible: true, 3760 }, 3761 { 3762 desc: "exponential delay with delay ceiling reached", 3763 reschedulePolicy: &ReschedulePolicy{ 3764 DelayFunction: "exponential", 3765 Delay: 5 * time.Second, 3766 MaxDelay: 90 * time.Second, 3767 Unlimited: true, 3768 }, 3769 alloc: &Allocation{ 3770 ClientStatus: AllocClientStatusFailed, 3771 TaskStates: map[string]*TaskState{"foo": {State: "start", 3772 StartedAt: now.Add(-1 * time.Hour), 3773 FinishedAt: now.Add(-15 * time.Second)}}, 3774 RescheduleTracker: &RescheduleTracker{ 3775 Events: []*RescheduleEvent{ 3776 { 3777 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3778 Delay: 5 * time.Second, 3779 }, 3780 { 3781 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3782 Delay: 10 * time.Second, 3783 }, 3784 { 3785 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3786 Delay: 20 * time.Second, 3787 }, 3788 { 3789 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3790 Delay: 40 * time.Second, 3791 }, 3792 { 3793 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 3794 Delay: 80 * time.Second, 3795 }, 3796 }, 3797 }}, 3798 expectedRescheduleTime: now.Add(-15 * time.Second).Add(90 * time.Second), 3799 expectedRescheduleEligible: true, 3800 }, 3801 { 3802 // Test case where most recent reschedule ran longer than delay ceiling 3803 desc: "exponential delay, delay ceiling reset condition met", 3804 reschedulePolicy: &ReschedulePolicy{ 3805 DelayFunction: "exponential", 3806 Delay: 5 * time.Second, 3807 MaxDelay: 90 * time.Second, 3808 Unlimited: true, 3809 }, 3810 alloc: &Allocation{ 3811 ClientStatus: AllocClientStatusFailed, 3812 TaskStates: map[string]*TaskState{"foo": {State: "start", 3813 StartedAt: now.Add(-1 * time.Hour), 3814 FinishedAt: now.Add(-15 * time.Minute)}}, 3815 RescheduleTracker: &RescheduleTracker{ 3816 Events: []*RescheduleEvent{ 3817 { 3818 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3819 Delay: 5 * time.Second, 3820 }, 3821 { 3822 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3823 Delay: 10 * time.Second, 3824 }, 3825 { 3826 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3827 Delay: 20 * time.Second, 3828 }, 3829 { 3830 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3831 Delay: 40 * time.Second, 3832 }, 3833 { 3834 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3835 Delay: 80 * time.Second, 3836 }, 3837 { 3838 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3839 Delay: 90 * time.Second, 3840 }, 3841 { 3842 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3843 Delay: 90 * time.Second, 3844 }, 3845 }, 3846 }}, 3847 expectedRescheduleTime: now.Add(-15 * time.Minute).Add(5 * time.Second), 3848 expectedRescheduleEligible: true, 3849 }, 3850 { 3851 desc: "fibonacci delay - no reschedule tracker", 3852 reschedulePolicy: &ReschedulePolicy{ 3853 DelayFunction: "fibonacci", 3854 Delay: 5 * time.Second, 3855 MaxDelay: 90 * time.Second, 3856 Unlimited: true, 3857 }, 3858 alloc: &Allocation{ 3859 ClientStatus: AllocClientStatusFailed, 3860 TaskStates: map[string]*TaskState{"foo": {State: "start", 3861 StartedAt: now.Add(-1 * time.Hour), 3862 FinishedAt: now.Add(-2 * time.Second)}}}, 3863 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3864 expectedRescheduleEligible: true, 3865 }, 3866 { 3867 desc: "fibonacci delay with reschedule tracker", 3868 reschedulePolicy: &ReschedulePolicy{ 3869 DelayFunction: "fibonacci", 3870 Delay: 5 * time.Second, 3871 MaxDelay: 90 * time.Second, 3872 Unlimited: true, 3873 }, 3874 alloc: &Allocation{ 3875 ClientStatus: AllocClientStatusFailed, 3876 TaskStates: map[string]*TaskState{"foo": {State: "start", 3877 StartedAt: now.Add(-1 * time.Hour), 3878 FinishedAt: now.Add(-2 * time.Second)}}, 3879 RescheduleTracker: &RescheduleTracker{ 3880 Events: []*RescheduleEvent{ 3881 { 3882 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3883 Delay: 5 * time.Second, 3884 }, 3885 { 3886 RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(), 3887 Delay: 5 * time.Second, 3888 }, 3889 }, 3890 }}, 3891 expectedRescheduleTime: now.Add(-2 * time.Second).Add(10 * time.Second), 3892 expectedRescheduleEligible: true, 3893 }, 3894 { 3895 desc: "fibonacci delay with more events", 3896 reschedulePolicy: &ReschedulePolicy{ 3897 DelayFunction: "fibonacci", 3898 Delay: 5 * time.Second, 3899 MaxDelay: 90 * time.Second, 3900 Unlimited: true, 3901 }, 3902 alloc: &Allocation{ 3903 ClientStatus: AllocClientStatusFailed, 3904 TaskStates: map[string]*TaskState{"foo": {State: "start", 3905 StartedAt: now.Add(-1 * time.Hour), 3906 FinishedAt: now.Add(-2 * time.Second)}}, 3907 RescheduleTracker: &RescheduleTracker{ 3908 Events: []*RescheduleEvent{ 3909 { 3910 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3911 Delay: 5 * time.Second, 3912 }, 3913 { 3914 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3915 Delay: 5 * time.Second, 3916 }, 3917 { 3918 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3919 Delay: 10 * time.Second, 3920 }, 3921 { 3922 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3923 Delay: 15 * time.Second, 3924 }, 3925 { 3926 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3927 Delay: 25 * time.Second, 3928 }, 3929 }, 3930 }}, 3931 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 3932 expectedRescheduleEligible: true, 3933 }, 3934 { 3935 desc: "fibonacci delay with delay ceiling reached", 3936 reschedulePolicy: &ReschedulePolicy{ 3937 DelayFunction: "fibonacci", 3938 Delay: 5 * time.Second, 3939 MaxDelay: 50 * time.Second, 3940 Unlimited: true, 3941 }, 3942 alloc: &Allocation{ 3943 ClientStatus: AllocClientStatusFailed, 3944 TaskStates: map[string]*TaskState{"foo": {State: "start", 3945 StartedAt: now.Add(-1 * time.Hour), 3946 FinishedAt: now.Add(-15 * time.Second)}}, 3947 RescheduleTracker: &RescheduleTracker{ 3948 Events: []*RescheduleEvent{ 3949 { 3950 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3951 Delay: 5 * time.Second, 3952 }, 3953 { 3954 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3955 Delay: 5 * time.Second, 3956 }, 3957 { 3958 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3959 Delay: 10 * time.Second, 3960 }, 3961 { 3962 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3963 Delay: 15 * time.Second, 3964 }, 3965 { 3966 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3967 Delay: 25 * time.Second, 3968 }, 3969 { 3970 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 3971 Delay: 40 * time.Second, 3972 }, 3973 }, 3974 }}, 3975 expectedRescheduleTime: now.Add(-15 * time.Second).Add(50 * time.Second), 3976 expectedRescheduleEligible: true, 3977 }, 3978 { 3979 desc: "fibonacci delay with delay reset condition met", 3980 reschedulePolicy: &ReschedulePolicy{ 3981 DelayFunction: "fibonacci", 3982 Delay: 5 * time.Second, 3983 MaxDelay: 50 * time.Second, 3984 Unlimited: true, 3985 }, 3986 alloc: &Allocation{ 3987 ClientStatus: AllocClientStatusFailed, 3988 TaskStates: map[string]*TaskState{"foo": {State: "start", 3989 StartedAt: now.Add(-1 * time.Hour), 3990 FinishedAt: now.Add(-5 * time.Minute)}}, 3991 RescheduleTracker: &RescheduleTracker{ 3992 Events: []*RescheduleEvent{ 3993 { 3994 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3995 Delay: 5 * time.Second, 3996 }, 3997 { 3998 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3999 Delay: 5 * time.Second, 4000 }, 4001 { 4002 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4003 Delay: 10 * time.Second, 4004 }, 4005 { 4006 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4007 Delay: 15 * time.Second, 4008 }, 4009 { 4010 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4011 Delay: 25 * time.Second, 4012 }, 4013 { 4014 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4015 Delay: 40 * time.Second, 4016 }, 4017 }, 4018 }}, 4019 expectedRescheduleTime: now.Add(-5 * time.Minute).Add(5 * time.Second), 4020 expectedRescheduleEligible: true, 4021 }, 4022 { 4023 desc: "fibonacci delay with the most recent event that reset delay value", 4024 reschedulePolicy: &ReschedulePolicy{ 4025 DelayFunction: "fibonacci", 4026 Delay: 5 * time.Second, 4027 MaxDelay: 50 * time.Second, 4028 Unlimited: true, 4029 }, 4030 alloc: &Allocation{ 4031 ClientStatus: AllocClientStatusFailed, 4032 TaskStates: map[string]*TaskState{"foo": {State: "start", 4033 StartedAt: now.Add(-1 * time.Hour), 4034 FinishedAt: now.Add(-5 * time.Second)}}, 4035 RescheduleTracker: &RescheduleTracker{ 4036 Events: []*RescheduleEvent{ 4037 { 4038 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 4039 Delay: 5 * time.Second, 4040 }, 4041 { 4042 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4043 Delay: 5 * time.Second, 4044 }, 4045 { 4046 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4047 Delay: 10 * time.Second, 4048 }, 4049 { 4050 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4051 Delay: 15 * time.Second, 4052 }, 4053 { 4054 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4055 Delay: 25 * time.Second, 4056 }, 4057 { 4058 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4059 Delay: 40 * time.Second, 4060 }, 4061 { 4062 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 4063 Delay: 50 * time.Second, 4064 }, 4065 { 4066 RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(), 4067 Delay: 5 * time.Second, 4068 }, 4069 }, 4070 }}, 4071 expectedRescheduleTime: now.Add(-5 * time.Second).Add(5 * time.Second), 4072 expectedRescheduleEligible: true, 4073 }, 4074 } 4075 for _, tc := range testCases { 4076 t.Run(tc.desc, func(t *testing.T) { 4077 require := require.New(t) 4078 j := testJob() 4079 if tc.reschedulePolicy != nil { 4080 j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy 4081 } 4082 tc.alloc.Job = j 4083 tc.alloc.TaskGroup = j.TaskGroups[0].Name 4084 reschedTime, allowed := tc.alloc.NextRescheduleTime() 4085 require.Equal(tc.expectedRescheduleEligible, allowed) 4086 require.Equal(tc.expectedRescheduleTime, reschedTime) 4087 }) 4088 } 4089 4090 } 4091 4092 func TestRescheduleTracker_Copy(t *testing.T) { 4093 type testCase struct { 4094 original *RescheduleTracker 4095 expected *RescheduleTracker 4096 } 4097 4098 cases := []testCase{ 4099 {nil, nil}, 4100 {&RescheduleTracker{Events: []*RescheduleEvent{ 4101 {RescheduleTime: 2, 4102 PrevAllocID: "12", 4103 PrevNodeID: "12", 4104 Delay: 30 * time.Second}, 4105 }}, &RescheduleTracker{Events: []*RescheduleEvent{ 4106 {RescheduleTime: 2, 4107 PrevAllocID: "12", 4108 PrevNodeID: "12", 4109 Delay: 30 * time.Second}, 4110 }}}, 4111 } 4112 4113 for _, tc := range cases { 4114 if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) { 4115 t.Fatalf("expected %v but got %v", *tc.expected, *got) 4116 } 4117 } 4118 } 4119 4120 func TestVault_Validate(t *testing.T) { 4121 v := &Vault{ 4122 Env: true, 4123 ChangeMode: VaultChangeModeNoop, 4124 } 4125 4126 if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") { 4127 t.Fatalf("Expected policy list empty error") 4128 } 4129 4130 v.Policies = []string{"foo", "root"} 4131 v.ChangeMode = VaultChangeModeSignal 4132 4133 err := v.Validate() 4134 if err == nil { 4135 t.Fatalf("Expected validation errors") 4136 } 4137 4138 if !strings.Contains(err.Error(), "Signal must") { 4139 t.Fatalf("Expected signal empty error") 4140 } 4141 if !strings.Contains(err.Error(), "root") { 4142 t.Fatalf("Expected root error") 4143 } 4144 } 4145 4146 func TestParameterizedJobConfig_Validate(t *testing.T) { 4147 d := &ParameterizedJobConfig{ 4148 Payload: "foo", 4149 } 4150 4151 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") { 4152 t.Fatalf("Expected unknown payload requirement: %v", err) 4153 } 4154 4155 d.Payload = DispatchPayloadOptional 4156 d.MetaOptional = []string{"foo", "bar"} 4157 d.MetaRequired = []string{"bar", "baz"} 4158 4159 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") { 4160 t.Fatalf("Expected meta not being disjoint error: %v", err) 4161 } 4162 } 4163 4164 func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { 4165 job := testJob() 4166 job.ParameterizedJob = &ParameterizedJobConfig{ 4167 Payload: DispatchPayloadOptional, 4168 } 4169 job.Type = JobTypeSystem 4170 4171 if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") { 4172 t.Fatalf("Expected bad scheduler tpye: %v", err) 4173 } 4174 } 4175 4176 func TestParameterizedJobConfig_Canonicalize(t *testing.T) { 4177 d := &ParameterizedJobConfig{} 4178 d.Canonicalize() 4179 if d.Payload != DispatchPayloadOptional { 4180 t.Fatalf("Canonicalize failed") 4181 } 4182 } 4183 4184 func TestDispatchPayloadConfig_Validate(t *testing.T) { 4185 d := &DispatchPayloadConfig{ 4186 File: "foo", 4187 } 4188 4189 // task/local/haha 4190 if err := d.Validate(); err != nil { 4191 t.Fatalf("bad: %v", err) 4192 } 4193 4194 // task/haha 4195 d.File = "../haha" 4196 if err := d.Validate(); err != nil { 4197 t.Fatalf("bad: %v", err) 4198 } 4199 4200 // ../haha 4201 d.File = "../../../haha" 4202 if err := d.Validate(); err == nil { 4203 t.Fatalf("bad: %v", err) 4204 } 4205 } 4206 4207 func TestIsRecoverable(t *testing.T) { 4208 if IsRecoverable(nil) { 4209 t.Errorf("nil should not be recoverable") 4210 } 4211 if IsRecoverable(NewRecoverableError(nil, true)) { 4212 t.Errorf("NewRecoverableError(nil, true) should not be recoverable") 4213 } 4214 if IsRecoverable(fmt.Errorf("i promise im recoverable")) { 4215 t.Errorf("Custom errors should not be recoverable") 4216 } 4217 if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) { 4218 t.Errorf("Explicitly unrecoverable errors should not be recoverable") 4219 } 4220 if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) { 4221 t.Errorf("Explicitly recoverable errors *should* be recoverable") 4222 } 4223 } 4224 4225 func TestACLTokenValidate(t *testing.T) { 4226 tk := &ACLToken{} 4227 4228 // Missing a type 4229 err := tk.Validate() 4230 assert.NotNil(t, err) 4231 if !strings.Contains(err.Error(), "client or management") { 4232 t.Fatalf("bad: %v", err) 4233 } 4234 4235 // Missing policies 4236 tk.Type = ACLClientToken 4237 err = tk.Validate() 4238 assert.NotNil(t, err) 4239 if !strings.Contains(err.Error(), "missing policies") { 4240 t.Fatalf("bad: %v", err) 4241 } 4242 4243 // Invalid policies 4244 tk.Type = ACLManagementToken 4245 tk.Policies = []string{"foo"} 4246 err = tk.Validate() 4247 assert.NotNil(t, err) 4248 if !strings.Contains(err.Error(), "associated with policies") { 4249 t.Fatalf("bad: %v", err) 4250 } 4251 4252 // Name too long policies 4253 tk.Name = "" 4254 for i := 0; i < 8; i++ { 4255 tk.Name += uuid.Generate() 4256 } 4257 tk.Policies = nil 4258 err = tk.Validate() 4259 assert.NotNil(t, err) 4260 if !strings.Contains(err.Error(), "too long") { 4261 t.Fatalf("bad: %v", err) 4262 } 4263 4264 // Make it valid 4265 tk.Name = "foo" 4266 err = tk.Validate() 4267 assert.Nil(t, err) 4268 } 4269 4270 func TestACLTokenPolicySubset(t *testing.T) { 4271 tk := &ACLToken{ 4272 Type: ACLClientToken, 4273 Policies: []string{"foo", "bar", "baz"}, 4274 } 4275 4276 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 4277 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 4278 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 4279 assert.Equal(t, true, tk.PolicySubset([]string{})) 4280 assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"})) 4281 assert.Equal(t, false, tk.PolicySubset([]string{"new"})) 4282 4283 tk = &ACLToken{ 4284 Type: ACLManagementToken, 4285 } 4286 4287 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 4288 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 4289 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 4290 assert.Equal(t, true, tk.PolicySubset([]string{})) 4291 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"})) 4292 assert.Equal(t, true, tk.PolicySubset([]string{"new"})) 4293 } 4294 4295 func TestACLTokenSetHash(t *testing.T) { 4296 tk := &ACLToken{ 4297 Name: "foo", 4298 Type: ACLClientToken, 4299 Policies: []string{"foo", "bar"}, 4300 Global: false, 4301 } 4302 out1 := tk.SetHash() 4303 assert.NotNil(t, out1) 4304 assert.NotNil(t, tk.Hash) 4305 assert.Equal(t, out1, tk.Hash) 4306 4307 tk.Policies = []string{"foo"} 4308 out2 := tk.SetHash() 4309 assert.NotNil(t, out2) 4310 assert.NotNil(t, tk.Hash) 4311 assert.Equal(t, out2, tk.Hash) 4312 assert.NotEqual(t, out1, out2) 4313 } 4314 4315 func TestACLPolicySetHash(t *testing.T) { 4316 ap := &ACLPolicy{ 4317 Name: "foo", 4318 Description: "great policy", 4319 Rules: "node { policy = \"read\" }", 4320 } 4321 out1 := ap.SetHash() 4322 assert.NotNil(t, out1) 4323 assert.NotNil(t, ap.Hash) 4324 assert.Equal(t, out1, ap.Hash) 4325 4326 ap.Rules = "node { policy = \"write\" }" 4327 out2 := ap.SetHash() 4328 assert.NotNil(t, out2) 4329 assert.NotNil(t, ap.Hash) 4330 assert.Equal(t, out2, ap.Hash) 4331 assert.NotEqual(t, out1, out2) 4332 } 4333 4334 func TestTaskEventPopulate(t *testing.T) { 4335 prepopulatedEvent := NewTaskEvent(TaskSetup) 4336 prepopulatedEvent.DisplayMessage = "Hola" 4337 testcases := []struct { 4338 event *TaskEvent 4339 expectedMsg string 4340 }{ 4341 {nil, ""}, 4342 {prepopulatedEvent, "Hola"}, 4343 {NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"}, 4344 {NewTaskEvent(TaskStarted), "Task started by client"}, 4345 {NewTaskEvent(TaskReceived), "Task received by client"}, 4346 {NewTaskEvent(TaskFailedValidation), "Validation of task failed"}, 4347 {NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"}, 4348 {NewTaskEvent(TaskSetupFailure), "Task setup failed"}, 4349 {NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"}, 4350 {NewTaskEvent(TaskDriverFailure), "Failed to start task"}, 4351 {NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"}, 4352 {NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"}, 4353 {NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"}, 4354 {NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"}, 4355 {NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"}, 4356 {NewTaskEvent(TaskKilling), "Sent interrupt"}, 4357 {NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"}, 4358 {NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"}, 4359 {NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"}, 4360 {NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""}, 4361 {NewTaskEvent(TaskKilled), "Task successfully killed"}, 4362 {NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"}, 4363 {NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"}, 4364 {NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"}, 4365 {NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"}, 4366 {NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"}, 4367 {NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"}, 4368 {NewTaskEvent(TaskSignaling), "Task being sent a signal"}, 4369 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"}, 4370 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"}, 4371 {NewTaskEvent(TaskRestartSignal), "Task signaled to restart"}, 4372 {NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"}, 4373 {NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"}, 4374 {NewTaskEvent("Unknown Type, No message"), ""}, 4375 {NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"}, 4376 } 4377 4378 for _, tc := range testcases { 4379 tc.event.PopulateEventDisplayMessage() 4380 if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg { 4381 t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage) 4382 } 4383 } 4384 } 4385 4386 func TestNetworkResourcesEquals(t *testing.T) { 4387 require := require.New(t) 4388 var networkResourcesTest = []struct { 4389 input []*NetworkResource 4390 expected bool 4391 errorMsg string 4392 }{ 4393 { 4394 []*NetworkResource{ 4395 { 4396 IP: "10.0.0.1", 4397 MBits: 50, 4398 ReservedPorts: []Port{{"web", 80, 0}}, 4399 }, 4400 { 4401 IP: "10.0.0.1", 4402 MBits: 50, 4403 ReservedPorts: []Port{{"web", 80, 0}}, 4404 }, 4405 }, 4406 true, 4407 "Equal network resources should return true", 4408 }, 4409 { 4410 []*NetworkResource{ 4411 { 4412 IP: "10.0.0.0", 4413 MBits: 50, 4414 ReservedPorts: []Port{{"web", 80, 0}}, 4415 }, 4416 { 4417 IP: "10.0.0.1", 4418 MBits: 50, 4419 ReservedPorts: []Port{{"web", 80, 0}}, 4420 }, 4421 }, 4422 false, 4423 "Different IP addresses should return false", 4424 }, 4425 { 4426 []*NetworkResource{ 4427 { 4428 IP: "10.0.0.1", 4429 MBits: 40, 4430 ReservedPorts: []Port{{"web", 80, 0}}, 4431 }, 4432 { 4433 IP: "10.0.0.1", 4434 MBits: 50, 4435 ReservedPorts: []Port{{"web", 80, 0}}, 4436 }, 4437 }, 4438 false, 4439 "Different MBits values should return false", 4440 }, 4441 { 4442 []*NetworkResource{ 4443 { 4444 IP: "10.0.0.1", 4445 MBits: 50, 4446 ReservedPorts: []Port{{"web", 80, 0}}, 4447 }, 4448 { 4449 IP: "10.0.0.1", 4450 MBits: 50, 4451 ReservedPorts: []Port{{"web", 80, 0}, {"web", 80, 0}}, 4452 }, 4453 }, 4454 false, 4455 "Different ReservedPorts lengths should return false", 4456 }, 4457 { 4458 []*NetworkResource{ 4459 { 4460 IP: "10.0.0.1", 4461 MBits: 50, 4462 ReservedPorts: []Port{{"web", 80, 0}}, 4463 }, 4464 { 4465 IP: "10.0.0.1", 4466 MBits: 50, 4467 ReservedPorts: []Port{}, 4468 }, 4469 }, 4470 false, 4471 "Empty and non empty ReservedPorts values should return false", 4472 }, 4473 { 4474 []*NetworkResource{ 4475 { 4476 IP: "10.0.0.1", 4477 MBits: 50, 4478 ReservedPorts: []Port{{"web", 80, 0}}, 4479 }, 4480 { 4481 IP: "10.0.0.1", 4482 MBits: 50, 4483 ReservedPorts: []Port{{"notweb", 80, 0}}, 4484 }, 4485 }, 4486 false, 4487 "Different valued ReservedPorts values should return false", 4488 }, 4489 { 4490 []*NetworkResource{ 4491 { 4492 IP: "10.0.0.1", 4493 MBits: 50, 4494 DynamicPorts: []Port{{"web", 80, 0}}, 4495 }, 4496 { 4497 IP: "10.0.0.1", 4498 MBits: 50, 4499 DynamicPorts: []Port{{"web", 80, 0}, {"web", 80, 0}}, 4500 }, 4501 }, 4502 false, 4503 "Different DynamicPorts lengths should return false", 4504 }, 4505 { 4506 []*NetworkResource{ 4507 { 4508 IP: "10.0.0.1", 4509 MBits: 50, 4510 DynamicPorts: []Port{{"web", 80, 0}}, 4511 }, 4512 { 4513 IP: "10.0.0.1", 4514 MBits: 50, 4515 DynamicPorts: []Port{}, 4516 }, 4517 }, 4518 false, 4519 "Empty and non empty DynamicPorts values should return false", 4520 }, 4521 { 4522 []*NetworkResource{ 4523 { 4524 IP: "10.0.0.1", 4525 MBits: 50, 4526 DynamicPorts: []Port{{"web", 80, 0}}, 4527 }, 4528 { 4529 IP: "10.0.0.1", 4530 MBits: 50, 4531 DynamicPorts: []Port{{"notweb", 80, 0}}, 4532 }, 4533 }, 4534 false, 4535 "Different valued DynamicPorts values should return false", 4536 }, 4537 } 4538 for _, testCase := range networkResourcesTest { 4539 first := testCase.input[0] 4540 second := testCase.input[1] 4541 require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg) 4542 } 4543 } 4544 4545 func TestNode_Canonicalize(t *testing.T) { 4546 t.Parallel() 4547 require := require.New(t) 4548 4549 // Make sure the eligiblity is set properly 4550 node := &Node{} 4551 node.Canonicalize() 4552 require.Equal(NodeSchedulingEligible, node.SchedulingEligibility) 4553 4554 node = &Node{ 4555 Drain: true, 4556 } 4557 node.Canonicalize() 4558 require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility) 4559 } 4560 4561 func TestNode_Copy(t *testing.T) { 4562 t.Parallel() 4563 require := require.New(t) 4564 4565 node := &Node{ 4566 ID: uuid.Generate(), 4567 SecretID: uuid.Generate(), 4568 Datacenter: "dc1", 4569 Name: "foobar", 4570 Attributes: map[string]string{ 4571 "kernel.name": "linux", 4572 "arch": "x86", 4573 "nomad.version": "0.5.0", 4574 "driver.exec": "1", 4575 "driver.mock_driver": "1", 4576 }, 4577 Resources: &Resources{ 4578 CPU: 4000, 4579 MemoryMB: 8192, 4580 DiskMB: 100 * 1024, 4581 Networks: []*NetworkResource{ 4582 { 4583 Device: "eth0", 4584 CIDR: "192.168.0.100/32", 4585 MBits: 1000, 4586 }, 4587 }, 4588 }, 4589 Reserved: &Resources{ 4590 CPU: 100, 4591 MemoryMB: 256, 4592 DiskMB: 4 * 1024, 4593 Networks: []*NetworkResource{ 4594 { 4595 Device: "eth0", 4596 IP: "192.168.0.100", 4597 ReservedPorts: []Port{{Label: "ssh", Value: 22}}, 4598 MBits: 1, 4599 }, 4600 }, 4601 }, 4602 NodeResources: &NodeResources{ 4603 Cpu: NodeCpuResources{ 4604 CpuShares: 4000, 4605 }, 4606 Memory: NodeMemoryResources{ 4607 MemoryMB: 8192, 4608 }, 4609 Disk: NodeDiskResources{ 4610 DiskMB: 100 * 1024, 4611 }, 4612 Networks: []*NetworkResource{ 4613 { 4614 Device: "eth0", 4615 CIDR: "192.168.0.100/32", 4616 MBits: 1000, 4617 }, 4618 }, 4619 }, 4620 ReservedResources: &NodeReservedResources{ 4621 Cpu: NodeReservedCpuResources{ 4622 CpuShares: 100, 4623 }, 4624 Memory: NodeReservedMemoryResources{ 4625 MemoryMB: 256, 4626 }, 4627 Disk: NodeReservedDiskResources{ 4628 DiskMB: 4 * 1024, 4629 }, 4630 Networks: NodeReservedNetworkResources{ 4631 ReservedHostPorts: "22", 4632 }, 4633 }, 4634 Links: map[string]string{ 4635 "consul": "foobar.dc1", 4636 }, 4637 Meta: map[string]string{ 4638 "pci-dss": "true", 4639 "database": "mysql", 4640 "version": "5.6", 4641 }, 4642 NodeClass: "linux-medium-pci", 4643 Status: NodeStatusReady, 4644 SchedulingEligibility: NodeSchedulingEligible, 4645 Drivers: map[string]*DriverInfo{ 4646 "mock_driver": { 4647 Attributes: map[string]string{"running": "1"}, 4648 Detected: true, 4649 Healthy: true, 4650 HealthDescription: "Currently active", 4651 UpdateTime: time.Now(), 4652 }, 4653 }, 4654 } 4655 node.ComputeClass() 4656 4657 node2 := node.Copy() 4658 4659 require.Equal(node.Attributes, node2.Attributes) 4660 require.Equal(node.Resources, node2.Resources) 4661 require.Equal(node.Reserved, node2.Reserved) 4662 require.Equal(node.Links, node2.Links) 4663 require.Equal(node.Meta, node2.Meta) 4664 require.Equal(node.Events, node2.Events) 4665 require.Equal(node.DrainStrategy, node2.DrainStrategy) 4666 require.Equal(node.Drivers, node2.Drivers) 4667 } 4668 4669 func TestSpread_Validate(t *testing.T) { 4670 type tc struct { 4671 spread *Spread 4672 err error 4673 name string 4674 } 4675 4676 testCases := []tc{ 4677 { 4678 spread: &Spread{}, 4679 err: fmt.Errorf("Missing spread attribute"), 4680 name: "empty spread", 4681 }, 4682 { 4683 spread: &Spread{ 4684 Attribute: "${node.datacenter}", 4685 Weight: -1, 4686 }, 4687 err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), 4688 name: "Invalid weight", 4689 }, 4690 { 4691 spread: &Spread{ 4692 Attribute: "${node.datacenter}", 4693 Weight: 110, 4694 }, 4695 err: fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"), 4696 name: "Invalid weight", 4697 }, 4698 { 4699 spread: &Spread{ 4700 Attribute: "${node.datacenter}", 4701 Weight: 50, 4702 SpreadTarget: []*SpreadTarget{ 4703 { 4704 Value: "dc1", 4705 Percent: 25, 4706 }, 4707 { 4708 Value: "dc2", 4709 Percent: 150, 4710 }, 4711 }, 4712 }, 4713 err: fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"), 4714 name: "Invalid percentages", 4715 }, 4716 { 4717 spread: &Spread{ 4718 Attribute: "${node.datacenter}", 4719 Weight: 50, 4720 SpreadTarget: []*SpreadTarget{ 4721 { 4722 Value: "dc1", 4723 Percent: 75, 4724 }, 4725 { 4726 Value: "dc2", 4727 Percent: 75, 4728 }, 4729 }, 4730 }, 4731 err: fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150), 4732 name: "Invalid percentages", 4733 }, 4734 { 4735 spread: &Spread{ 4736 Attribute: "${node.datacenter}", 4737 Weight: 50, 4738 SpreadTarget: []*SpreadTarget{ 4739 { 4740 Value: "dc1", 4741 Percent: 25, 4742 }, 4743 { 4744 Value: "dc1", 4745 Percent: 50, 4746 }, 4747 }, 4748 }, 4749 err: fmt.Errorf("Spread target value \"dc1\" already defined"), 4750 name: "No spread targets", 4751 }, 4752 { 4753 spread: &Spread{ 4754 Attribute: "${node.datacenter}", 4755 Weight: 50, 4756 SpreadTarget: []*SpreadTarget{ 4757 { 4758 Value: "dc1", 4759 Percent: 25, 4760 }, 4761 { 4762 Value: "dc2", 4763 Percent: 50, 4764 }, 4765 }, 4766 }, 4767 err: nil, 4768 name: "Valid spread", 4769 }, 4770 } 4771 4772 for _, tc := range testCases { 4773 t.Run(tc.name, func(t *testing.T) { 4774 err := tc.spread.Validate() 4775 if tc.err != nil { 4776 require.NotNil(t, err) 4777 require.Contains(t, err.Error(), tc.err.Error()) 4778 } else { 4779 require.Nil(t, err) 4780 } 4781 }) 4782 } 4783 } 4784 4785 func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) { 4786 require := require.New(t) 4787 cases := []struct { 4788 Input string 4789 Parsed []uint64 4790 Err bool 4791 }{ 4792 { 4793 "1,2,3", 4794 []uint64{1, 2, 3}, 4795 false, 4796 }, 4797 { 4798 "3,1,2,1,2,3,1-3", 4799 []uint64{1, 2, 3}, 4800 false, 4801 }, 4802 { 4803 "3-1", 4804 nil, 4805 true, 4806 }, 4807 { 4808 "1-3,2-4", 4809 []uint64{1, 2, 3, 4}, 4810 false, 4811 }, 4812 { 4813 "1-3,4,5-5,6,7,8-10", 4814 []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 4815 false, 4816 }, 4817 } 4818 4819 for i, tc := range cases { 4820 r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input} 4821 out, err := r.ParseReservedHostPorts() 4822 if (err != nil) != tc.Err { 4823 t.Fatalf("test case %d: %v", i, err) 4824 continue 4825 } 4826 4827 require.Equal(out, tc.Parsed) 4828 } 4829 }