github.com/hspak/nomad@v0.7.2-0.20180309000617-bc4ae22a39a5/nomad/structs/structs_test.go 1 package structs 2 3 import ( 4 "fmt" 5 "os" 6 "reflect" 7 "strings" 8 "testing" 9 "time" 10 11 "github.com/hashicorp/consul/api" 12 "github.com/hashicorp/go-multierror" 13 "github.com/hashicorp/nomad/helper/uuid" 14 "github.com/kr/pretty" 15 "github.com/stretchr/testify/assert" 16 "github.com/stretchr/testify/require" 17 ) 18 19 func TestJob_Validate(t *testing.T) { 20 j := &Job{} 21 err := j.Validate() 22 mErr := err.(*multierror.Error) 23 if !strings.Contains(mErr.Errors[0].Error(), "job region") { 24 t.Fatalf("err: %s", err) 25 } 26 if !strings.Contains(mErr.Errors[1].Error(), "job ID") { 27 t.Fatalf("err: %s", err) 28 } 29 if !strings.Contains(mErr.Errors[2].Error(), "job name") { 30 t.Fatalf("err: %s", err) 31 } 32 if !strings.Contains(mErr.Errors[3].Error(), "namespace") { 33 t.Fatalf("err: %s", err) 34 } 35 if !strings.Contains(mErr.Errors[4].Error(), "job type") { 36 t.Fatalf("err: %s", err) 37 } 38 if !strings.Contains(mErr.Errors[5].Error(), "priority") { 39 t.Fatalf("err: %s", err) 40 } 41 if !strings.Contains(mErr.Errors[6].Error(), "datacenters") { 42 t.Fatalf("err: %s", err) 43 } 44 if !strings.Contains(mErr.Errors[7].Error(), "task groups") { 45 t.Fatalf("err: %s", err) 46 } 47 48 j = &Job{ 49 Type: "invalid-job-type", 50 } 51 err = j.Validate() 52 if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) { 53 t.Errorf("expected %s but found: %v", expected, err) 54 } 55 56 j = &Job{ 57 Type: JobTypeService, 58 Periodic: &PeriodicConfig{ 59 Enabled: true, 60 }, 61 } 62 err = j.Validate() 63 mErr = err.(*multierror.Error) 64 if !strings.Contains(mErr.Error(), "Periodic") { 65 t.Fatalf("err: %s", err) 66 } 67 68 j = &Job{ 69 Region: "global", 70 ID: uuid.Generate(), 71 Namespace: "test", 72 Name: "my-job", 73 Type: JobTypeService, 74 Priority: 50, 75 Datacenters: []string{"dc1"}, 76 TaskGroups: []*TaskGroup{ 77 { 78 Name: "web", 79 RestartPolicy: &RestartPolicy{ 80 Interval: 5 * time.Minute, 81 Delay: 10 * time.Second, 82 Attempts: 10, 83 }, 84 }, 85 { 86 Name: "web", 87 RestartPolicy: &RestartPolicy{ 88 Interval: 5 * time.Minute, 89 Delay: 10 * time.Second, 90 Attempts: 10, 91 }, 92 }, 93 { 94 RestartPolicy: &RestartPolicy{ 95 Interval: 5 * time.Minute, 96 Delay: 10 * time.Second, 97 Attempts: 10, 98 }, 99 }, 100 }, 101 } 102 err = j.Validate() 103 mErr = err.(*multierror.Error) 104 if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") { 105 t.Fatalf("err: %s", err) 106 } 107 if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") { 108 t.Fatalf("err: %s", err) 109 } 110 if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") { 111 t.Fatalf("err: %s", err) 112 } 113 } 114 115 func TestJob_Warnings(t *testing.T) { 116 cases := []struct { 117 Name string 118 Job *Job 119 Expected []string 120 }{ 121 { 122 Name: "Higher counts for update stanza", 123 Expected: []string{"max parallel count is greater"}, 124 Job: &Job{ 125 Type: JobTypeService, 126 TaskGroups: []*TaskGroup{ 127 { 128 Name: "foo", 129 Count: 2, 130 Update: &UpdateStrategy{ 131 MaxParallel: 10, 132 }, 133 }, 134 }, 135 }, 136 }, 137 } 138 139 for _, c := range cases { 140 t.Run(c.Name, func(t *testing.T) { 141 warnings := c.Job.Warnings() 142 if warnings == nil { 143 if len(c.Expected) == 0 { 144 return 145 } else { 146 t.Fatal("Got no warnings when they were expected") 147 } 148 } 149 150 a := warnings.Error() 151 for
_, e := range c.Expected { 152 if !strings.Contains(a, e) { 153 t.Fatalf("Got warnings %q; didn't contain %q", a, e) 154 } 155 } 156 }) 157 } 158 } 159 160 func TestJob_Canonicalize_Update(t *testing.T) { 161 cases := []struct { 162 Name string 163 Job *Job 164 Expected *Job 165 Warnings []string 166 }{ 167 { 168 Name: "One task group", 169 Warnings: []string{"conversion to new update stanza"}, 170 Job: &Job{ 171 Namespace: "test", 172 Type: JobTypeService, 173 Update: UpdateStrategy{ 174 MaxParallel: 2, 175 Stagger: 10 * time.Second, 176 }, 177 TaskGroups: []*TaskGroup{ 178 { 179 Name: "foo", 180 Count: 2, 181 }, 182 }, 183 }, 184 Expected: &Job{ 185 Namespace: "test", 186 Type: JobTypeService, 187 Update: UpdateStrategy{ 188 MaxParallel: 2, 189 Stagger: 10 * time.Second, 190 }, 191 TaskGroups: []*TaskGroup{ 192 { 193 Name: "foo", 194 Count: 2, 195 RestartPolicy: NewRestartPolicy(JobTypeService), 196 ReschedulePolicy: NewReshedulePolicy(JobTypeService), 197 EphemeralDisk: DefaultEphemeralDisk(), 198 Update: &UpdateStrategy{ 199 Stagger: 30 * time.Second, 200 MaxParallel: 2, 201 HealthCheck: UpdateStrategyHealthCheck_Checks, 202 MinHealthyTime: 10 * time.Second, 203 HealthyDeadline: 5 * time.Minute, 204 AutoRevert: false, 205 Canary: 0, 206 }, 207 }, 208 }, 209 }, 210 }, 211 { 212 Name: "One task group batch", 213 Warnings: []string{"Update stanza is disallowed for batch jobs"}, 214 Job: &Job{ 215 Namespace: "test", 216 Type: JobTypeBatch, 217 Update: UpdateStrategy{ 218 MaxParallel: 2, 219 Stagger: 10 * time.Second, 220 }, 221 TaskGroups: []*TaskGroup{ 222 { 223 Name: "foo", 224 Count: 2, 225 }, 226 }, 227 }, 228 Expected: &Job{ 229 Namespace: "test", 230 Type: JobTypeBatch, 231 Update: UpdateStrategy{}, 232 TaskGroups: []*TaskGroup{ 233 { 234 Name: "foo", 235 Count: 2, 236 RestartPolicy: NewRestartPolicy(JobTypeBatch), 237 ReschedulePolicy: NewReshedulePolicy(JobTypeBatch), 238 EphemeralDisk: DefaultEphemeralDisk(), 239 }, 240 }, 241 }, 242 }, 243 { 244 Name: "One task group batch - new spec", 245 Warnings: []string{"Update stanza is disallowed for batch jobs"}, 246 Job: &Job{ 247 Namespace: "test", 248 Type: JobTypeBatch, 249 Update: UpdateStrategy{ 250 Stagger: 2 * time.Second, 251 MaxParallel: 2, 252 Canary: 2, 253 MinHealthyTime: 2 * time.Second, 254 HealthyDeadline: 10 * time.Second, 255 HealthCheck: UpdateStrategyHealthCheck_Checks, 256 }, 257 TaskGroups: []*TaskGroup{ 258 { 259 Name: "foo", 260 Count: 2, 261 Update: &UpdateStrategy{ 262 Stagger: 2 * time.Second, 263 MaxParallel: 2, 264 Canary: 2, 265 MinHealthyTime: 2 * time.Second, 266 HealthyDeadline: 10 * time.Second, 267 HealthCheck: UpdateStrategyHealthCheck_Checks, 268 }, 269 }, 270 }, 271 }, 272 Expected: &Job{ 273 Namespace: "test", 274 Type: JobTypeBatch, 275 Update: UpdateStrategy{}, 276 TaskGroups: []*TaskGroup{ 277 { 278 Name: "foo", 279 Count: 2, 280 RestartPolicy: NewRestartPolicy(JobTypeBatch), 281 ReschedulePolicy: NewReshedulePolicy(JobTypeBatch), 282 EphemeralDisk: DefaultEphemeralDisk(), 283 }, 284 }, 285 }, 286 }, 287 { 288 Name: "One task group service - new spec", 289 Job: &Job{ 290 Namespace: "test", 291 Type: JobTypeService, 292 Update: UpdateStrategy{ 293 Stagger: 2 * time.Second, 294 MaxParallel: 2, 295 Canary: 2, 296 MinHealthyTime: 2 * time.Second, 297 HealthyDeadline: 10 * time.Second, 298 HealthCheck: UpdateStrategyHealthCheck_Checks, 299 }, 300 TaskGroups: []*TaskGroup{ 301 { 302 Name: "foo", 303 Count: 2, 304 Update: &UpdateStrategy{ 305 Stagger: 2 * time.Second, 306 MaxParallel: 2, 307 Canary: 2, 
308 MinHealthyTime: 2 * time.Second, 309 HealthyDeadline: 10 * time.Second, 310 HealthCheck: UpdateStrategyHealthCheck_Checks, 311 }, 312 }, 313 }, 314 }, 315 Expected: &Job{ 316 Namespace: "test", 317 Type: JobTypeService, 318 Update: UpdateStrategy{ 319 Stagger: 2 * time.Second, 320 MaxParallel: 2, 321 Canary: 2, 322 MinHealthyTime: 2 * time.Second, 323 HealthyDeadline: 10 * time.Second, 324 HealthCheck: UpdateStrategyHealthCheck_Checks, 325 }, 326 TaskGroups: []*TaskGroup{ 327 { 328 Name: "foo", 329 Count: 2, 330 RestartPolicy: NewRestartPolicy(JobTypeService), 331 ReschedulePolicy: NewReshedulePolicy(JobTypeService), 332 EphemeralDisk: DefaultEphemeralDisk(), 333 Update: &UpdateStrategy{ 334 Stagger: 2 * time.Second, 335 MaxParallel: 2, 336 Canary: 2, 337 MinHealthyTime: 2 * time.Second, 338 HealthyDeadline: 10 * time.Second, 339 HealthCheck: UpdateStrategyHealthCheck_Checks, 340 }, 341 }, 342 }, 343 }, 344 }, 345 { 346 Name: "One task group; too high of parallelism", 347 Warnings: []string{"conversion to new update stanza"}, 348 Job: &Job{ 349 Namespace: "test", 350 Type: JobTypeService, 351 Update: UpdateStrategy{ 352 MaxParallel: 200, 353 Stagger: 10 * time.Second, 354 }, 355 TaskGroups: []*TaskGroup{ 356 { 357 Name: "foo", 358 Count: 2, 359 }, 360 }, 361 }, 362 Expected: &Job{ 363 Namespace: "test", 364 Type: JobTypeService, 365 Update: UpdateStrategy{ 366 MaxParallel: 200, 367 Stagger: 10 * time.Second, 368 }, 369 TaskGroups: []*TaskGroup{ 370 { 371 Name: "foo", 372 Count: 2, 373 RestartPolicy: NewRestartPolicy(JobTypeService), 374 ReschedulePolicy: NewReshedulePolicy(JobTypeService), 375 EphemeralDisk: DefaultEphemeralDisk(), 376 Update: &UpdateStrategy{ 377 Stagger: 30 * time.Second, 378 MaxParallel: 2, 379 HealthCheck: UpdateStrategyHealthCheck_Checks, 380 MinHealthyTime: 10 * time.Second, 381 HealthyDeadline: 5 * time.Minute, 382 AutoRevert: false, 383 Canary: 0, 384 }, 385 }, 386 }, 387 }, 388 }, 389 { 390 Name: "Multiple task group; rounding", 391 Warnings: []string{"conversion to new update stanza"}, 392 Job: &Job{ 393 Namespace: "test", 394 Type: JobTypeService, 395 Update: UpdateStrategy{ 396 MaxParallel: 2, 397 Stagger: 10 * time.Second, 398 }, 399 TaskGroups: []*TaskGroup{ 400 { 401 Name: "foo", 402 Count: 2, 403 }, 404 { 405 Name: "bar", 406 Count: 14, 407 }, 408 { 409 Name: "foo", 410 Count: 26, 411 }, 412 }, 413 }, 414 Expected: &Job{ 415 Namespace: "test", 416 Type: JobTypeService, 417 Update: UpdateStrategy{ 418 MaxParallel: 2, 419 Stagger: 10 * time.Second, 420 }, 421 TaskGroups: []*TaskGroup{ 422 { 423 Name: "foo", 424 Count: 2, 425 RestartPolicy: NewRestartPolicy(JobTypeService), 426 ReschedulePolicy: NewReshedulePolicy(JobTypeService), 427 EphemeralDisk: DefaultEphemeralDisk(), 428 Update: &UpdateStrategy{ 429 Stagger: 30 * time.Second, 430 MaxParallel: 1, 431 HealthCheck: UpdateStrategyHealthCheck_Checks, 432 MinHealthyTime: 10 * time.Second, 433 HealthyDeadline: 5 * time.Minute, 434 AutoRevert: false, 435 Canary: 0, 436 }, 437 }, 438 { 439 Name: "bar", 440 Count: 14, 441 RestartPolicy: NewRestartPolicy(JobTypeService), 442 ReschedulePolicy: NewReshedulePolicy(JobTypeService), 443 EphemeralDisk: DefaultEphemeralDisk(), 444 Update: &UpdateStrategy{ 445 Stagger: 30 * time.Second, 446 MaxParallel: 1, 447 HealthCheck: UpdateStrategyHealthCheck_Checks, 448 MinHealthyTime: 10 * time.Second, 449 HealthyDeadline: 5 * time.Minute, 450 AutoRevert: false, 451 Canary: 0, 452 }, 453 }, 454 { 455 Name: "foo", 456 Count: 26, 457 EphemeralDisk: DefaultEphemeralDisk(), 458 
RestartPolicy: NewRestartPolicy(JobTypeService), 459 ReschedulePolicy: NewReshedulePolicy(JobTypeService), 460 Update: &UpdateStrategy{ 461 Stagger: 30 * time.Second, 462 MaxParallel: 3, 463 HealthCheck: UpdateStrategyHealthCheck_Checks, 464 MinHealthyTime: 10 * time.Second, 465 HealthyDeadline: 5 * time.Minute, 466 AutoRevert: false, 467 Canary: 0, 468 }, 469 }, 470 }, 471 }, 472 }, 473 } 474 475 for _, c := range cases { 476 t.Run(c.Name, func(t *testing.T) { 477 warnings := c.Job.Canonicalize() 478 if !reflect.DeepEqual(c.Job, c.Expected) { 479 t.Fatalf("Diff %#v", pretty.Diff(c.Job, c.Expected)) 480 } 481 482 wErr := "" 483 if warnings != nil { 484 wErr = warnings.Error() 485 } 486 for _, w := range c.Warnings { 487 if !strings.Contains(wErr, w) { 488 t.Fatalf("Wanted warning %q: got %q", w, wErr) 489 } 490 } 491 492 if len(c.Warnings) == 0 && warnings != nil { 493 t.Fatalf("Wanted no warnings: got %q", wErr) 494 } 495 }) 496 } 497 } 498 499 func TestJob_SpecChanged(t *testing.T) { 500 // Get a base test job 501 base := testJob() 502 503 // Only modify the indexes/mutable state of the job 504 mutatedBase := base.Copy() 505 mutatedBase.Status = "foo" 506 mutatedBase.ModifyIndex = base.ModifyIndex + 100 507 508 // changed contains a spec change that should be detected 509 change := base.Copy() 510 change.Priority = 99 511 512 cases := []struct { 513 Name string 514 Original *Job 515 New *Job 516 Changed bool 517 }{ 518 { 519 Name: "Same job except mutable indexes", 520 Changed: false, 521 Original: base, 522 New: mutatedBase, 523 }, 524 { 525 Name: "Different", 526 Changed: true, 527 Original: base, 528 New: change, 529 }, 530 } 531 532 for _, c := range cases { 533 t.Run(c.Name, func(t *testing.T) { 534 if actual := c.Original.SpecChanged(c.New); actual != c.Changed { 535 t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed) 536 } 537 }) 538 } 539 } 540 541 func testJob() *Job { 542 return &Job{ 543 Region: "global", 544 ID: uuid.Generate(), 545 Namespace: "test", 546 Name: "my-job", 547 Type: JobTypeService, 548 Priority: 50, 549 AllAtOnce: false, 550 Datacenters: []string{"dc1"}, 551 Constraints: []*Constraint{ 552 { 553 LTarget: "$attr.kernel.name", 554 RTarget: "linux", 555 Operand: "=", 556 }, 557 }, 558 Periodic: &PeriodicConfig{ 559 Enabled: false, 560 }, 561 TaskGroups: []*TaskGroup{ 562 { 563 Name: "web", 564 Count: 10, 565 EphemeralDisk: DefaultEphemeralDisk(), 566 RestartPolicy: &RestartPolicy{ 567 Mode: RestartPolicyModeFail, 568 Attempts: 3, 569 Interval: 10 * time.Minute, 570 Delay: 1 * time.Minute, 571 }, 572 ReschedulePolicy: &ReschedulePolicy{ 573 Interval: 5 * time.Minute, 574 Attempts: 10, 575 }, 576 Tasks: []*Task{ 577 { 578 Name: "web", 579 Driver: "exec", 580 Config: map[string]interface{}{ 581 "command": "/bin/date", 582 }, 583 Env: map[string]string{ 584 "FOO": "bar", 585 }, 586 Artifacts: []*TaskArtifact{ 587 { 588 GetterSource: "http://foo.com", 589 }, 590 }, 591 Services: []*Service{ 592 { 593 Name: "${TASK}-frontend", 594 PortLabel: "http", 595 }, 596 }, 597 Resources: &Resources{ 598 CPU: 500, 599 MemoryMB: 256, 600 Networks: []*NetworkResource{ 601 { 602 MBits: 50, 603 DynamicPorts: []Port{{Label: "http"}}, 604 }, 605 }, 606 }, 607 LogConfig: &LogConfig{ 608 MaxFiles: 10, 609 MaxFileSizeMB: 1, 610 }, 611 }, 612 }, 613 Meta: map[string]string{ 614 "elb_check_type": "http", 615 "elb_check_interval": "30s", 616 "elb_check_min": "3", 617 }, 618 }, 619 }, 620 Meta: map[string]string{ 621 "owner": "armon", 622 }, 623 } 624 } 625 626 func 
TestJob_Copy(t *testing.T) { 627 j := testJob() 628 c := j.Copy() 629 if !reflect.DeepEqual(j, c) { 630 t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j) 631 } 632 } 633 634 func TestJob_IsPeriodic(t *testing.T) { 635 j := &Job{ 636 Type: JobTypeService, 637 Periodic: &PeriodicConfig{ 638 Enabled: true, 639 }, 640 } 641 if !j.IsPeriodic() { 642 t.Fatalf("IsPeriodic() returned false on periodic job") 643 } 644 645 j = &Job{ 646 Type: JobTypeService, 647 } 648 if j.IsPeriodic() { 649 t.Fatalf("IsPeriodic() returned true on non-periodic job") 650 } 651 } 652 653 func TestJob_IsPeriodicActive(t *testing.T) { 654 cases := []struct { 655 job *Job 656 active bool 657 }{ 658 { 659 job: &Job{ 660 Type: JobTypeService, 661 Periodic: &PeriodicConfig{ 662 Enabled: true, 663 }, 664 }, 665 active: true, 666 }, 667 { 668 job: &Job{ 669 Type: JobTypeService, 670 Periodic: &PeriodicConfig{ 671 Enabled: false, 672 }, 673 }, 674 active: false, 675 }, 676 { 677 job: &Job{ 678 Type: JobTypeService, 679 Periodic: &PeriodicConfig{ 680 Enabled: true, 681 }, 682 Stop: true, 683 }, 684 active: false, 685 }, 686 { 687 job: &Job{ 688 Type: JobTypeService, 689 Periodic: &PeriodicConfig{ 690 Enabled: false, 691 }, 692 ParameterizedJob: &ParameterizedJobConfig{}, 693 }, 694 active: false, 695 }, 696 } 697 698 for i, c := range cases { 699 if act := c.job.IsPeriodicActive(); act != c.active { 700 t.Fatalf("case %d failed: got %v; want %v", i, act, c.active) 701 } 702 } 703 } 704 705 func TestJob_SystemJob_Validate(t *testing.T) { 706 j := testJob() 707 j.Type = JobTypeSystem 708 j.Canonicalize() 709 710 err := j.Validate() 711 if err == nil || !strings.Contains(err.Error(), "exceed") { 712 t.Fatalf("expect error due to count") 713 } 714 715 j.TaskGroups[0].Count = 0 716 if err := j.Validate(); err != nil { 717 t.Fatalf("unexpected err: %v", err) 718 } 719 720 j.TaskGroups[0].Count = 1 721 if err := j.Validate(); err != nil { 722 t.Fatalf("unexpected err: %v", err) 723 } 724 } 725 726 func TestJob_VaultPolicies(t *testing.T) { 727 j0 := &Job{} 728 e0 := make(map[string]map[string]*Vault, 0) 729 730 vj1 := &Vault{ 731 Policies: []string{ 732 "p1", 733 "p2", 734 }, 735 } 736 vj2 := &Vault{ 737 Policies: []string{ 738 "p3", 739 "p4", 740 }, 741 } 742 vj3 := &Vault{ 743 Policies: []string{ 744 "p5", 745 }, 746 } 747 j1 := &Job{ 748 TaskGroups: []*TaskGroup{ 749 { 750 Name: "foo", 751 Tasks: []*Task{ 752 { 753 Name: "t1", 754 }, 755 { 756 Name: "t2", 757 Vault: vj1, 758 }, 759 }, 760 }, 761 { 762 Name: "bar", 763 Tasks: []*Task{ 764 { 765 Name: "t3", 766 Vault: vj2, 767 }, 768 { 769 Name: "t4", 770 Vault: vj3, 771 }, 772 }, 773 }, 774 }, 775 } 776 777 e1 := map[string]map[string]*Vault{ 778 "foo": { 779 "t2": vj1, 780 }, 781 "bar": { 782 "t3": vj2, 783 "t4": vj3, 784 }, 785 } 786 787 cases := []struct { 788 Job *Job 789 Expected map[string]map[string]*Vault 790 }{ 791 { 792 Job: j0, 793 Expected: e0, 794 }, 795 { 796 Job: j1, 797 Expected: e1, 798 }, 799 } 800 801 for i, c := range cases { 802 got := c.Job.VaultPolicies() 803 if !reflect.DeepEqual(got, c.Expected) { 804 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 805 } 806 } 807 } 808 809 func TestJob_RequiredSignals(t *testing.T) { 810 j0 := &Job{} 811 e0 := make(map[string]map[string][]string, 0) 812 813 vj1 := &Vault{ 814 Policies: []string{"p1"}, 815 ChangeMode: VaultChangeModeNoop, 816 } 817 vj2 := &Vault{ 818 Policies: []string{"p1"}, 819 ChangeMode: VaultChangeModeSignal, 820 ChangeSignal: "SIGUSR1", 821 } 822 tj1 := &Template{ 823 
SourcePath: "foo", 824 DestPath: "bar", 825 ChangeMode: TemplateChangeModeNoop, 826 } 827 tj2 := &Template{ 828 SourcePath: "foo", 829 DestPath: "bar", 830 ChangeMode: TemplateChangeModeSignal, 831 ChangeSignal: "SIGUSR2", 832 } 833 j1 := &Job{ 834 TaskGroups: []*TaskGroup{ 835 { 836 Name: "foo", 837 Tasks: []*Task{ 838 { 839 Name: "t1", 840 }, 841 { 842 Name: "t2", 843 Vault: vj2, 844 Templates: []*Template{tj2}, 845 }, 846 }, 847 }, 848 { 849 Name: "bar", 850 Tasks: []*Task{ 851 { 852 Name: "t3", 853 Vault: vj1, 854 Templates: []*Template{tj1}, 855 }, 856 { 857 Name: "t4", 858 Vault: vj2, 859 }, 860 }, 861 }, 862 }, 863 } 864 865 e1 := map[string]map[string][]string{ 866 "foo": { 867 "t2": {"SIGUSR1", "SIGUSR2"}, 868 }, 869 "bar": { 870 "t4": {"SIGUSR1"}, 871 }, 872 } 873 874 j2 := &Job{ 875 TaskGroups: []*TaskGroup{ 876 { 877 Name: "foo", 878 Tasks: []*Task{ 879 { 880 Name: "t1", 881 KillSignal: "SIGQUIT", 882 }, 883 }, 884 }, 885 }, 886 } 887 888 e2 := map[string]map[string][]string{ 889 "foo": { 890 "t1": {"SIGQUIT"}, 891 }, 892 } 893 894 cases := []struct { 895 Job *Job 896 Expected map[string]map[string][]string 897 }{ 898 { 899 Job: j0, 900 Expected: e0, 901 }, 902 { 903 Job: j1, 904 Expected: e1, 905 }, 906 { 907 Job: j2, 908 Expected: e2, 909 }, 910 } 911 912 for i, c := range cases { 913 got := c.Job.RequiredSignals() 914 if !reflect.DeepEqual(got, c.Expected) { 915 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 916 } 917 } 918 } 919 920 func TestTaskGroup_Validate(t *testing.T) { 921 j := testJob() 922 tg := &TaskGroup{ 923 Count: -1, 924 RestartPolicy: &RestartPolicy{ 925 Interval: 5 * time.Minute, 926 Delay: 10 * time.Second, 927 Attempts: 10, 928 Mode: RestartPolicyModeDelay, 929 }, 930 ReschedulePolicy: &ReschedulePolicy{ 931 Interval: 5 * time.Minute, 932 Attempts: 5, 933 }, 934 } 935 err := tg.Validate(j) 936 mErr := err.(*multierror.Error) 937 if !strings.Contains(mErr.Errors[0].Error(), "group name") { 938 t.Fatalf("err: %s", err) 939 } 940 if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") { 941 t.Fatalf("err: %s", err) 942 } 943 if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") { 944 t.Fatalf("err: %s", err) 945 } 946 947 tg = &TaskGroup{ 948 Tasks: []*Task{ 949 { 950 Name: "task-a", 951 Resources: &Resources{ 952 Networks: []*NetworkResource{ 953 { 954 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 955 }, 956 }, 957 }, 958 }, 959 { 960 Name: "task-b", 961 Resources: &Resources{ 962 Networks: []*NetworkResource{ 963 { 964 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 965 }, 966 }, 967 }, 968 }, 969 }, 970 } 971 err = tg.Validate(&Job{}) 972 expected := `Static port 123 already reserved by task-a:foo` 973 if !strings.Contains(err.Error(), expected) { 974 t.Errorf("expected %s but found: %v", expected, err) 975 } 976 977 tg = &TaskGroup{ 978 Tasks: []*Task{ 979 { 980 Name: "task-a", 981 Resources: &Resources{ 982 Networks: []*NetworkResource{ 983 { 984 ReservedPorts: []Port{ 985 {Label: "foo", Value: 123}, 986 {Label: "bar", Value: 123}, 987 }, 988 }, 989 }, 990 }, 991 }, 992 }, 993 } 994 err = tg.Validate(&Job{}) 995 expected = `Static port 123 already reserved by task-a:foo` 996 if !strings.Contains(err.Error(), expected) { 997 t.Errorf("expected %s but found: %v", expected, err) 998 } 999 1000 tg = &TaskGroup{ 1001 Name: "web", 1002 Count: 1, 1003 Tasks: []*Task{ 1004 {Name: "web", Leader: true}, 1005 {Name: "web", Leader: true}, 1006 {}, 1007 }, 1008 RestartPolicy: &RestartPolicy{ 1009 Interval: 5 * 
time.Minute, 1010 Delay: 10 * time.Second, 1011 Attempts: 10, 1012 Mode: RestartPolicyModeDelay, 1013 }, 1014 ReschedulePolicy: &ReschedulePolicy{ 1015 Interval: 5 * time.Minute, 1016 Attempts: 10, 1017 }, 1018 } 1019 1020 err = tg.Validate(j) 1021 mErr = err.(*multierror.Error) 1022 if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") { 1023 t.Fatalf("err: %s", err) 1024 } 1025 if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") { 1026 t.Fatalf("err: %s", err) 1027 } 1028 if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") { 1029 t.Fatalf("err: %s", err) 1030 } 1031 if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") { 1032 t.Fatalf("err: %s", err) 1033 } 1034 if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") { 1035 t.Fatalf("err: %s", err) 1036 } 1037 1038 // COMPAT: Enable in 0.7.0 1039 //j.Type = JobTypeBatch 1040 //err = tg.Validate(j) 1041 //if !strings.Contains(err.Error(), "does not allow update block") { 1042 //t.Fatalf("err: %s", err) 1043 //} 1044 } 1045 1046 func TestTask_Validate(t *testing.T) { 1047 task := &Task{} 1048 ephemeralDisk := DefaultEphemeralDisk() 1049 err := task.Validate(ephemeralDisk) 1050 mErr := err.(*multierror.Error) 1051 if !strings.Contains(mErr.Errors[0].Error(), "task name") { 1052 t.Fatalf("err: %s", err) 1053 } 1054 if !strings.Contains(mErr.Errors[1].Error(), "task driver") { 1055 t.Fatalf("err: %s", err) 1056 } 1057 if !strings.Contains(mErr.Errors[2].Error(), "task resources") { 1058 t.Fatalf("err: %s", err) 1059 } 1060 1061 task = &Task{Name: "web/foo"} 1062 err = task.Validate(ephemeralDisk) 1063 mErr = err.(*multierror.Error) 1064 if !strings.Contains(mErr.Errors[0].Error(), "slashes") { 1065 t.Fatalf("err: %s", err) 1066 } 1067 1068 task = &Task{ 1069 Name: "web", 1070 Driver: "docker", 1071 Resources: &Resources{ 1072 CPU: 100, 1073 MemoryMB: 100, 1074 IOPS: 10, 1075 }, 1076 LogConfig: DefaultLogConfig(), 1077 } 1078 ephemeralDisk.SizeMB = 200 1079 err = task.Validate(ephemeralDisk) 1080 if err != nil { 1081 t.Fatalf("err: %s", err) 1082 } 1083 1084 task.Constraints = append(task.Constraints, 1085 &Constraint{ 1086 Operand: ConstraintDistinctHosts, 1087 }, 1088 &Constraint{ 1089 Operand: ConstraintDistinctProperty, 1090 LTarget: "${meta.rack}", 1091 }) 1092 1093 err = task.Validate(ephemeralDisk) 1094 mErr = err.(*multierror.Error) 1095 if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") { 1096 t.Fatalf("err: %s", err) 1097 } 1098 if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") { 1099 t.Fatalf("err: %s", err) 1100 } 1101 } 1102 1103 func TestTask_Validate_Services(t *testing.T) { 1104 s1 := &Service{ 1105 Name: "service-name", 1106 PortLabel: "bar", 1107 Checks: []*ServiceCheck{ 1108 { 1109 Name: "check-name", 1110 Type: ServiceCheckTCP, 1111 Interval: 0 * time.Second, 1112 }, 1113 { 1114 Name: "check-name", 1115 Type: ServiceCheckTCP, 1116 Timeout: 2 * time.Second, 1117 }, 1118 { 1119 Name: "check-name", 1120 Type: ServiceCheckTCP, 1121 Interval: 1 * time.Second, 1122 }, 1123 }, 1124 } 1125 1126 s2 := &Service{ 1127 Name: "service-name", 1128 PortLabel: "bar", 1129 } 1130 1131 s3 := &Service{ 1132 Name: "service-A", 1133 PortLabel: "a", 1134 } 1135 s4 := &Service{ 1136 Name: "service-A", 1137 PortLabel: "b", 1138 } 1139 1140 ephemeralDisk := DefaultEphemeralDisk() 1141 ephemeralDisk.SizeMB = 200 1142 task := &Task{ 1143 Name: "web", 1144 Driver: "docker", 1145 
Resources: &Resources{ 1146 CPU: 100, 1147 MemoryMB: 100, 1148 IOPS: 10, 1149 }, 1150 Services: []*Service{s1, s2}, 1151 } 1152 1153 task1 := &Task{ 1154 Name: "web", 1155 Driver: "docker", 1156 Resources: DefaultResources(), 1157 Services: []*Service{s3, s4}, 1158 LogConfig: DefaultLogConfig(), 1159 } 1160 task1.Resources.Networks = []*NetworkResource{ 1161 { 1162 MBits: 10, 1163 DynamicPorts: []Port{ 1164 { 1165 Label: "a", 1166 Value: 1000, 1167 }, 1168 { 1169 Label: "b", 1170 Value: 2000, 1171 }, 1172 }, 1173 }, 1174 } 1175 1176 err := task.Validate(ephemeralDisk) 1177 if err == nil { 1178 t.Fatal("expected an error") 1179 } 1180 1181 if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") { 1182 t.Fatalf("err: %v", err) 1183 } 1184 1185 if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") { 1186 t.Fatalf("err: %v", err) 1187 } 1188 1189 if !strings.Contains(err.Error(), "missing required value interval") { 1190 t.Fatalf("err: %v", err) 1191 } 1192 1193 if !strings.Contains(err.Error(), "cannot be less than") { 1194 t.Fatalf("err: %v", err) 1195 } 1196 1197 if err = task1.Validate(ephemeralDisk); err != nil { 1198 t.Fatalf("err : %v", err) 1199 } 1200 } 1201 1202 func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { 1203 ephemeralDisk := DefaultEphemeralDisk() 1204 getTask := func(s *Service) *Task { 1205 task := &Task{ 1206 Name: "web", 1207 Driver: "docker", 1208 Resources: DefaultResources(), 1209 Services: []*Service{s}, 1210 LogConfig: DefaultLogConfig(), 1211 } 1212 task.Resources.Networks = []*NetworkResource{ 1213 { 1214 MBits: 10, 1215 DynamicPorts: []Port{ 1216 { 1217 Label: "http", 1218 Value: 80, 1219 }, 1220 }, 1221 }, 1222 } 1223 return task 1224 } 1225 1226 cases := []*Service{ 1227 { 1228 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1229 Name: "DriverModeWithLabel", 1230 PortLabel: "http", 1231 AddressMode: AddressModeDriver, 1232 }, 1233 { 1234 Name: "DriverModeWithPort", 1235 PortLabel: "80", 1236 AddressMode: AddressModeDriver, 1237 }, 1238 { 1239 Name: "HostModeWithLabel", 1240 PortLabel: "http", 1241 AddressMode: AddressModeHost, 1242 }, 1243 { 1244 Name: "HostModeWithoutLabel", 1245 AddressMode: AddressModeHost, 1246 }, 1247 { 1248 Name: "DriverModeWithoutLabel", 1249 AddressMode: AddressModeDriver, 1250 }, 1251 } 1252 1253 for _, service := range cases { 1254 task := getTask(service) 1255 t.Run(service.Name, func(t *testing.T) { 1256 if err := task.Validate(ephemeralDisk); err != nil { 1257 t.Fatalf("unexpected err: %v", err) 1258 } 1259 }) 1260 } 1261 } 1262 1263 func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { 1264 ephemeralDisk := DefaultEphemeralDisk() 1265 getTask := func(s *Service) *Task { 1266 task := &Task{ 1267 Name: "web", 1268 Driver: "docker", 1269 Resources: DefaultResources(), 1270 Services: []*Service{s}, 1271 LogConfig: DefaultLogConfig(), 1272 } 1273 task.Resources.Networks = []*NetworkResource{ 1274 { 1275 MBits: 10, 1276 DynamicPorts: []Port{ 1277 { 1278 Label: "http", 1279 Value: 80, 1280 }, 1281 }, 1282 }, 1283 } 1284 return task 1285 } 1286 1287 cases := []*Service{ 1288 { 1289 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 1290 Name: "DriverModeWithLabel", 1291 PortLabel: "asdf", 1292 AddressMode: AddressModeDriver, 1293 }, 1294 { 1295 Name: "HostModeWithLabel", 1296 PortLabel: "asdf", 1297 AddressMode: AddressModeHost, 1298 }, 1299 { 1300 Name: "HostModeWithPort", 1301 PortLabel: "80", 1302 AddressMode: AddressModeHost, 1303 }, 
1304 } 1305 1306 for _, service := range cases { 1307 task := getTask(service) 1308 t.Run(service.Name, func(t *testing.T) { 1309 err := task.Validate(ephemeralDisk) 1310 if err == nil { 1311 t.Fatalf("expected an error") 1312 } 1313 //t.Logf("err: %v", err) 1314 }) 1315 } 1316 } 1317 1318 func TestTask_Validate_Service_Check(t *testing.T) { 1319 1320 invalidCheck := ServiceCheck{ 1321 Name: "check-name", 1322 Command: "/bin/true", 1323 Type: ServiceCheckScript, 1324 Interval: 10 * time.Second, 1325 } 1326 1327 err := invalidCheck.validate() 1328 if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") { 1329 t.Fatalf("expected a timeout validation error but received: %q", err) 1330 } 1331 1332 check1 := ServiceCheck{ 1333 Name: "check-name", 1334 Type: ServiceCheckTCP, 1335 Interval: 10 * time.Second, 1336 Timeout: 2 * time.Second, 1337 } 1338 1339 if err := check1.validate(); err != nil { 1340 t.Fatalf("err: %v", err) 1341 } 1342 1343 check1.InitialStatus = "foo" 1344 err = check1.validate() 1345 if err == nil { 1346 t.Fatal("Expected an error") 1347 } 1348 1349 if !strings.Contains(err.Error(), "invalid initial check state (foo)") { 1350 t.Fatalf("err: %v", err) 1351 } 1352 1353 check1.InitialStatus = api.HealthCritical 1354 err = check1.validate() 1355 if err != nil { 1356 t.Fatalf("err: %v", err) 1357 } 1358 1359 check1.InitialStatus = api.HealthPassing 1360 err = check1.validate() 1361 if err != nil { 1362 t.Fatalf("err: %v", err) 1363 } 1364 1365 check1.InitialStatus = "" 1366 err = check1.validate() 1367 if err != nil { 1368 t.Fatalf("err: %v", err) 1369 } 1370 1371 check2 := ServiceCheck{ 1372 Name: "check-name-2", 1373 Type: ServiceCheckHTTP, 1374 Interval: 10 * time.Second, 1375 Timeout: 2 * time.Second, 1376 Path: "/foo/bar", 1377 } 1378 1379 err = check2.validate() 1380 if err != nil { 1381 t.Fatalf("err: %v", err) 1382 } 1383 1384 check2.Path = "" 1385 err = check2.validate() 1386 if err == nil { 1387 t.Fatal("Expected an error") 1388 } 1389 if !strings.Contains(err.Error(), "valid http path") { 1390 t.Fatalf("err: %v", err) 1391 } 1392 1393 check2.Path = "http://www.example.com" 1394 err = check2.validate() 1395 if err == nil { 1396 t.Fatal("Expected an error") 1397 } 1398 if !strings.Contains(err.Error(), "relative http path") { 1399 t.Fatalf("err: %v", err) 1400 } 1401 } 1402 1403 // TestTask_Validate_Service_Check_AddressMode asserts that checks do not 1404 // inherit address mode but do inherit ports. 
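// In the cases below, a service may itself use a numeric port label (e.g.
// "80") when its AddressMode is "driver", but a tcp check attached to it
// cannot rely on that numeric port: the check must either reference a port
// label declared on the task's network (such as "http") or set its own
// AddressMode to "driver".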
1405 func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { 1406 getTask := func(s *Service) *Task { 1407 return &Task{ 1408 Resources: &Resources{ 1409 Networks: []*NetworkResource{ 1410 { 1411 DynamicPorts: []Port{ 1412 { 1413 Label: "http", 1414 Value: 9999, 1415 }, 1416 }, 1417 }, 1418 }, 1419 }, 1420 Services: []*Service{s}, 1421 } 1422 } 1423 1424 cases := []struct { 1425 Service *Service 1426 ErrContains string 1427 }{ 1428 { 1429 Service: &Service{ 1430 Name: "invalid-driver", 1431 PortLabel: "80", 1432 AddressMode: "host", 1433 }, 1434 ErrContains: `port label "80" referenced`, 1435 }, 1436 { 1437 Service: &Service{ 1438 Name: "http-driver-fail-1", 1439 PortLabel: "80", 1440 AddressMode: "driver", 1441 Checks: []*ServiceCheck{ 1442 { 1443 Name: "invalid-check-1", 1444 Type: "tcp", 1445 Interval: time.Second, 1446 Timeout: time.Second, 1447 }, 1448 }, 1449 }, 1450 ErrContains: `check "invalid-check-1" cannot use a numeric port`, 1451 }, 1452 { 1453 Service: &Service{ 1454 Name: "http-driver-fail-2", 1455 PortLabel: "80", 1456 AddressMode: "driver", 1457 Checks: []*ServiceCheck{ 1458 { 1459 Name: "invalid-check-2", 1460 Type: "tcp", 1461 PortLabel: "80", 1462 Interval: time.Second, 1463 Timeout: time.Second, 1464 }, 1465 }, 1466 }, 1467 ErrContains: `check "invalid-check-2" cannot use a numeric port`, 1468 }, 1469 { 1470 Service: &Service{ 1471 Name: "http-driver-fail-3", 1472 PortLabel: "80", 1473 AddressMode: "driver", 1474 Checks: []*ServiceCheck{ 1475 { 1476 Name: "invalid-check-3", 1477 Type: "tcp", 1478 PortLabel: "missing-port-label", 1479 Interval: time.Second, 1480 Timeout: time.Second, 1481 }, 1482 }, 1483 }, 1484 ErrContains: `port label "missing-port-label" referenced`, 1485 }, 1486 { 1487 Service: &Service{ 1488 Name: "http-driver-passes", 1489 PortLabel: "80", 1490 AddressMode: "driver", 1491 Checks: []*ServiceCheck{ 1492 { 1493 Name: "valid-script-check", 1494 Type: "script", 1495 Command: "ok", 1496 Interval: time.Second, 1497 Timeout: time.Second, 1498 }, 1499 { 1500 Name: "valid-host-check", 1501 Type: "tcp", 1502 PortLabel: "http", 1503 Interval: time.Second, 1504 Timeout: time.Second, 1505 }, 1506 { 1507 Name: "valid-driver-check", 1508 Type: "tcp", 1509 AddressMode: "driver", 1510 Interval: time.Second, 1511 Timeout: time.Second, 1512 }, 1513 }, 1514 }, 1515 }, 1516 { 1517 Service: &Service{ 1518 Name: "empty-address-3673-passes-1", 1519 Checks: []*ServiceCheck{ 1520 { 1521 Name: "valid-port-label", 1522 Type: "tcp", 1523 PortLabel: "http", 1524 Interval: time.Second, 1525 Timeout: time.Second, 1526 }, 1527 { 1528 Name: "empty-is-ok", 1529 Type: "script", 1530 Command: "ok", 1531 Interval: time.Second, 1532 Timeout: time.Second, 1533 }, 1534 }, 1535 }, 1536 }, 1537 { 1538 Service: &Service{ 1539 Name: "empty-address-3673-passes-2", 1540 }, 1541 }, 1542 { 1543 Service: &Service{ 1544 Name: "empty-address-3673-fails", 1545 Checks: []*ServiceCheck{ 1546 { 1547 Name: "empty-is-not-ok", 1548 Type: "tcp", 1549 Interval: time.Second, 1550 Timeout: time.Second, 1551 }, 1552 }, 1553 }, 1554 ErrContains: `invalid: check requires a port but neither check nor service`, 1555 }, 1556 } 1557 1558 for _, tc := range cases { 1559 tc := tc 1560 task := getTask(tc.Service) 1561 t.Run(tc.Service.Name, func(t *testing.T) { 1562 err := validateServices(task) 1563 if err == nil && tc.ErrContains == "" { 1564 // Ok! 1565 return 1566 } 1567 if err == nil { 1568 t.Fatalf("no error returned. 
expected: %s", tc.ErrContains) 1569 } 1570 if !strings.Contains(err.Error(), tc.ErrContains) { 1571 t.Fatalf("expected %q but found: %v", tc.ErrContains, err) 1572 } 1573 }) 1574 } 1575 } 1576 1577 func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { 1578 invalidCheckRestart := &CheckRestart{ 1579 Limit: -1, 1580 Grace: -1, 1581 } 1582 1583 err := invalidCheckRestart.Validate() 1584 assert.NotNil(t, err, "invalidateCheckRestart.Validate()") 1585 assert.Len(t, err.(*multierror.Error).Errors, 2) 1586 1587 validCheckRestart := &CheckRestart{} 1588 assert.Nil(t, validCheckRestart.Validate()) 1589 1590 validCheckRestart.Limit = 1 1591 validCheckRestart.Grace = 1 1592 assert.Nil(t, validCheckRestart.Validate()) 1593 } 1594 1595 func TestTask_Validate_LogConfig(t *testing.T) { 1596 task := &Task{ 1597 LogConfig: DefaultLogConfig(), 1598 } 1599 ephemeralDisk := &EphemeralDisk{ 1600 SizeMB: 1, 1601 } 1602 1603 err := task.Validate(ephemeralDisk) 1604 mErr := err.(*multierror.Error) 1605 if !strings.Contains(mErr.Errors[3].Error(), "log storage") { 1606 t.Fatalf("err: %s", err) 1607 } 1608 } 1609 1610 func TestTask_Validate_Template(t *testing.T) { 1611 1612 bad := &Template{} 1613 task := &Task{ 1614 Templates: []*Template{bad}, 1615 } 1616 ephemeralDisk := &EphemeralDisk{ 1617 SizeMB: 1, 1618 } 1619 1620 err := task.Validate(ephemeralDisk) 1621 if !strings.Contains(err.Error(), "Template 1 validation failed") { 1622 t.Fatalf("err: %s", err) 1623 } 1624 1625 // Have two templates that share the same destination 1626 good := &Template{ 1627 SourcePath: "foo", 1628 DestPath: "local/foo", 1629 ChangeMode: "noop", 1630 } 1631 1632 task.Templates = []*Template{good, good} 1633 err = task.Validate(ephemeralDisk) 1634 if !strings.Contains(err.Error(), "same destination as") { 1635 t.Fatalf("err: %s", err) 1636 } 1637 1638 // Env templates can't use signals 1639 task.Templates = []*Template{ 1640 { 1641 Envvars: true, 1642 ChangeMode: "signal", 1643 }, 1644 } 1645 1646 err = task.Validate(ephemeralDisk) 1647 if err == nil { 1648 t.Fatalf("expected error from Template.Validate") 1649 } 1650 if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) { 1651 t.Errorf("expected to find %q but found %v", expected, err) 1652 } 1653 } 1654 1655 func TestTemplate_Validate(t *testing.T) { 1656 cases := []struct { 1657 Tmpl *Template 1658 Fail bool 1659 ContainsErrs []string 1660 }{ 1661 { 1662 Tmpl: &Template{}, 1663 Fail: true, 1664 ContainsErrs: []string{ 1665 "specify a source path", 1666 "specify a destination", 1667 TemplateChangeModeInvalidError.Error(), 1668 }, 1669 }, 1670 { 1671 Tmpl: &Template{ 1672 Splay: -100, 1673 }, 1674 Fail: true, 1675 ContainsErrs: []string{ 1676 "positive splay", 1677 }, 1678 }, 1679 { 1680 Tmpl: &Template{ 1681 ChangeMode: "foo", 1682 }, 1683 Fail: true, 1684 ContainsErrs: []string{ 1685 TemplateChangeModeInvalidError.Error(), 1686 }, 1687 }, 1688 { 1689 Tmpl: &Template{ 1690 ChangeMode: "signal", 1691 }, 1692 Fail: true, 1693 ContainsErrs: []string{ 1694 "specify signal value", 1695 }, 1696 }, 1697 { 1698 Tmpl: &Template{ 1699 SourcePath: "foo", 1700 DestPath: "../../root", 1701 ChangeMode: "noop", 1702 }, 1703 Fail: true, 1704 ContainsErrs: []string{ 1705 "destination escapes", 1706 }, 1707 }, 1708 { 1709 Tmpl: &Template{ 1710 SourcePath: "foo", 1711 DestPath: "local/foo", 1712 ChangeMode: "noop", 1713 }, 1714 Fail: false, 1715 }, 1716 { 1717 Tmpl: &Template{ 1718 SourcePath: "foo", 1719 DestPath: "local/foo", 1720 ChangeMode: "noop", 1721 Perms: 
"0444", 1722 }, 1723 Fail: false, 1724 }, 1725 { 1726 Tmpl: &Template{ 1727 SourcePath: "foo", 1728 DestPath: "local/foo", 1729 ChangeMode: "noop", 1730 Perms: "zza", 1731 }, 1732 Fail: true, 1733 ContainsErrs: []string{ 1734 "as octal", 1735 }, 1736 }, 1737 } 1738 1739 for i, c := range cases { 1740 err := c.Tmpl.Validate() 1741 if err != nil { 1742 if !c.Fail { 1743 t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err) 1744 } 1745 1746 e := err.Error() 1747 for _, exp := range c.ContainsErrs { 1748 if !strings.Contains(e, exp) { 1749 t.Fatalf("Cased %d: should have contained error %q: %q", i+1, exp, e) 1750 } 1751 } 1752 } else if c.Fail { 1753 t.Fatalf("Case %d: should have failed: %v", i+1, err) 1754 } 1755 } 1756 } 1757 1758 func TestConstraint_Validate(t *testing.T) { 1759 c := &Constraint{} 1760 err := c.Validate() 1761 mErr := err.(*multierror.Error) 1762 if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") { 1763 t.Fatalf("err: %s", err) 1764 } 1765 1766 c = &Constraint{ 1767 LTarget: "$attr.kernel.name", 1768 RTarget: "linux", 1769 Operand: "=", 1770 } 1771 err = c.Validate() 1772 if err != nil { 1773 t.Fatalf("err: %v", err) 1774 } 1775 1776 // Perform additional regexp validation 1777 c.Operand = ConstraintRegex 1778 c.RTarget = "(foo" 1779 err = c.Validate() 1780 mErr = err.(*multierror.Error) 1781 if !strings.Contains(mErr.Errors[0].Error(), "missing closing") { 1782 t.Fatalf("err: %s", err) 1783 } 1784 1785 // Perform version validation 1786 c.Operand = ConstraintVersion 1787 c.RTarget = "~> foo" 1788 err = c.Validate() 1789 mErr = err.(*multierror.Error) 1790 if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") { 1791 t.Fatalf("err: %s", err) 1792 } 1793 1794 // Perform distinct_property validation 1795 c.Operand = ConstraintDistinctProperty 1796 c.RTarget = "0" 1797 err = c.Validate() 1798 mErr = err.(*multierror.Error) 1799 if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") { 1800 t.Fatalf("err: %s", err) 1801 } 1802 1803 c.RTarget = "-1" 1804 err = c.Validate() 1805 mErr = err.(*multierror.Error) 1806 if !strings.Contains(mErr.Errors[0].Error(), "to uint64") { 1807 t.Fatalf("err: %s", err) 1808 } 1809 1810 // Perform distinct_hosts validation 1811 c.Operand = ConstraintDistinctHosts 1812 c.LTarget = "" 1813 c.RTarget = "" 1814 if err := c.Validate(); err != nil { 1815 t.Fatalf("expected valid constraint: %v", err) 1816 } 1817 1818 // Perform set_contains validation 1819 c.Operand = ConstraintSetContains 1820 c.RTarget = "" 1821 err = c.Validate() 1822 mErr = err.(*multierror.Error) 1823 if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") { 1824 t.Fatalf("err: %s", err) 1825 } 1826 1827 // Perform LTarget validation 1828 c.Operand = ConstraintRegex 1829 c.RTarget = "foo" 1830 c.LTarget = "" 1831 err = c.Validate() 1832 mErr = err.(*multierror.Error) 1833 if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") { 1834 t.Fatalf("err: %s", err) 1835 } 1836 1837 // Perform constraint type validation 1838 c.Operand = "foo" 1839 err = c.Validate() 1840 mErr = err.(*multierror.Error) 1841 if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") { 1842 t.Fatalf("err: %s", err) 1843 } 1844 } 1845 1846 func TestUpdateStrategy_Validate(t *testing.T) { 1847 u := &UpdateStrategy{ 1848 MaxParallel: 0, 1849 HealthCheck: "foo", 1850 MinHealthyTime: -10, 1851 HealthyDeadline: -15, 1852 AutoRevert: false, 1853 Canary: -1, 1854 } 1855 1856 err := u.Validate() 1857 mErr := err.(*multierror.Error) 
1858 if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") { 1859 t.Fatalf("err: %s", err) 1860 } 1861 if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than one") { 1862 t.Fatalf("err: %s", err) 1863 } 1864 if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") { 1865 t.Fatalf("err: %s", err) 1866 } 1867 if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") { 1868 t.Fatalf("err: %s", err) 1869 } 1870 if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") { 1871 t.Fatalf("err: %s", err) 1872 } 1873 if !strings.Contains(mErr.Errors[5].Error(), "Minimum healthy time must be less than healthy deadline") { 1874 t.Fatalf("err: %s", err) 1875 } 1876 } 1877 1878 func TestResource_NetIndex(t *testing.T) { 1879 r := &Resources{ 1880 Networks: []*NetworkResource{ 1881 {Device: "eth0"}, 1882 {Device: "lo0"}, 1883 {Device: ""}, 1884 }, 1885 } 1886 if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 { 1887 t.Fatalf("Bad: %d", idx) 1888 } 1889 if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 { 1890 t.Fatalf("Bad: %d", idx) 1891 } 1892 if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 { 1893 t.Fatalf("Bad: %d", idx) 1894 } 1895 } 1896 1897 func TestResource_Superset(t *testing.T) { 1898 r1 := &Resources{ 1899 CPU: 2000, 1900 MemoryMB: 2048, 1901 DiskMB: 10000, 1902 IOPS: 100, 1903 } 1904 r2 := &Resources{ 1905 CPU: 2000, 1906 MemoryMB: 1024, 1907 DiskMB: 5000, 1908 IOPS: 50, 1909 } 1910 1911 if s, _ := r1.Superset(r1); !s { 1912 t.Fatalf("bad") 1913 } 1914 if s, _ := r1.Superset(r2); !s { 1915 t.Fatalf("bad") 1916 } 1917 if s, _ := r2.Superset(r1); s { 1918 t.Fatalf("bad") 1919 } 1920 if s, _ := r2.Superset(r2); !s { 1921 t.Fatalf("bad") 1922 } 1923 } 1924 1925 func TestResource_Add(t *testing.T) { 1926 r1 := &Resources{ 1927 CPU: 2000, 1928 MemoryMB: 2048, 1929 DiskMB: 10000, 1930 IOPS: 100, 1931 Networks: []*NetworkResource{ 1932 { 1933 CIDR: "10.0.0.0/8", 1934 MBits: 100, 1935 ReservedPorts: []Port{{"ssh", 22}}, 1936 }, 1937 }, 1938 } 1939 r2 := &Resources{ 1940 CPU: 2000, 1941 MemoryMB: 1024, 1942 DiskMB: 5000, 1943 IOPS: 50, 1944 Networks: []*NetworkResource{ 1945 { 1946 IP: "10.0.0.1", 1947 MBits: 50, 1948 ReservedPorts: []Port{{"web", 80}}, 1949 }, 1950 }, 1951 } 1952 1953 err := r1.Add(r2) 1954 if err != nil { 1955 t.Fatalf("Err: %v", err) 1956 } 1957 1958 expect := &Resources{ 1959 CPU: 3000, 1960 MemoryMB: 3072, 1961 DiskMB: 15000, 1962 IOPS: 150, 1963 Networks: []*NetworkResource{ 1964 { 1965 CIDR: "10.0.0.0/8", 1966 MBits: 150, 1967 ReservedPorts: []Port{{"ssh", 22}, {"web", 80}}, 1968 }, 1969 }, 1970 } 1971 1972 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 1973 t.Fatalf("bad: %#v %#v", expect, r1) 1974 } 1975 } 1976 1977 func TestResource_Add_Network(t *testing.T) { 1978 r1 := &Resources{} 1979 r2 := &Resources{ 1980 Networks: []*NetworkResource{ 1981 { 1982 MBits: 50, 1983 DynamicPorts: []Port{{"http", 0}, {"https", 0}}, 1984 }, 1985 }, 1986 } 1987 r3 := &Resources{ 1988 Networks: []*NetworkResource{ 1989 { 1990 MBits: 25, 1991 DynamicPorts: []Port{{"admin", 0}}, 1992 }, 1993 }, 1994 } 1995 1996 err := r1.Add(r2) 1997 if err != nil { 1998 t.Fatalf("Err: %v", err) 1999 } 2000 err = r1.Add(r3) 2001 if err != nil { 2002 t.Fatalf("Err: %v", err) 2003 } 2004 2005 expect := &Resources{ 2006 Networks: []*NetworkResource{ 2007 { 2008 MBits: 75, 2009 DynamicPorts: []Port{{"http", 0}, {"https", 
0}, {"admin", 0}}, 2010 }, 2011 }, 2012 } 2013 2014 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 2015 t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0]) 2016 } 2017 } 2018 2019 func TestEncodeDecode(t *testing.T) { 2020 type FooRequest struct { 2021 Foo string 2022 Bar int 2023 Baz bool 2024 } 2025 arg := &FooRequest{ 2026 Foo: "test", 2027 Bar: 42, 2028 Baz: true, 2029 } 2030 buf, err := Encode(1, arg) 2031 if err != nil { 2032 t.Fatalf("err: %v", err) 2033 } 2034 2035 var out FooRequest 2036 err = Decode(buf[1:], &out) 2037 if err != nil { 2038 t.Fatalf("err: %v", err) 2039 } 2040 2041 if !reflect.DeepEqual(arg, &out) { 2042 t.Fatalf("bad: %#v %#v", arg, out) 2043 } 2044 } 2045 2046 func BenchmarkEncodeDecode(b *testing.B) { 2047 job := testJob() 2048 2049 for i := 0; i < b.N; i++ { 2050 buf, err := Encode(1, job) 2051 if err != nil { 2052 b.Fatalf("err: %v", err) 2053 } 2054 2055 var out Job 2056 err = Decode(buf[1:], &out) 2057 if err != nil { 2058 b.Fatalf("err: %v", err) 2059 } 2060 } 2061 } 2062 2063 func TestInvalidServiceCheck(t *testing.T) { 2064 s := Service{ 2065 Name: "service-name", 2066 PortLabel: "bar", 2067 Checks: []*ServiceCheck{ 2068 { 2069 Name: "check-name", 2070 Type: "lol", 2071 }, 2072 }, 2073 } 2074 if err := s.Validate(); err == nil { 2075 t.Fatalf("Service should be invalid (invalid type)") 2076 } 2077 2078 s = Service{ 2079 Name: "service.name", 2080 PortLabel: "bar", 2081 } 2082 if err := s.ValidateName(s.Name); err == nil { 2083 t.Fatalf("Service should be invalid (contains a dot): %v", err) 2084 } 2085 2086 s = Service{ 2087 Name: "-my-service", 2088 PortLabel: "bar", 2089 } 2090 if err := s.Validate(); err == nil { 2091 t.Fatalf("Service should be invalid (begins with a hyphen): %v", err) 2092 } 2093 2094 s = Service{ 2095 Name: "my-service-${NOMAD_META_FOO}", 2096 PortLabel: "bar", 2097 } 2098 if err := s.Validate(); err != nil { 2099 t.Fatalf("Service should be valid: %v", err) 2100 } 2101 2102 s = Service{ 2103 Name: "my_service-${NOMAD_META_FOO}", 2104 PortLabel: "bar", 2105 } 2106 if err := s.Validate(); err == nil { 2107 t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err) 2108 } 2109 2110 s = Service{ 2111 Name: "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456", 2112 PortLabel: "bar", 2113 } 2114 if err := s.ValidateName(s.Name); err == nil { 2115 t.Fatalf("Service should be invalid (too long): %v", err) 2116 } 2117 2118 s = Service{ 2119 Name: "service-name", 2120 Checks: []*ServiceCheck{ 2121 { 2122 Name: "check-tcp", 2123 Type: ServiceCheckTCP, 2124 Interval: 5 * time.Second, 2125 Timeout: 2 * time.Second, 2126 }, 2127 { 2128 Name: "check-http", 2129 Type: ServiceCheckHTTP, 2130 Path: "/foo", 2131 Interval: 5 * time.Second, 2132 Timeout: 2 * time.Second, 2133 }, 2134 }, 2135 } 2136 if err := s.Validate(); err == nil { 2137 t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err) 2138 } 2139 2140 s = Service{ 2141 Name: "service-name", 2142 Checks: []*ServiceCheck{ 2143 { 2144 Name: "check-script", 2145 Type: ServiceCheckScript, 2146 Command: "/bin/date", 2147 Interval: 5 * time.Second, 2148 Timeout: 2 * time.Second, 2149 }, 2150 }, 2151 } 2152 if err := s.Validate(); err != nil { 2153 t.Fatalf("un-expected error: %v", err) 2154 } 2155 } 2156 2157 func TestDistinctCheckID(t *testing.T) { 2158 c1 := ServiceCheck{ 2159 Name: "web-health", 2160 Type: "http", 2161 Path: "/health", 2162 Interval: 2 * time.Second, 2163 Timeout: 3 * time.Second, 2164 } 2165 
c2 := ServiceCheck{ 2166 Name: "web-health", 2167 Type: "http", 2168 Path: "/health1", 2169 Interval: 2 * time.Second, 2170 Timeout: 3 * time.Second, 2171 } 2172 2173 c3 := ServiceCheck{ 2174 Name: "web-health", 2175 Type: "http", 2176 Path: "/health", 2177 Interval: 4 * time.Second, 2178 Timeout: 3 * time.Second, 2179 } 2180 serviceID := "123" 2181 c1Hash := c1.Hash(serviceID) 2182 c2Hash := c2.Hash(serviceID) 2183 c3Hash := c3.Hash(serviceID) 2184 2185 if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash { 2186 t.Fatalf("Checks need to be unique c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash) 2187 } 2188 2189 } 2190 2191 func TestService_Canonicalize(t *testing.T) { 2192 job := "example" 2193 taskGroup := "cache" 2194 task := "redis" 2195 2196 s := Service{ 2197 Name: "${TASK}-db", 2198 } 2199 2200 s.Canonicalize(job, taskGroup, task) 2201 if s.Name != "redis-db" { 2202 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 2203 } 2204 2205 s.Name = "db" 2206 s.Canonicalize(job, taskGroup, task) 2207 if s.Name != "db" { 2208 t.Fatalf("Expected name: %v, Actual: %v", "db", s.Name) 2209 } 2210 2211 s.Name = "${JOB}-${TASKGROUP}-${TASK}-db" 2212 s.Canonicalize(job, taskGroup, task) 2213 if s.Name != "example-cache-redis-db" { 2214 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2215 } 2216 2217 s.Name = "${BASE}-db" 2218 s.Canonicalize(job, taskGroup, task) 2219 if s.Name != "example-cache-redis-db" { 2220 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 2221 } 2222 2223 } 2224 2225 func TestJob_ExpandServiceNames(t *testing.T) { 2226 j := &Job{ 2227 Name: "my-job", 2228 TaskGroups: []*TaskGroup{ 2229 { 2230 Name: "web", 2231 Tasks: []*Task{ 2232 { 2233 Name: "frontend", 2234 Services: []*Service{ 2235 { 2236 Name: "${BASE}-default", 2237 }, 2238 { 2239 Name: "jmx", 2240 }, 2241 }, 2242 }, 2243 }, 2244 }, 2245 { 2246 Name: "admin", 2247 Tasks: []*Task{ 2248 { 2249 Name: "admin-web", 2250 }, 2251 }, 2252 }, 2253 }, 2254 } 2255 2256 j.Canonicalize() 2257 2258 service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name 2259 if service1Name != "my-job-web-frontend-default" { 2260 t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name) 2261 } 2262 2263 service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name 2264 if service2Name != "jmx" { 2265 t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name) 2266 } 2267 2268 } 2269 2270 func TestPeriodicConfig_EnabledInvalid(t *testing.T) { 2271 // Create a config that is enabled but with no spec or spec type specified. 2272 p := &PeriodicConfig{Enabled: true} 2273 if err := p.Validate(); err == nil { 2274 t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid") 2275 } 2276 2277 // Create a config that is enabled, with a spec but no type specified. 2278 p = &PeriodicConfig{Enabled: true, Spec: "foo"} 2279 if err := p.Validate(); err == nil { 2280 t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid") 2281 } 2282 2283 // Create a config that is enabled, with a spec type but no spec specified. 2284 p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron} 2285 if err := p.Validate(); err == nil { 2286 t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid") 2287 } 2288 2289 // Create a config that is enabled, with a bad time zone.
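// The assertion only requires that the error mention "time zone"; "FOO" is not
// a real tz database name, unlike the zones accepted further down in
// TestPeriodicConfig_ValidTimeZone (e.g. "America/Chicago").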
2290 p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"} 2291 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") { 2292 t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err) 2293 } 2294 } 2295 2296 func TestPeriodicConfig_InvalidCron(t *testing.T) { 2297 specs := []string{"foo", "* *", "@foo"} 2298 for _, spec := range specs { 2299 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2300 p.Canonicalize() 2301 if err := p.Validate(); err == nil { 2302 t.Fatalf("Invalid cron spec %q should have failed validation", spec) 2303 } 2304 } 2305 } 2306 2307 func TestPeriodicConfig_ValidCron(t *testing.T) { 2308 specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} 2309 for _, spec := range specs { 2310 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2311 p.Canonicalize() 2312 if err := p.Validate(); err != nil { 2313 t.Fatalf("Valid cron spec %q failed validation: %v", spec, err) 2314 } 2315 } 2316 } 2317 2318 func TestPeriodicConfig_NextCron(t *testing.T) { 2319 from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) 2320 specs := []string{"0 0 29 2 * 1980", "*/5 * * * *"} 2321 expected := []time.Time{{}, time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC)} 2322 for i, spec := range specs { 2323 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2324 p.Canonicalize() 2325 n := p.Next(from) 2326 if expected[i] != n { 2327 t.Fatalf("Next(%v) returned %v; want %v", from, n, expected[i]) 2328 } 2329 } 2330 } 2331 2332 func TestPeriodicConfig_ValidTimeZone(t *testing.T) { 2333 zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} 2334 for _, zone := range zones { 2335 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} 2336 p.Canonicalize() 2337 if err := p.Validate(); err != nil { 2338 t.Fatalf("Valid tz errored: %v", err) 2339 } 2340 } 2341 } 2342 2343 func TestPeriodicConfig_DST(t *testing.T) { 2344 // On Sun, Mar 12, 2017 at 2:00 am, America/Los_Angeles springs forward one hour (UTC offset -8 -> -7) 2345 p := &PeriodicConfig{ 2346 Enabled: true, 2347 SpecType: PeriodicSpecCron, 2348 Spec: "0 2 11-12 3 * 2017", 2349 TimeZone: "America/Los_Angeles", 2350 } 2351 p.Canonicalize() 2352 2353 t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location) 2354 t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location) 2355 2356 // E1 is an 8 hour adjustment, E2 is a 7 hour adjustment 2357 e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC) 2358 e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC) 2359 2360 n1 := p.Next(t1).UTC() 2361 n2 := p.Next(t2).UTC() 2362 2363 if !reflect.DeepEqual(e1, n1) { 2364 t.Fatalf("Got %v; want %v", n1, e1) 2365 } 2366 if !reflect.DeepEqual(e2, n2) { 2367 t.Fatalf("Got %v; want %v", n2, e2) 2368 } 2369 } 2370 2371 func TestRestartPolicy_Validate(t *testing.T) { 2372 // Policy with acceptable restart options passes 2373 p := &RestartPolicy{ 2374 Mode: RestartPolicyModeFail, 2375 Attempts: 0, 2376 Interval: 5 * time.Second, 2377 } 2378 if err := p.Validate(); err != nil { 2379 t.Fatalf("err: %v", err) 2380 } 2381 2382 // Policy with ambiguous restart options fails 2383 p = &RestartPolicy{ 2384 Mode: RestartPolicyModeDelay, 2385 Attempts: 0, 2386 Interval: 5 * time.Second, 2387 } 2388 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") { 2389 t.Fatalf("expect ambiguity error, got: %v", err) 2390 } 2391 2392 // Bad policy mode fails 2393 p = &RestartPolicy{ 2394 Mode: "nope", 2395 Attempts: 1, 2396 Interval: 5 * time.Second, 2397
} 2398 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") { 2399 t.Fatalf("expect mode error, got: %v", err) 2400 } 2401 2402 // Fails when attempts*delay does not fit inside interval 2403 p = &RestartPolicy{ 2404 Mode: RestartPolicyModeDelay, 2405 Attempts: 3, 2406 Delay: 5 * time.Second, 2407 Interval: 5 * time.Second, 2408 } 2409 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") { 2410 t.Fatalf("expect restart interval error, got: %v", err) 2411 } 2412 2413 // Fails when interval is to small 2414 p = &RestartPolicy{ 2415 Mode: RestartPolicyModeDelay, 2416 Attempts: 3, 2417 Delay: 5 * time.Second, 2418 Interval: 2 * time.Second, 2419 } 2420 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") { 2421 t.Fatalf("expect interval too small error, got: %v", err) 2422 } 2423 } 2424 2425 func TestReschedulePolicy_Validate(t *testing.T) { 2426 type testCase struct { 2427 ReschedulePolicy *ReschedulePolicy 2428 err error 2429 } 2430 2431 testCases := []testCase{ 2432 { 2433 ReschedulePolicy: &ReschedulePolicy{ 2434 Attempts: 0, 2435 Interval: 0 * time.Second}, 2436 err: nil, 2437 }, 2438 { 2439 ReschedulePolicy: &ReschedulePolicy{ 2440 Attempts: 1, 2441 Interval: 5 * time.Minute}, 2442 err: nil, 2443 }, 2444 { 2445 ReschedulePolicy: &ReschedulePolicy{ 2446 Attempts: -1, 2447 Interval: 5 * time.Minute}, 2448 err: nil, 2449 }, 2450 { 2451 ReschedulePolicy: &ReschedulePolicy{ 2452 Attempts: 1, 2453 Interval: 1 * time.Second}, 2454 err: fmt.Errorf("Interval cannot be less than %v (got %v)", RestartPolicyMinInterval, time.Second), 2455 }, 2456 } 2457 2458 assert := assert.New(t) 2459 2460 for _, tc := range testCases { 2461 if tc.err != nil { 2462 assert.Contains(tc.ReschedulePolicy.Validate().Error(), tc.err.Error()) 2463 } else { 2464 assert.Nil(tc.err) 2465 } 2466 } 2467 } 2468 2469 func TestAllocation_Index(t *testing.T) { 2470 a1 := Allocation{ 2471 Name: "example.cache[1]", 2472 TaskGroup: "cache", 2473 JobID: "example", 2474 Job: &Job{ 2475 ID: "example", 2476 TaskGroups: []*TaskGroup{{Name: "cache"}}}, 2477 } 2478 e1 := uint(1) 2479 a2 := a1.Copy() 2480 a2.Name = "example.cache[713127]" 2481 e2 := uint(713127) 2482 2483 if a1.Index() != e1 || a2.Index() != e2 { 2484 t.Fatalf("Got %d and %d", a1.Index(), a2.Index()) 2485 } 2486 } 2487 2488 func TestTaskArtifact_Validate_Source(t *testing.T) { 2489 valid := &TaskArtifact{GetterSource: "google.com"} 2490 if err := valid.Validate(); err != nil { 2491 t.Fatalf("unexpected error: %v", err) 2492 } 2493 } 2494 2495 func TestTaskArtifact_Validate_Dest(t *testing.T) { 2496 valid := &TaskArtifact{GetterSource: "google.com"} 2497 if err := valid.Validate(); err != nil { 2498 t.Fatalf("unexpected error: %v", err) 2499 } 2500 2501 valid.RelativeDest = "local/" 2502 if err := valid.Validate(); err != nil { 2503 t.Fatalf("unexpected error: %v", err) 2504 } 2505 2506 valid.RelativeDest = "local/.." 2507 if err := valid.Validate(); err != nil { 2508 t.Fatalf("unexpected error: %v", err) 2509 } 2510 2511 valid.RelativeDest = "local/../../.." 
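// Unlike "local/.." above, which still resolves inside the task directory and
// validates cleanly, "local/../../.." climbs out of it, so the Validate call
// below is expected to return an error.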
2512 if err := valid.Validate(); err == nil { 2513 t.Fatalf("expected error: %v", err) 2514 } 2515 } 2516 2517 func TestAllocation_ShouldMigrate(t *testing.T) { 2518 alloc := Allocation{ 2519 PreviousAllocation: "123", 2520 TaskGroup: "foo", 2521 Job: &Job{ 2522 TaskGroups: []*TaskGroup{ 2523 { 2524 Name: "foo", 2525 EphemeralDisk: &EphemeralDisk{ 2526 Migrate: true, 2527 Sticky: true, 2528 }, 2529 }, 2530 }, 2531 }, 2532 } 2533 2534 if !alloc.ShouldMigrate() { 2535 t.Fatalf("bad: %v", alloc) 2536 } 2537 2538 alloc1 := Allocation{ 2539 PreviousAllocation: "123", 2540 TaskGroup: "foo", 2541 Job: &Job{ 2542 TaskGroups: []*TaskGroup{ 2543 { 2544 Name: "foo", 2545 EphemeralDisk: &EphemeralDisk{}, 2546 }, 2547 }, 2548 }, 2549 } 2550 2551 if alloc1.ShouldMigrate() { 2552 t.Fatalf("bad: %v", alloc) 2553 } 2554 2555 alloc2 := Allocation{ 2556 PreviousAllocation: "123", 2557 TaskGroup: "foo", 2558 Job: &Job{ 2559 TaskGroups: []*TaskGroup{ 2560 { 2561 Name: "foo", 2562 EphemeralDisk: &EphemeralDisk{ 2563 Sticky: false, 2564 Migrate: true, 2565 }, 2566 }, 2567 }, 2568 }, 2569 } 2570 2571 if alloc2.ShouldMigrate() { 2572 t.Fatalf("bad: %v", alloc) 2573 } 2574 2575 alloc3 := Allocation{ 2576 PreviousAllocation: "123", 2577 TaskGroup: "foo", 2578 Job: &Job{ 2579 TaskGroups: []*TaskGroup{ 2580 { 2581 Name: "foo", 2582 }, 2583 }, 2584 }, 2585 } 2586 2587 if alloc3.ShouldMigrate() { 2588 t.Fatalf("bad: %v", alloc) 2589 } 2590 2591 // No previous 2592 alloc4 := Allocation{ 2593 TaskGroup: "foo", 2594 Job: &Job{ 2595 TaskGroups: []*TaskGroup{ 2596 { 2597 Name: "foo", 2598 EphemeralDisk: &EphemeralDisk{ 2599 Migrate: true, 2600 Sticky: true, 2601 }, 2602 }, 2603 }, 2604 }, 2605 } 2606 2607 if alloc4.ShouldMigrate() { 2608 t.Fatalf("bad: %v", alloc4) 2609 } 2610 } 2611 2612 func TestTaskArtifact_Validate_Checksum(t *testing.T) { 2613 cases := []struct { 2614 Input *TaskArtifact 2615 Err bool 2616 }{ 2617 { 2618 &TaskArtifact{ 2619 GetterSource: "foo.com", 2620 GetterOptions: map[string]string{ 2621 "checksum": "no-type", 2622 }, 2623 }, 2624 true, 2625 }, 2626 { 2627 &TaskArtifact{ 2628 GetterSource: "foo.com", 2629 GetterOptions: map[string]string{ 2630 "checksum": "md5:toosmall", 2631 }, 2632 }, 2633 true, 2634 }, 2635 { 2636 &TaskArtifact{ 2637 GetterSource: "foo.com", 2638 GetterOptions: map[string]string{ 2639 "checksum": "invalid:type", 2640 }, 2641 }, 2642 true, 2643 }, 2644 } 2645 2646 for i, tc := range cases { 2647 err := tc.Input.Validate() 2648 if (err != nil) != tc.Err { 2649 t.Fatalf("case %d: %v", i, err) 2650 continue 2651 } 2652 } 2653 } 2654 2655 func TestAllocation_Terminated(t *testing.T) { 2656 type desiredState struct { 2657 ClientStatus string 2658 DesiredStatus string 2659 Terminated bool 2660 } 2661 2662 harness := []desiredState{ 2663 { 2664 ClientStatus: AllocClientStatusPending, 2665 DesiredStatus: AllocDesiredStatusStop, 2666 Terminated: false, 2667 }, 2668 { 2669 ClientStatus: AllocClientStatusRunning, 2670 DesiredStatus: AllocDesiredStatusStop, 2671 Terminated: false, 2672 }, 2673 { 2674 ClientStatus: AllocClientStatusFailed, 2675 DesiredStatus: AllocDesiredStatusStop, 2676 Terminated: true, 2677 }, 2678 { 2679 ClientStatus: AllocClientStatusFailed, 2680 DesiredStatus: AllocDesiredStatusRun, 2681 Terminated: true, 2682 }, 2683 } 2684 2685 for _, state := range harness { 2686 alloc := Allocation{} 2687 alloc.DesiredStatus = state.DesiredStatus 2688 alloc.ClientStatus = state.ClientStatus 2689 if alloc.Terminated() != state.Terminated { 2690 t.Fatalf("expected: %v, actual: %v", 
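			// Per the cases above, Terminated() keys off the client status: a terminal
			// client status (failed) reports true even when the desired status is run,
			// while a stop desired status with a pending or running client status does not.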
state.Terminated, alloc.Terminated()) 2691 } 2692 } 2693 } 2694 2695 func TestAllocation_ShouldReschedule(t *testing.T) { 2696 type testCase struct { 2697 Desc string 2698 FailTime time.Time 2699 ClientStatus string 2700 DesiredStatus string 2701 ReschedulePolicy *ReschedulePolicy 2702 RescheduleTrackers []*RescheduleEvent 2703 ShouldReschedule bool 2704 } 2705 2706 fail := time.Now() 2707 2708 harness := []testCase{ 2709 { 2710 Desc: "Reschedule when desired state is stop", 2711 ClientStatus: AllocClientStatusPending, 2712 DesiredStatus: AllocDesiredStatusStop, 2713 FailTime: fail, 2714 ReschedulePolicy: nil, 2715 ShouldReschedule: false, 2716 }, 2717 { 2718 Desc: "Disabled recheduling", 2719 ClientStatus: AllocClientStatusFailed, 2720 DesiredStatus: AllocDesiredStatusRun, 2721 FailTime: fail, 2722 ReschedulePolicy: &ReschedulePolicy{0, 1 * time.Minute}, 2723 ShouldReschedule: false, 2724 }, 2725 { 2726 Desc: "Reschedule when client status is complete", 2727 ClientStatus: AllocClientStatusComplete, 2728 DesiredStatus: AllocDesiredStatusRun, 2729 FailTime: fail, 2730 ReschedulePolicy: nil, 2731 ShouldReschedule: false, 2732 }, 2733 { 2734 Desc: "Reschedule with nil reschedule policy", 2735 ClientStatus: AllocClientStatusFailed, 2736 DesiredStatus: AllocDesiredStatusRun, 2737 FailTime: fail, 2738 ReschedulePolicy: nil, 2739 ShouldReschedule: false, 2740 }, 2741 { 2742 Desc: "Reschedule when client status is complete", 2743 ClientStatus: AllocClientStatusComplete, 2744 DesiredStatus: AllocDesiredStatusRun, 2745 FailTime: fail, 2746 ReschedulePolicy: nil, 2747 ShouldReschedule: false, 2748 }, 2749 { 2750 Desc: "Reschedule with policy when client status complete", 2751 ClientStatus: AllocClientStatusComplete, 2752 DesiredStatus: AllocDesiredStatusRun, 2753 FailTime: fail, 2754 ReschedulePolicy: &ReschedulePolicy{1, 1 * time.Minute}, 2755 ShouldReschedule: false, 2756 }, 2757 { 2758 Desc: "Reschedule with no previous attempts", 2759 ClientStatus: AllocClientStatusFailed, 2760 DesiredStatus: AllocDesiredStatusRun, 2761 FailTime: fail, 2762 ReschedulePolicy: &ReschedulePolicy{1, 1 * time.Minute}, 2763 ShouldReschedule: true, 2764 }, 2765 { 2766 Desc: "Reschedule with leftover attempts", 2767 ClientStatus: AllocClientStatusFailed, 2768 DesiredStatus: AllocDesiredStatusRun, 2769 ReschedulePolicy: &ReschedulePolicy{2, 5 * time.Minute}, 2770 FailTime: fail, 2771 RescheduleTrackers: []*RescheduleEvent{ 2772 { 2773 RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(), 2774 }, 2775 }, 2776 ShouldReschedule: true, 2777 }, 2778 { 2779 Desc: "Reschedule with too old previous attempts", 2780 ClientStatus: AllocClientStatusFailed, 2781 DesiredStatus: AllocDesiredStatusRun, 2782 FailTime: fail, 2783 ReschedulePolicy: &ReschedulePolicy{1, 5 * time.Minute}, 2784 RescheduleTrackers: []*RescheduleEvent{ 2785 { 2786 RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(), 2787 }, 2788 }, 2789 ShouldReschedule: true, 2790 }, 2791 { 2792 Desc: "Reschedule with no leftover attempts", 2793 ClientStatus: AllocClientStatusFailed, 2794 DesiredStatus: AllocDesiredStatusRun, 2795 FailTime: fail, 2796 ReschedulePolicy: &ReschedulePolicy{2, 5 * time.Minute}, 2797 RescheduleTrackers: []*RescheduleEvent{ 2798 { 2799 RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), 2800 }, 2801 { 2802 RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(), 2803 }, 2804 }, 2805 ShouldReschedule: false, 2806 }, 2807 } 2808 2809 for _, state := range harness { 2810 alloc := Allocation{} 2811 alloc.DesiredStatus = 
state.DesiredStatus 2812 alloc.ClientStatus = state.ClientStatus 2813 alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers} 2814 2815 t.Run(state.Desc, func(t *testing.T) { 2816 if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule { 2817 t.Fatalf("expected %v but got %v", state.ShouldReschedule, got) 2818 } 2819 }) 2820 2821 } 2822 } 2823 2824 func TestRescheduleTracker_Copy(t *testing.T) { 2825 type testCase struct { 2826 original *RescheduleTracker 2827 expected *RescheduleTracker 2828 } 2829 2830 cases := []testCase{ 2831 {nil, nil}, 2832 {&RescheduleTracker{Events: []*RescheduleEvent{ 2833 {2, "12", "12"}, 2834 }}, &RescheduleTracker{Events: []*RescheduleEvent{ 2835 {2, "12", "12"}, 2836 }}}, 2837 } 2838 2839 for _, tc := range cases { 2840 if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) { 2841 t.Fatalf("expected %v but got %v", *tc.expected, *got) 2842 } 2843 } 2844 } 2845 2846 func TestVault_Validate(t *testing.T) { 2847 v := &Vault{ 2848 Env: true, 2849 ChangeMode: VaultChangeModeNoop, 2850 } 2851 2852 if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") { 2853 t.Fatalf("Expected policy list empty error") 2854 } 2855 2856 v.Policies = []string{"foo", "root"} 2857 v.ChangeMode = VaultChangeModeSignal 2858 2859 err := v.Validate() 2860 if err == nil { 2861 t.Fatalf("Expected validation errors") 2862 } 2863 2864 if !strings.Contains(err.Error(), "Signal must") { 2865 t.Fatalf("Expected signal empty error") 2866 } 2867 if !strings.Contains(err.Error(), "root") { 2868 t.Fatalf("Expected root error") 2869 } 2870 } 2871 2872 func TestParameterizedJobConfig_Validate(t *testing.T) { 2873 d := &ParameterizedJobConfig{ 2874 Payload: "foo", 2875 } 2876 2877 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") { 2878 t.Fatalf("Expected unknown payload requirement: %v", err) 2879 } 2880 2881 d.Payload = DispatchPayloadOptional 2882 d.MetaOptional = []string{"foo", "bar"} 2883 d.MetaRequired = []string{"bar", "baz"} 2884 2885 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") { 2886 t.Fatalf("Expected meta not being disjoint error: %v", err) 2887 } 2888 } 2889 2890 func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { 2891 job := testJob() 2892 job.ParameterizedJob = &ParameterizedJobConfig{ 2893 Payload: DispatchPayloadOptional, 2894 } 2895 job.Type = JobTypeSystem 2896 2897 if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") { 2898 t.Fatalf("Expected bad scheduler tpye: %v", err) 2899 } 2900 } 2901 2902 func TestParameterizedJobConfig_Canonicalize(t *testing.T) { 2903 d := &ParameterizedJobConfig{} 2904 d.Canonicalize() 2905 if d.Payload != DispatchPayloadOptional { 2906 t.Fatalf("Canonicalize failed") 2907 } 2908 } 2909 2910 func TestDispatchPayloadConfig_Validate(t *testing.T) { 2911 d := &DispatchPayloadConfig{ 2912 File: "foo", 2913 } 2914 2915 // task/local/haha 2916 if err := d.Validate(); err != nil { 2917 t.Fatalf("bad: %v", err) 2918 } 2919 2920 // task/haha 2921 d.File = "../haha" 2922 if err := d.Validate(); err != nil { 2923 t.Fatalf("bad: %v", err) 2924 } 2925 2926 // ../haha 2927 d.File = "../../../haha" 2928 if err := d.Validate(); err == nil { 2929 t.Fatalf("bad: %v", err) 2930 } 2931 } 2932 2933 func TestIsRecoverable(t *testing.T) { 2934 if IsRecoverable(nil) { 2935 t.Errorf("nil should not be recoverable") 2936 } 2937 if 
IsRecoverable(NewRecoverableError(nil, true)) { 2938 t.Errorf("NewRecoverableError(nil, true) should not be recoverable") 2939 } 2940 if IsRecoverable(fmt.Errorf("i promise im recoverable")) { 2941 t.Errorf("Custom errors should not be recoverable") 2942 } 2943 if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) { 2944 t.Errorf("Explicitly unrecoverable errors should not be recoverable") 2945 } 2946 if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) { 2947 t.Errorf("Explicitly recoverable errors *should* be recoverable") 2948 } 2949 } 2950 2951 func TestACLTokenValidate(t *testing.T) { 2952 tk := &ACLToken{} 2953 2954 // Mising a type 2955 err := tk.Validate() 2956 assert.NotNil(t, err) 2957 if !strings.Contains(err.Error(), "client or management") { 2958 t.Fatalf("bad: %v", err) 2959 } 2960 2961 // Missing policies 2962 tk.Type = ACLClientToken 2963 err = tk.Validate() 2964 assert.NotNil(t, err) 2965 if !strings.Contains(err.Error(), "missing policies") { 2966 t.Fatalf("bad: %v", err) 2967 } 2968 2969 // Invalid policices 2970 tk.Type = ACLManagementToken 2971 tk.Policies = []string{"foo"} 2972 err = tk.Validate() 2973 assert.NotNil(t, err) 2974 if !strings.Contains(err.Error(), "associated with policies") { 2975 t.Fatalf("bad: %v", err) 2976 } 2977 2978 // Name too long policices 2979 tk.Name = uuid.Generate() + uuid.Generate() 2980 tk.Policies = nil 2981 err = tk.Validate() 2982 assert.NotNil(t, err) 2983 if !strings.Contains(err.Error(), "too long") { 2984 t.Fatalf("bad: %v", err) 2985 } 2986 2987 // Make it valid 2988 tk.Name = "foo" 2989 err = tk.Validate() 2990 assert.Nil(t, err) 2991 } 2992 2993 func TestACLTokenPolicySubset(t *testing.T) { 2994 tk := &ACLToken{ 2995 Type: ACLClientToken, 2996 Policies: []string{"foo", "bar", "baz"}, 2997 } 2998 2999 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 3000 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 3001 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 3002 assert.Equal(t, true, tk.PolicySubset([]string{})) 3003 assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"})) 3004 assert.Equal(t, false, tk.PolicySubset([]string{"new"})) 3005 3006 tk = &ACLToken{ 3007 Type: ACLManagementToken, 3008 } 3009 3010 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 3011 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 3012 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 3013 assert.Equal(t, true, tk.PolicySubset([]string{})) 3014 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"})) 3015 assert.Equal(t, true, tk.PolicySubset([]string{"new"})) 3016 } 3017 3018 func TestACLTokenSetHash(t *testing.T) { 3019 tk := &ACLToken{ 3020 Name: "foo", 3021 Type: ACLClientToken, 3022 Policies: []string{"foo", "bar"}, 3023 Global: false, 3024 } 3025 out1 := tk.SetHash() 3026 assert.NotNil(t, out1) 3027 assert.NotNil(t, tk.Hash) 3028 assert.Equal(t, out1, tk.Hash) 3029 3030 tk.Policies = []string{"foo"} 3031 out2 := tk.SetHash() 3032 assert.NotNil(t, out2) 3033 assert.NotNil(t, tk.Hash) 3034 assert.Equal(t, out2, tk.Hash) 3035 assert.NotEqual(t, out1, out2) 3036 } 3037 3038 func TestACLPolicySetHash(t *testing.T) { 3039 ap := &ACLPolicy{ 3040 Name: "foo", 3041 Description: "great policy", 3042 Rules: "node { policy = \"read\" }", 3043 } 3044 out1 := ap.SetHash() 3045 assert.NotNil(t, out1) 3046 assert.NotNil(t, ap.Hash) 3047 assert.Equal(t, out1, ap.Hash) 3048 3049 ap.Rules = "node { policy = \"write\" }" 3050 out2 := 
ap.SetHash() 3051 assert.NotNil(t, out2) 3052 assert.NotNil(t, ap.Hash) 3053 assert.Equal(t, out2, ap.Hash) 3054 assert.NotEqual(t, out1, out2) 3055 } 3056 3057 func TestTaskEventPopulate(t *testing.T) { 3058 prepopulatedEvent := NewTaskEvent(TaskSetup) 3059 prepopulatedEvent.DisplayMessage = "Hola" 3060 testcases := []struct { 3061 event *TaskEvent 3062 expectedMsg string 3063 }{ 3064 {nil, ""}, 3065 {prepopulatedEvent, "Hola"}, 3066 {NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"}, 3067 {NewTaskEvent(TaskStarted), "Task started by client"}, 3068 {NewTaskEvent(TaskReceived), "Task received by client"}, 3069 {NewTaskEvent(TaskFailedValidation), "Validation of task failed"}, 3070 {NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"}, 3071 {NewTaskEvent(TaskSetupFailure), "Task setup failed"}, 3072 {NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"}, 3073 {NewTaskEvent(TaskDriverFailure), "Failed to start task"}, 3074 {NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"}, 3075 {NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"}, 3076 {NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"}, 3077 {NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"}, 3078 {NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"}, 3079 {NewTaskEvent(TaskKilling), "Sent interrupt"}, 3080 {NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Killing task: Its time for you to die"}, 3081 {NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. 
Waiting 1s before force killing"}, 3082 {NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"}, 3083 {NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""}, 3084 {NewTaskEvent(TaskKilled), "Task successfully killed"}, 3085 {NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"}, 3086 {NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"}, 3087 {NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"}, 3088 {NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"}, 3089 {NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"}, 3090 {NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"}, 3091 {NewTaskEvent(TaskSignaling), "Task being sent a signal"}, 3092 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"}, 3093 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"}, 3094 {NewTaskEvent(TaskRestartSignal), "Task signaled to restart"}, 3095 {NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"}, 3096 {NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"}, 3097 {NewTaskEvent("Unknown Type, No message"), ""}, 3098 {NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"}, 3099 } 3100 3101 for _, tc := range testcases { 3102 tc.event.PopulateEventDisplayMessage() 3103 if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg { 3104 t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage) 3105 } 3106 } 3107 } 3108 3109 func TestNetworkResourcesEquals(t *testing.T) { 3110 require := require.New(t) 3111 var networkResourcesTest = []struct { 3112 input []*NetworkResource 3113 expected bool 3114 errorMsg string 3115 }{ 3116 { 3117 []*NetworkResource{ 3118 { 3119 IP: "10.0.0.1", 3120 MBits: 50, 3121 ReservedPorts: []Port{{"web", 80}}, 3122 }, 3123 { 3124 IP: "10.0.0.1", 3125 MBits: 50, 3126 ReservedPorts: []Port{{"web", 80}}, 3127 }, 3128 }, 3129 true, 3130 "Equal network resources should return true", 3131 }, 3132 { 3133 []*NetworkResource{ 3134 { 3135 IP: "10.0.0.0", 3136 MBits: 50, 3137 ReservedPorts: []Port{{"web", 80}}, 3138 }, 3139 { 3140 IP: "10.0.0.1", 3141 MBits: 50, 3142 ReservedPorts: []Port{{"web", 80}}, 3143 }, 3144 }, 3145 false, 3146 "Different IP addresses should return false", 3147 }, 3148 { 3149 []*NetworkResource{ 3150 { 3151 IP: "10.0.0.1", 3152 MBits: 40, 3153 ReservedPorts: []Port{{"web", 80}}, 3154 }, 3155 { 3156 IP: "10.0.0.1", 3157 MBits: 50, 3158 ReservedPorts: []Port{{"web", 80}}, 3159 }, 3160 }, 3161 false, 3162 "Different MBits values should return false", 3163 }, 3164 { 3165 []*NetworkResource{ 3166 { 3167 IP: "10.0.0.1", 3168 MBits: 50, 3169 ReservedPorts: []Port{{"web", 80}}, 3170 }, 3171 { 3172 IP: "10.0.0.1", 3173 MBits: 50, 3174 ReservedPorts: []Port{{"web", 80}, {"web", 80}}, 3175 }, 3176 }, 3177 false, 3178 "Different ReservedPorts lengths should return false", 3179 }, 3180 { 3181 []*NetworkResource{ 3182 { 3183 IP: "10.0.0.1", 3184 MBits: 50, 3185 ReservedPorts: []Port{{"web", 80}}, 3186 }, 3187 { 3188 IP: "10.0.0.1", 3189 MBits: 50, 3190 ReservedPorts: []Port{}, 3191 }, 3192 }, 3193 false, 3194 "Empty and non empty ReservedPorts values should return false", 3195 }, 3196 { 
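			// Final case: the same port value (80) registered under a different label
			// ("notweb" vs "web"), which the test expects Equals to treat as unequal.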
3197 []*NetworkResource{ 3198 { 3199 IP: "10.0.0.1", 3200 MBits: 50, 3201 ReservedPorts: []Port{{"web", 80}}, 3202 }, 3203 { 3204 IP: "10.0.0.1", 3205 MBits: 50, 3206 ReservedPorts: []Port{{"notweb", 80}}, 3207 }, 3208 }, 3209 false, 3210 "Different valued ReservedPorts values should return false", 3211 }, 3212 { 3213 []*NetworkResource{ 3214 { 3215 IP: "10.0.0.1", 3216 MBits: 50, 3217 DynamicPorts: []Port{{"web", 80}}, 3218 }, 3219 { 3220 IP: "10.0.0.1", 3221 MBits: 50, 3222 DynamicPorts: []Port{{"web", 80}, {"web", 80}}, 3223 }, 3224 }, 3225 false, 3226 "Different DynamicPorts lengths should return false", 3227 }, 3228 { 3229 []*NetworkResource{ 3230 { 3231 IP: "10.0.0.1", 3232 MBits: 50, 3233 DynamicPorts: []Port{{"web", 80}}, 3234 }, 3235 { 3236 IP: "10.0.0.1", 3237 MBits: 50, 3238 DynamicPorts: []Port{}, 3239 }, 3240 }, 3241 false, 3242 "Empty and non empty DynamicPorts values should return false", 3243 }, 3244 { 3245 []*NetworkResource{ 3246 { 3247 IP: "10.0.0.1", 3248 MBits: 50, 3249 DynamicPorts: []Port{{"web", 80}}, 3250 }, 3251 { 3252 IP: "10.0.0.1", 3253 MBits: 50, 3254 DynamicPorts: []Port{{"notweb", 80}}, 3255 }, 3256 }, 3257 false, 3258 "Different valued DynamicPorts values should return false", 3259 }, 3260 } 3261 for _, testCase := range networkResourcesTest { 3262 first := testCase.input[0] 3263 second := testCase.input[1] 3264 require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg) 3265 } 3266 }
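
// demoPeriodicNext is an illustrative sketch added alongside this listing, not
// part of the upstream test suite: it strings together the PeriodicConfig calls
// exercised by TestPeriodicConfig_NextCron and TestPeriodicConfig_ValidTimeZone
// above (Canonicalize, then Validate, then Next) to show the call order those
// tests rely on. All identifiers are ones already used in this file; the cron
// spec and the "from" time are taken from TestPeriodicConfig_NextCron.
func demoPeriodicNext() (time.Time, error) {
	p := &PeriodicConfig{
		Enabled:  true,
		SpecType: PeriodicSpecCron,
		Spec:     "*/5 * * * *", // every five minutes
		TimeZone: "UTC",
	}
	// Every test above calls Canonicalize before using the config; among other
	// things it resolves TimeZone into p.location (see TestPeriodicConfig_DST).
	p.Canonicalize()
	if err := p.Validate(); err != nil {
		return time.Time{}, err
	}
	from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC)
	// Per TestPeriodicConfig_NextCron, the next firing after 23:22:30 is 23:25:00 UTC.
	return p.Next(from), nil
}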