github.com/djenriquez/nomad-1@v0.8.1/nomad/structs/structs_test.go

package structs

import (
	"fmt"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestJob_Validate(t *testing.T) {
	j := &Job{}
	err := j.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "job region") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "job ID") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "job name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "namespace") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "job type") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[5].Error(), "priority") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[6].Error(), "datacenters") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[7].Error(), "task groups") {
		t.Fatalf("err: %s", err)
	}

	j = &Job{
		Type: "invalid-job-type",
	}
	err = j.Validate()
	if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	j = &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Error(), "Periodic") {
		t.Fatalf("err: %s", err)
	}

	j = &Job{
		Region: "global",
		ID: uuid.Generate(),
		Namespace: "test",
		Name: "my-job",
		Type: JobTypeService,
		Priority: 50,
		Datacenters: []string{"dc1"},
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay: 10 * time.Second,
					Attempts: 10,
				},
			},
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay: 10 * time.Second,
					Attempts: 10,
				},
			},
			{
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay: 10 * time.Second,
					Attempts: 10,
				},
			},
		},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") {
		t.Fatalf("err: %s", err)
	}
}

func TestJob_Warnings(t *testing.T) {
	cases := []struct {
		Name string
		Job *Job
		Expected []string
	}{
		{
			Name: "Higher counts for update stanza",
			Expected: []string{"max parallel count is greater"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Name: "foo",
						Count: 2,
						Update: &UpdateStrategy{
							MaxParallel: 10,
						},
					},
				},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			warnings := c.Job.Warnings()
			if warnings == nil {
				if len(c.Expected) == 0 {
					return
				} else {
					t.Fatal("Got no warnings when they were expected")
				}
			}

			a := warnings.Error()
			for _, e := range c.Expected {
				if !strings.Contains(a, e) {
					t.Fatalf("Got warnings %q; didn't contain %q", a, e)
				}
			}
		})
	}
}

func TestJob_SpecChanged(t *testing.T) {
	// Get a base test job
	base := testJob()

	// Only modify the indexes/mutable state of the job
	mutatedBase := base.Copy()
	mutatedBase.Status = "foo"
	mutatedBase.ModifyIndex = base.ModifyIndex + 100

	// changed contains a spec change that should be detected
	change := base.Copy()
	change.Priority = 99

	cases := []struct {
		Name string
		Original *Job
		New *Job
		Changed bool
	}{
		{
			Name: "Same job except mutable indexes",
			Changed: false,
			Original: base,
			New: mutatedBase,
		},
		{
			Name: "Different",
			Changed: true,
			Original: base,
			New: change,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			if actual := c.Original.SpecChanged(c.New); actual != c.Changed {
				t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed)
			}
		})
	}
}

func testJob() *Job {
	return &Job{
		Region: "global",
		ID: uuid.Generate(),
		Namespace: "test",
		Name: "my-job",
		Type: JobTypeService,
		Priority: 50,
		AllAtOnce: false,
		Datacenters: []string{"dc1"},
		Constraints: []*Constraint{
			{
				LTarget: "$attr.kernel.name",
				RTarget: "linux",
				Operand: "=",
			},
		},
		Periodic: &PeriodicConfig{
			Enabled: false,
		},
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				Count: 10,
				EphemeralDisk: DefaultEphemeralDisk(),
				RestartPolicy: &RestartPolicy{
					Mode: RestartPolicyModeFail,
					Attempts: 3,
					Interval: 10 * time.Minute,
					Delay: 1 * time.Minute,
				},
				ReschedulePolicy: &ReschedulePolicy{
					Interval: 5 * time.Minute,
					Attempts: 10,
					Delay: 5 * time.Second,
					DelayFunction: "constant",
				},
				Tasks: []*Task{
					{
						Name: "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{
							"FOO": "bar",
						},
						Artifacts: []*TaskArtifact{
							{
								GetterSource: "http://foo.com",
							},
						},
						Services: []*Service{
							{
								Name: "${TASK}-frontend",
								PortLabel: "http",
							},
						},
						Resources: &Resources{
							CPU: 500,
							MemoryMB: 256,
							Networks: []*NetworkResource{
								{
									MBits: 50,
									DynamicPorts: []Port{{Label: "http"}},
								},
							},
						},
						LogConfig: &LogConfig{
							MaxFiles: 10,
							MaxFileSizeMB: 1,
						},
					},
				},
				Meta: map[string]string{
					"elb_check_type": "http",
					"elb_check_interval": "30s",
					"elb_check_min": "3",
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
	}
}

func TestJob_Copy(t *testing.T) {
	j := testJob()
	c := j.Copy()
	if !reflect.DeepEqual(j, c) {
		t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
	}
}

func TestJob_IsPeriodic(t *testing.T) {
	j := &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	if !j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned false on periodic job")
	}

	j = &Job{
		Type: JobTypeService,
	}
	if j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned true on non-periodic job")
	}
}

func TestJob_IsPeriodicActive(t *testing.T) {
	cases := []struct {
		job *Job
		active bool
	}{
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
			},
			active: true,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
				Stop: true,
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
				ParameterizedJob: &ParameterizedJobConfig{},
			},
			active: false,
		},
	}

	for i, c := range cases {
		if act := c.job.IsPeriodicActive(); act != c.active {
			t.Fatalf("case %d failed: got %v; want %v", i, act, c.active)
		}
	}
}

func TestJob_SystemJob_Validate(t *testing.T) {
	j := testJob()
	j.Type = JobTypeSystem
	j.TaskGroups[0].ReschedulePolicy = nil
	j.Canonicalize()

	err := j.Validate()
	if err == nil || !strings.Contains(err.Error(), "exceed") {
		t.Fatalf("expect error due to count")
	}

	j.TaskGroups[0].Count = 0
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	j.TaskGroups[0].Count = 1
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
}

func TestJob_VaultPolicies(t *testing.T) {
	j0 := &Job{}
	e0 := make(map[string]map[string]*Vault, 0)

	vj1 := &Vault{
		Policies: []string{
			"p1",
			"p2",
		},
	}
	vj2 := &Vault{
		Policies: []string{
			"p3",
			"p4",
		},
	}
	vj3 := &Vault{
		Policies: []string{
			"p5",
		},
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name: "t2",
						Vault: vj1,
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name: "t3",
						Vault: vj2,
					},
					{
						Name: "t4",
						Vault: vj3,
					},
				},
			},
		},
	}

	e1 := map[string]map[string]*Vault{
		"foo": {
			"t2": vj1,
		},
		"bar": {
			"t3": vj2,
			"t4": vj3,
		},
	}

	cases := []struct {
		Job *Job
		Expected map[string]map[string]*Vault
	}{
		{
			Job: j0,
			Expected: e0,
		},
		{
			Job: j1,
			Expected: e1,
		},
	}

	for i, c := range cases {
		got := c.Job.VaultPolicies()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

func TestJob_RequiredSignals(t *testing.T) {
	j0 := &Job{}
	e0 := make(map[string]map[string][]string, 0)

	vj1 := &Vault{
		Policies: []string{"p1"},
		ChangeMode: VaultChangeModeNoop,
	}
	vj2 := &Vault{
		Policies: []string{"p1"},
		ChangeMode: VaultChangeModeSignal,
		ChangeSignal: "SIGUSR1",
	}
	tj1 := &Template{
		SourcePath: "foo",
		DestPath: "bar",
		ChangeMode: TemplateChangeModeNoop,
	}
	tj2 := &Template{
		SourcePath: "foo",
		DestPath: "bar",
		ChangeMode: TemplateChangeModeSignal,
		ChangeSignal: "SIGUSR2",
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name: "t2",
						Vault: vj2,
						Templates: []*Template{tj2},
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name: "t3",
						Vault: vj1,
						Templates: []*Template{tj1},
					},
					{
						Name: "t4",
						Vault: vj2,
					},
				},
			},
		},
	}

	e1 := map[string]map[string][]string{
		"foo": {
			"t2": {"SIGUSR1", "SIGUSR2"},
		},
		"bar": {
			"t4": {"SIGUSR1"},
		},
	}

	j2 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
						KillSignal: "SIGQUIT",
					},
				},
			},
		},
	}

	e2 := map[string]map[string][]string{
		"foo": {
			"t1": {"SIGQUIT"},
		},
	}

	cases := []struct {
		Job *Job
		Expected map[string]map[string][]string
	}{
		{
			Job: j0,
			Expected: e0,
		},
		{
			Job: j1,
			Expected: e1,
		},
		{
			Job: j2,
			Expected: e2,
		},
	}

	for i, c := range cases {
		got := c.Job.RequiredSignals()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

func TestTaskGroup_Validate(t *testing.T) {
	j := testJob()
	tg := &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay: 10 * time.Second,
			Attempts: 10,
			Mode: RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay: 5 * time.Second,
		},
	}
	err := tg.Validate(j)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "group name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
			{
				Name: "task-b",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected := `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{
								{Label: "foo", Value: 123},
								{Label: "bar", Value: 123},
							},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Name: "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
			{Name: "web", Leader: true},
			{},
		},
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay: 10 * time.Second,
			Attempts: 10,
			Mode: RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 10,
			Delay: 5 * time.Second,
			DelayFunction: "constant",
		},
	}

	err = tg.Validate(j)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Name: "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
		},
		Update: DefaultUpdateStrategy.Copy(),
	}
	j.Type = JobTypeBatch
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "does not allow update block") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay: 10 * time.Second,
			Attempts: 10,
			Mode: RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay: 5 * time.Second,
		},
	}
	j.Type = JobTypeSystem
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate(t *testing.T) {
	task := &Task{}
	ephemeralDisk := DefaultEphemeralDisk()
	err := task.Validate(ephemeralDisk)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "task name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task driver") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "task resources") {
		t.Fatalf("err: %s", err)
	}

	task = &Task{Name: "web/foo"}
	err = task.Validate(ephemeralDisk)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "slashes") {
		t.Fatalf("err: %s", err)
	}

	task = &Task{
		Name: "web",
		Driver: "docker",
		Resources: &Resources{
			CPU: 100,
			MemoryMB: 100,
			IOPS: 10,
		},
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk.SizeMB = 200
	err = task.Validate(ephemeralDisk)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	task.Constraints = append(task.Constraints,
		&Constraint{
			Operand: ConstraintDistinctHosts,
		},
		&Constraint{
			Operand: ConstraintDistinctProperty,
			LTarget: "${meta.rack}",
		})

	err = task.Validate(ephemeralDisk)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate_Services(t *testing.T) {
	s1 := &Service{
		Name: "service-name",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name: "check-name",
				Type: ServiceCheckTCP,
				Interval: 0 * time.Second,
			},
			{
				Name: "check-name",
				Type: ServiceCheckTCP,
				Timeout: 2 * time.Second,
			},
			{
				Name: "check-name",
				Type: ServiceCheckTCP,
				Interval: 1 * time.Second,
			},
		},
	}

	s2 := &Service{
		Name: "service-name",
		PortLabel: "bar",
	}

	s3 := &Service{
		Name: "service-A",
		PortLabel: "a",
	}
	s4 := &Service{
		Name: "service-A",
		PortLabel: "b",
	}

	ephemeralDisk := DefaultEphemeralDisk()
	ephemeralDisk.SizeMB = 200
	task := &Task{
		Name: "web",
		Driver: "docker",
		Resources: &Resources{
			CPU: 100,
			MemoryMB: 100,
			IOPS: 10,
		},
		Services: []*Service{s1, s2},
	}

	task1 := &Task{
		Name: "web",
		Driver: "docker",
		Resources: DefaultResources(),
		Services: []*Service{s3, s4},
		LogConfig: DefaultLogConfig(),
	}
	task1.Resources.Networks = []*NetworkResource{
		{
			MBits: 10,
			DynamicPorts: []Port{
				{
					Label: "a",
					Value: 1000,
				},
				{
					Label: "b",
					Value: 2000,
				},
			},
		},
	}

	err := task.Validate(ephemeralDisk)
	if err == nil {
		t.Fatal("expected an error")
	}

	if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "missing required value interval") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "cannot be less than") {
		t.Fatalf("err: %v", err)
	}

	if err = task1.Validate(ephemeralDisk); err != nil {
		t.Fatalf("err : %v", err)
	}
}

func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		task := &Task{
			Name: "web",
			Driver: "docker",
			Resources: DefaultResources(),
			Services: []*Service{s},
			LogConfig: DefaultLogConfig(),
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name: "DriverModeWithLabel",
			PortLabel: "http",
			AddressMode: AddressModeDriver,
		},
		{
			Name: "DriverModeWithPort",
			PortLabel: "80",
			AddressMode: AddressModeDriver,
		},
		{
			Name: "HostModeWithLabel",
			PortLabel: "http",
			AddressMode: AddressModeHost,
		},
		{
			Name: "HostModeWithoutLabel",
			AddressMode: AddressModeHost,
		},
		{
			Name: "DriverModeWithoutLabel",
			AddressMode: AddressModeDriver,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			if err := task.Validate(ephemeralDisk); err != nil {
				t.Fatalf("unexpected err: %v", err)
			}
		})
	}
}

func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		task := &Task{
			Name: "web",
			Driver: "docker",
			Resources: DefaultResources(),
			Services: []*Service{s},
			LogConfig: DefaultLogConfig(),
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name: "DriverModeWithLabel",
			PortLabel: "asdf",
			AddressMode: AddressModeDriver,
		},
		{
			Name: "HostModeWithLabel",
			PortLabel: "asdf",
			AddressMode: AddressModeHost,
		},
		{
			Name: "HostModeWithPort",
			PortLabel: "80",
			AddressMode: AddressModeHost,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			err := task.Validate(ephemeralDisk)
			if err == nil {
				t.Fatalf("expected an error")
			}
			//t.Logf("err: %v", err)
		})
	}
}

func TestTask_Validate_Service_Check(t *testing.T) {

	invalidCheck := ServiceCheck{
		Name: "check-name",
		Command: "/bin/true",
		Type: ServiceCheckScript,
		Interval: 10 * time.Second,
	}

	err := invalidCheck.validate()
	if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") {
		t.Fatalf("expected a timeout validation error but received: %q", err)
	}

	check1 := ServiceCheck{
		Name: "check-name",
		Type: ServiceCheckTCP,
		Interval: 10 * time.Second,
		Timeout: 2 * time.Second,
	}

	if err := check1.validate(); err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = "foo"
	err = check1.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}

	if !strings.Contains(err.Error(), "invalid initial check state (foo)") {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthCritical
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthPassing
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = ""
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2 := ServiceCheck{
		Name: "check-name-2",
		Type: ServiceCheckHTTP,
		Interval: 10 * time.Second,
		Timeout: 2 * time.Second,
		Path: "/foo/bar",
	}

	err = check2.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2.Path = ""
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "valid http path") {
		t.Fatalf("err: %v", err)
	}

	check2.Path = "http://www.example.com"
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "relative http path") {
		t.Fatalf("err: %v", err)
	}
}

// TestTask_Validate_Service_Check_AddressMode asserts that checks do not
// inherit address mode but do inherit ports.
func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
	getTask := func(s *Service) *Task {
		return &Task{
			Resources: &Resources{
				Networks: []*NetworkResource{
					{
						DynamicPorts: []Port{
							{
								Label: "http",
								Value: 9999,
							},
						},
					},
				},
			},
			Services: []*Service{s},
		}
	}

	cases := []struct {
		Service *Service
		ErrContains string
	}{
		{
			Service: &Service{
				Name: "invalid-driver",
				PortLabel: "80",
				AddressMode: "host",
			},
			ErrContains: `port label "80" referenced`,
		},
		{
			Service: &Service{
				Name: "http-driver-fail-1",
				PortLabel: "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name: "invalid-check-1",
						Type: "tcp",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-1" cannot use a numeric port`,
		},
		{
			Service: &Service{
				Name: "http-driver-fail-2",
				PortLabel: "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name: "invalid-check-2",
						Type: "tcp",
						PortLabel: "80",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-2" cannot use a numeric port`,
		},
		{
			Service: &Service{
				Name: "http-driver-fail-3",
				PortLabel: "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name: "invalid-check-3",
						Type: "tcp",
						PortLabel: "missing-port-label",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
			ErrContains: `port label "missing-port-label" referenced`,
		},
		{
			Service: &Service{
				Name: "http-driver-passes",
				PortLabel: "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name: "valid-script-check",
						Type: "script",
						Command: "ok",
						Interval: time.Second,
						Timeout: time.Second,
					},
					{
						Name: "valid-host-check",
						Type: "tcp",
						PortLabel: "http",
						Interval: time.Second,
						Timeout: time.Second,
					},
					{
						Name: "valid-driver-check",
						Type: "tcp",
						AddressMode: "driver",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-passes-1",
				Checks: []*ServiceCheck{
					{
						Name: "valid-port-label",
						Type: "tcp",
						PortLabel: "http",
						Interval: time.Second,
						Timeout: time.Second,
					},
					{
						Name: "empty-is-ok",
						Type: "script",
						Command: "ok",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-passes-2",
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-fails",
				Checks: []*ServiceCheck{
					{
						Name: "empty-is-not-ok",
						Type: "tcp",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
			ErrContains: `invalid: check requires a port but neither check nor service`,
		},
	}

	for _, tc := range cases {
		tc := tc
		task := getTask(tc.Service)
		t.Run(tc.Service.Name, func(t *testing.T) {
			err := validateServices(task)
			if err == nil && tc.ErrContains == "" {
				// Ok!
				return
			}
			if err == nil {
				t.Fatalf("no error returned. expected: %s", tc.ErrContains)
			}
			if !strings.Contains(err.Error(), tc.ErrContains) {
				t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
			}
		})
	}
}

func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
	invalidCheckRestart := &CheckRestart{
		Limit: -1,
		Grace: -1,
	}

	err := invalidCheckRestart.Validate()
	assert.NotNil(t, err, "invalidateCheckRestart.Validate()")
	assert.Len(t, err.(*multierror.Error).Errors, 2)

	validCheckRestart := &CheckRestart{}
	assert.Nil(t, validCheckRestart.Validate())

	validCheckRestart.Limit = 1
	validCheckRestart.Grace = 1
	assert.Nil(t, validCheckRestart.Validate())
}

func TestTask_Validate_LogConfig(t *testing.T) {
	task := &Task{
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk := &EphemeralDisk{
		SizeMB: 1,
	}

	err := task.Validate(ephemeralDisk)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[3].Error(), "log storage") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate_Template(t *testing.T) {

	bad := &Template{}
	task := &Task{
		Templates: []*Template{bad},
	}
	ephemeralDisk := &EphemeralDisk{
		SizeMB: 1,
	}

	err := task.Validate(ephemeralDisk)
	if !strings.Contains(err.Error(), "Template 1 validation failed") {
		t.Fatalf("err: %s", err)
	}

	// Have two templates that share the same destination
	good := &Template{
		SourcePath: "foo",
		DestPath: "local/foo",
		ChangeMode: "noop",
	}

	task.Templates = []*Template{good, good}
	err = task.Validate(ephemeralDisk)
	if !strings.Contains(err.Error(), "same destination as") {
		t.Fatalf("err: %s", err)
	}

	// Env templates can't use signals
	task.Templates = []*Template{
		{
			Envvars: true,
			ChangeMode: "signal",
		},
	}

	err = task.Validate(ephemeralDisk)
	if err == nil {
		t.Fatalf("expected error from Template.Validate")
	}
	if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) {
		t.Errorf("expected to find %q but found %v", expected, err)
	}
}

func TestTemplate_Validate(t *testing.T) {
	cases := []struct {
		Tmpl *Template
		Fail bool
		ContainsErrs []string
	}{
		{
			Tmpl: &Template{},
			Fail: true,
			ContainsErrs: []string{
				"specify a source path",
				"specify a destination",
				TemplateChangeModeInvalidError.Error(),
			},
		},
		{
			Tmpl: &Template{
				Splay: -100,
			},
			Fail: true,
			ContainsErrs: []string{
				"positive splay",
			},
		},
		{
			Tmpl: &Template{
				ChangeMode: "foo",
			},
			Fail: true,
			ContainsErrs: []string{
				TemplateChangeModeInvalidError.Error(),
			},
		},
		{
			Tmpl: &Template{
				ChangeMode: "signal",
			},
			Fail: true,
			ContainsErrs: []string{
				"specify signal value",
			},
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath: "../../root",
				ChangeMode: "noop",
			},
			Fail: true,
			ContainsErrs: []string{
				"destination escapes",
			},
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath: "local/foo",
				ChangeMode: "noop",
			},
			Fail: false,
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath: "local/foo",
				ChangeMode: "noop",
				Perms: "0444",
			},
			Fail: false,
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath: "local/foo",
				ChangeMode: "noop",
				Perms: "zza",
			},
			Fail: true,
			ContainsErrs: []string{
				"as octal",
			},
		},
	}

	for i, c := range cases {
		err := c.Tmpl.Validate()
		if err != nil {
			if !c.Fail {
				t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err)
			}

			e := err.Error()
			for _, exp := range c.ContainsErrs {
				if !strings.Contains(e, exp) {
					t.Fatalf("Case %d: should have contained error %q: %q", i+1, exp, e)
				}
			}
		} else if c.Fail {
			t.Fatalf("Case %d: should have failed: %v", i+1, err)
		}
	}
}

func TestConstraint_Validate(t *testing.T) {
	c := &Constraint{}
	err := c.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") {
		t.Fatalf("err: %s", err)
	}

	c = &Constraint{
		LTarget: "$attr.kernel.name",
		RTarget: "linux",
		Operand: "=",
	}
	err = c.Validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Perform additional regexp validation
	c.Operand = ConstraintRegex
	c.RTarget = "(foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "missing closing") {
		t.Fatalf("err: %s", err)
	}

	// Perform version validation
	c.Operand = ConstraintVersion
	c.RTarget = "~> foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") {
		t.Fatalf("err: %s", err)
	}

	// Perform distinct_property validation
	c.Operand = ConstraintDistinctProperty
	c.RTarget = "0"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") {
		t.Fatalf("err: %s", err)
	}

	c.RTarget = "-1"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "to uint64") {
		t.Fatalf("err: %s", err)
	}

	// Perform distinct_hosts validation
	c.Operand = ConstraintDistinctHosts
	c.LTarget = ""
	c.RTarget = ""
	if err := c.Validate(); err != nil {
		t.Fatalf("expected valid constraint: %v", err)
	}

	// Perform set_contains validation
	c.Operand = ConstraintSetContains
	c.RTarget = ""
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") {
		t.Fatalf("err: %s", err)
	}

	// Perform LTarget validation
	c.Operand = ConstraintRegex
	c.RTarget = "foo"
	c.LTarget = ""
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") {
		t.Fatalf("err: %s", err)
	}

	// Perform constraint type validation
	c.Operand = "foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") {
		t.Fatalf("err: %s", err)
	}
}

func TestUpdateStrategy_Validate(t *testing.T) {
	u := &UpdateStrategy{
		MaxParallel: 0,
		HealthCheck: "foo",
		MinHealthyTime: -10,
		HealthyDeadline: -15,
		AutoRevert: false,
		Canary: -1,
	}

	err := u.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than one") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[5].Error(), "Minimum healthy time must be less than healthy deadline") {
		t.Fatalf("err: %s", err)
	}
}

func TestResource_NetIndex(t *testing.T) {
	r := &Resources{
		Networks: []*NetworkResource{
			{Device: "eth0"},
			{Device: "lo0"},
			{Device: ""},
		},
	}
	if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 {
		t.Fatalf("Bad: %d", idx)
	}
	if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 {
		t.Fatalf("Bad: %d", idx)
	}
	if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 {
		t.Fatalf("Bad: %d", idx)
	}
}

func TestResource_Superset(t *testing.T) {
	r1 := &Resources{
		CPU: 2000,
		MemoryMB: 2048,
		DiskMB: 10000,
		IOPS: 100,
	}
	r2 := &Resources{
		CPU: 2000,
		MemoryMB: 1024,
		DiskMB: 5000,
		IOPS: 50,
	}

	if s, _ := r1.Superset(r1); !s {
		t.Fatalf("bad")
	}
	if s, _ := r1.Superset(r2); !s {
		t.Fatalf("bad")
	}
	if s, _ := r2.Superset(r1); s {
		t.Fatalf("bad")
	}
	if s, _ := r2.Superset(r2); !s {
		t.Fatalf("bad")
	}
}

func TestResource_Add(t *testing.T) {
	r1 := &Resources{
		CPU: 2000,
		MemoryMB: 2048,
		DiskMB: 10000,
		IOPS: 100,
		Networks: []*NetworkResource{
			{
				CIDR: "10.0.0.0/8",
				MBits: 100,
				ReservedPorts: []Port{{"ssh", 22}},
			},
		},
	}
	r2 := &Resources{
		CPU: 2000,
		MemoryMB: 1024,
		DiskMB: 5000,
		IOPS: 50,
		Networks: []*NetworkResource{
			{
				IP: "10.0.0.1",
				MBits: 50,
				ReservedPorts: []Port{{"web", 80}},
			},
		},
	}

	err := r1.Add(r2)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	expect := &Resources{
		CPU: 3000,
		MemoryMB: 3072,
		DiskMB: 15000,
		IOPS: 150,
		Networks: []*NetworkResource{
			{
				CIDR: "10.0.0.0/8",
				MBits: 150,
				ReservedPorts: []Port{{"ssh", 22}, {"web", 80}},
			},
		},
	}

	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
		t.Fatalf("bad: %#v %#v", expect, r1)
	}
}

func TestResource_Add_Network(t *testing.T) {
	r1 := &Resources{}
	r2 := &Resources{
		Networks: []*NetworkResource{
			{
				MBits: 50,
				DynamicPorts: []Port{{"http", 0}, {"https", 0}},
			},
		},
	}
	r3 := &Resources{
		Networks: []*NetworkResource{
			{
				MBits: 25,
				DynamicPorts: []Port{{"admin", 0}},
			},
		},
	}

	err := r1.Add(r2)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	err = r1.Add(r3)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	expect := &Resources{
		Networks: []*NetworkResource{
			{
				MBits: 75,
				DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
			},
		},
	}

	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
		t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0])
	}
}

func TestEncodeDecode(t *testing.T) {
	type FooRequest struct {
		Foo string
		Bar int
		Baz bool
	}
	arg := &FooRequest{
		Foo: "test",
		Bar: 42,
		Baz: true,
	}
	buf, err := Encode(1, arg)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out FooRequest
	err = Decode(buf[1:], &out)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(arg, &out) {
		t.Fatalf("bad: %#v %#v", arg, out)
	}
}

func BenchmarkEncodeDecode(b *testing.B) {
	job := testJob()

	for i := 0; i < b.N; i++ {
		buf, err := Encode(1, job)
		if err != nil {
			b.Fatalf("err: %v", err)
		}

		var out Job
		err = Decode(buf[1:], &out)
		if err != nil {
			b.Fatalf("err: %v", err)
		}
	}
}

func TestInvalidServiceCheck(t *testing.T) {
	s := Service{
		Name: "service-name",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name: "check-name",
				Type: "lol",
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (invalid type)")
	}

	s = Service{
		Name: "service.name",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (contains a dot): %v", err)
	}

	s = Service{
		Name: "-my-service",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (begins with a hyphen): %v", err)
	}

	s = Service{
		Name: "my-service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("Service should be valid: %v", err)
	}

	s = Service{
		Name: "my_service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err)
	}

	s = Service{
		Name: "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (too long): %v", err)
	}

	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name: "check-tcp",
				Type: ServiceCheckTCP,
				Interval: 5 * time.Second,
				Timeout: 2 * time.Second,
			},
			{
				Name: "check-http",
				Type: ServiceCheckHTTP,
				Path: "/foo",
				Interval: 5 * time.Second,
				Timeout: 2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err)
	}

	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name: "check-script",
				Type: ServiceCheckScript,
				Command: "/bin/date",
				Interval: 5 * time.Second,
				Timeout: 2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("un-expected error: %v", err)
	}
}

func TestDistinctCheckID(t *testing.T) {
	c1 := ServiceCheck{
		Name: "web-health",
		Type: "http",
		Path: "/health",
		Interval: 2 * time.Second,
		Timeout: 3 * time.Second,
	}
	c2 := ServiceCheck{
		Name: "web-health",
		Type: "http",
		Path: "/health1",
		Interval: 2 * time.Second,
		Timeout: 3 * time.Second,
	}

	c3 := ServiceCheck{
		Name: "web-health",
		Type: "http",
		Path: "/health",
		Interval: 4 * time.Second,
		Timeout: 3 * time.Second,
	}
	serviceID := "123"
	c1Hash := c1.Hash(serviceID)
	c2Hash := c2.Hash(serviceID)
	c3Hash := c3.Hash(serviceID)

	if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash {
		t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash)
	}

}

func TestService_Canonicalize(t *testing.T) {
	job := "example"
	taskGroup := "cache"
	task := "redis"

	s := Service{
		Name: "${TASK}-db",
	}

	s.Canonicalize(job, taskGroup, task)
	if s.Name != "redis-db" {
		t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name)
	}

	s.Name = "db"
	s.Canonicalize(job, taskGroup, task)
	if s.Name != "db" {
		t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name)
	}

	s.Name = "${JOB}-${TASKGROUP}-${TASK}-db"
	s.Canonicalize(job, taskGroup, task)
	if s.Name != "example-cache-redis-db" {
		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
	}

	s.Name = "${BASE}-db"
	s.Canonicalize(job, taskGroup, task)
	if s.Name != "example-cache-redis-db" {
		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
	}

}

func TestJob_ExpandServiceNames(t *testing.T) {
	j := &Job{
		Name: "my-job",
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				Tasks: []*Task{
					{
						Name: "frontend",
						Services: []*Service{
							{
								Name: "${BASE}-default",
							},
							{
								Name: "jmx",
							},
						},
					},
				},
			},
			{
				Name: "admin",
				Tasks: []*Task{
					{
						Name: "admin-web",
					},
				},
			},
		},
	}

	j.Canonicalize()

	service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name
	if service1Name != "my-job-web-frontend-default" {
		t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name)
	}

	service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name
	if service2Name != "jmx" {
		t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name)
	}

}

func TestPeriodicConfig_EnabledInvalid(t *testing.T) {
	// Create a config that is enabled but with no interval specified.
	p := &PeriodicConfig{Enabled: true}
	if err := p.Validate(); err == nil {
		t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid")
	}

	// Create a config that is enabled, with a spec but no type specified.
	p = &PeriodicConfig{Enabled: true, Spec: "foo"}
	if err := p.Validate(); err == nil {
		t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid")
	}

	// Create a config that is enabled, with a spec type but no spec specified.
	p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron}
	if err := p.Validate(); err == nil {
		t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid")
	}

	// Create a config that is enabled, with a bad time zone.
	p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") {
		t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
	}
}

func TestPeriodicConfig_InvalidCron(t *testing.T) {
	specs := []string{"foo", "* *", "@foo"}
	for _, spec := range specs {
		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
		p.Canonicalize()
		if err := p.Validate(); err == nil {
			t.Fatal("Invalid cron spec")
		}
	}
}

func TestPeriodicConfig_ValidCron(t *testing.T) {
	specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"}
	for _, spec := range specs {
		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
		p.Canonicalize()
		if err := p.Validate(); err != nil {
			t.Fatal("Passed valid cron")
		}
	}
}

func TestPeriodicConfig_NextCron(t *testing.T) {
	from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC)
	specs := []string{"0 0 29 2 * 1980", "*/5 * * * *"}
	expected := []time.Time{{}, time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC)}
	for i, spec := range specs {
		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
		p.Canonicalize()
		n := p.Next(from)
		if expected[i] != n {
			t.Fatalf("Next(%v) returned %v; want %v", from, n, expected[i])
		}
	}
}

func TestPeriodicConfig_ValidTimeZone(t *testing.T) {
	zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"}
	for _, zone := range zones {
		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone}
		p.Canonicalize()
		if err := p.Validate(); err != nil {
			t.Fatalf("Valid tz errored: %v", err)
		}
	}
}

func TestPeriodicConfig_DST(t *testing.T) {
	// On Sun, Mar 12, 2:00 am 2017: +1 hour UTC
	p := &PeriodicConfig{
		Enabled: true,
		SpecType: PeriodicSpecCron,
		Spec: "0 2 11-12 3 * 2017",
		TimeZone: "America/Los_Angeles",
	}
	p.Canonicalize()

	t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
	t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)

	// E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
	e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
	e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC)

	n1 := p.Next(t1).UTC()
	n2 := p.Next(t2).UTC()

	if !reflect.DeepEqual(e1, n1) {
		t.Fatalf("Got %v; want %v", n1, e1)
	}
	if !reflect.DeepEqual(e2, n2) {
		t.Fatalf("Got %v; want %v", n1, e1)
	}
}

func TestRestartPolicy_Validate(t *testing.T) {
	// Policy with acceptable restart options passes
	p := &RestartPolicy{
		Mode: RestartPolicyModeFail,
		Attempts: 0,
		Interval: 5 * time.Second,
	}
	if err := p.Validate(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Policy with ambiguous restart options fails
	p = &RestartPolicy{
		Mode: RestartPolicyModeDelay,
		Attempts: 0,
		Interval: 5 * time.Second,
	}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") {
		t.Fatalf("expect ambiguity error, got: %v", err)
	}

	// Bad policy mode fails
	p = &RestartPolicy{
		Mode: "nope",
		Attempts: 1,
		Interval: 5 * time.Second,
	}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") {
		t.Fatalf("expect mode error, got: %v", err)
	}

	// Fails when attempts*delay does not fit inside interval
	p = &RestartPolicy{
		Mode: RestartPolicyModeDelay,
		Attempts: 3,
		Delay: 5 * time.Second,
		Interval: 5 * time.Second,
	}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") {
		t.Fatalf("expect restart interval error, got: %v", err)
	}

	// Fails when interval is too small
	p = &RestartPolicy{
		Mode: RestartPolicyModeDelay,
		Attempts: 3,
		Delay: 5 * time.Second,
		Interval: 2 * time.Second,
	}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") {
		t.Fatalf("expect interval too small error, got: %v", err)
	}
}

func TestReschedulePolicy_Validate(t *testing.T) {
	type testCase struct {
		desc string
		ReschedulePolicy *ReschedulePolicy
		errors []error
	}

	testCases := []testCase{
		{
			desc: "Nil",
		},
		{
			desc: "Disabled",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 0,
				Interval: 0 * time.Second},
		},
		{
			desc: "Disabled",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: -1,
				Interval: 5 * time.Minute},
		},
		{
			desc: "Valid Linear Delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 5 * time.Minute,
				Delay: 10 * time.Second,
				DelayFunction: "constant"},
		},
		{
			desc: "Valid Exponential Delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 5,
				Interval: 1 * time.Hour,
				Delay: 30 * time.Second,
				MaxDelay: 5 * time.Minute,
				DelayFunction: "exponential"},
		},
		{
			desc: "Valid Fibonacci Delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 5,
				Interval: 15 * time.Minute,
				Delay: 10 * time.Second,
				MaxDelay: 5 * time.Minute,
				DelayFunction: "fibonacci"},
		},
		{
			desc: "Invalid delay function",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 1 * time.Second,
				DelayFunction: "blah"},
			errors: []error{
				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
				fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions),
			},
		},
		{
			desc: "Invalid delay ceiling",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 8 * time.Second,
				DelayFunction: "exponential",
				Delay: 15 * time.Second,
				MaxDelay: 5 * time.Second},
			errors: []error{
				fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)",
					15*time.Second, 5*time.Second),
			},
		},
		{
			desc: "Invalid delay and interval",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 1 * time.Second,
				DelayFunction: "constant"},
			errors: []error{
				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
			},
		}, {
			// Should suggest 2h40m as the interval
			desc: "Invalid Attempts - linear delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 10,
				Interval: 1 * time.Hour,
				Delay: 20 * time.Minute,
				DelayFunction: "constant",
			},
			errors: []error{
				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+
					" delay function %q", 3, time.Hour, 20*time.Minute, "constant"),
				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
					200*time.Minute, 10),
			},
		},
		{
			// Should suggest 4h40m as the interval
			// Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
			desc: "Invalid Attempts - exponential delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 10,
				Interval: 30 * time.Minute,
				Delay: 5 * time.Minute,
				MaxDelay: 40 * time.Minute,
				DelayFunction: "exponential",
			},
			errors: []error{
				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
					"delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute,
					"exponential", 40*time.Minute),
				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
					280*time.Minute, 10),
			},
		},
		{
			// Should suggest 8h as the interval
			// Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
			desc: "Invalid Attempts - fibonacci delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 10,
				Interval: 1 * time.Hour,
				Delay: 20 * time.Minute,
				MaxDelay: 80 * time.Minute,
				DelayFunction: "fibonacci",
			},
			errors: []error{
				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
					"delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute,
					"fibonacci", 80*time.Minute),
				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
					480*time.Minute, 10),
			},
		},
		{
			desc: "Ambiguous Unlimited config, has both attempts and unlimited set",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Unlimited: true,
				DelayFunction: "exponential",
				Delay: 5 * time.Minute,
				MaxDelay: 1 * time.Hour,
			},
			errors: []error{
				fmt.Errorf("Interval must be a non zero value if Attempts > 0"),
				fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true),
			},
		},
		{
			desc: "Invalid Unlimited config",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 1 * time.Second,
				Unlimited: true,
				DelayFunction: "exponential",
			},
			errors: []error{
				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
				fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
			},
		},
		{
			desc: "Valid Unlimited config",
			ReschedulePolicy: &ReschedulePolicy{
				Unlimited: true,
				DelayFunction: "exponential",
				Delay: 5 * time.Second,
				MaxDelay: 1 * time.Hour,
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			require := require.New(t)
			gotErr := tc.ReschedulePolicy.Validate()
			if tc.errors != nil {
				// Validate all errors
				for _, err := range tc.errors {
					require.Contains(gotErr.Error(), err.Error())
				}
			} else {
				require.Nil(gotErr)
			}
		})
	}
}

func TestAllocation_Index(t *testing.T) {
	a1 := Allocation{
		Name: "example.cache[1]",
		TaskGroup: "cache",
		JobID: "example",
		Job: &Job{
			ID: "example",
			TaskGroups: []*TaskGroup{{Name: "cache"}}},
	}
	e1 := uint(1)
	a2 := a1.Copy()
	a2.Name = "example.cache[713127]"
	e2 := uint(713127)

	if a1.Index() != e1 || a2.Index() != e2 {
		t.Fatalf("Got %d and %d", a1.Index(), a2.Index())
	}
}

func TestTaskArtifact_Validate_Source(t *testing.T) {
	valid := &TaskArtifact{GetterSource: "google.com"}
	if err := valid.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}

func TestTaskArtifact_Validate_Dest(t *testing.T) {
	valid := &TaskArtifact{GetterSource: "google.com"}
	if err := valid.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valid.RelativeDest = "local/"
	if err := valid.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valid.RelativeDest = "local/.."
	if err := valid.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valid.RelativeDest = "local/../../.."
	if err := valid.Validate(); err == nil {
		t.Fatalf("expected error: %v", err)
	}
}

func TestAllocation_ShouldMigrate(t *testing.T) {
	alloc := Allocation{
		PreviousAllocation: "123",
		TaskGroup: "foo",
		Job: &Job{
			TaskGroups: []*TaskGroup{
				{
					Name: "foo",
					EphemeralDisk: &EphemeralDisk{
						Migrate: true,
						Sticky: true,
					},
				},
			},
		},
	}

	if !alloc.ShouldMigrate() {
		t.Fatalf("bad: %v", alloc)
	}

	alloc1 := Allocation{
		PreviousAllocation: "123",
		TaskGroup: "foo",
		Job: &Job{
			TaskGroups: []*TaskGroup{
				{
					Name: "foo",
					EphemeralDisk: &EphemeralDisk{},
				},
			},
		},
	}

	if alloc1.ShouldMigrate() {
		t.Fatalf("bad: %v", alloc)
	}

	alloc2 := Allocation{
		PreviousAllocation: "123",
		TaskGroup: "foo",
		Job: &Job{
			TaskGroups: []*TaskGroup{
				{
					Name: "foo",
					EphemeralDisk: &EphemeralDisk{
						Sticky: false,
						Migrate: true,
					},
				},
			},
		},
	}

	if alloc2.ShouldMigrate() {
		t.Fatalf("bad: %v", alloc)
	}

	alloc3 := Allocation{
		PreviousAllocation: "123",
		TaskGroup: "foo",
		Job: &Job{
			TaskGroups: []*TaskGroup{
				{
					Name: "foo",
				},
			},
		},
	}

	if alloc3.ShouldMigrate() {
		t.Fatalf("bad: %v", alloc)
	}

	// No previous
	alloc4 := Allocation{
		TaskGroup: "foo",
		Job: &Job{
			TaskGroups: []*TaskGroup{
				{
					Name: "foo",
					EphemeralDisk: &EphemeralDisk{
						Migrate: true,
						Sticky: true,
					},
				},
			},
		},
	}

	if alloc4.ShouldMigrate() {
		t.Fatalf("bad: %v", alloc4)
	}
}

func TestTaskArtifact_Validate_Checksum(t *testing.T) {
	cases := []struct {
		Input *TaskArtifact
		Err bool
	}{
		{
			&TaskArtifact{
				GetterSource: "foo.com",
				GetterOptions: map[string]string{
					"checksum": "no-type",
				},
			},
			true,
		},
		{
			&TaskArtifact{
				GetterSource: "foo.com",
				GetterOptions: map[string]string{
					"checksum": "md5:toosmall",
				},
			},
			true,
		},
		{
			&TaskArtifact{
				GetterSource: "foo.com",
				GetterOptions: map[string]string{
					"checksum": "invalid:type",
				},
			},
			true,
		},
	}

	for i, tc := range cases {
		err := tc.Input.Validate()
		if (err != nil) != tc.Err {
t.Fatalf("case %d: %v", i, err) 2491 continue 2492 } 2493 } 2494 } 2495 2496 func TestAllocation_Terminated(t *testing.T) { 2497 type desiredState struct { 2498 ClientStatus string 2499 DesiredStatus string 2500 Terminated bool 2501 } 2502 2503 harness := []desiredState{ 2504 { 2505 ClientStatus: AllocClientStatusPending, 2506 DesiredStatus: AllocDesiredStatusStop, 2507 Terminated: false, 2508 }, 2509 { 2510 ClientStatus: AllocClientStatusRunning, 2511 DesiredStatus: AllocDesiredStatusStop, 2512 Terminated: false, 2513 }, 2514 { 2515 ClientStatus: AllocClientStatusFailed, 2516 DesiredStatus: AllocDesiredStatusStop, 2517 Terminated: true, 2518 }, 2519 { 2520 ClientStatus: AllocClientStatusFailed, 2521 DesiredStatus: AllocDesiredStatusRun, 2522 Terminated: true, 2523 }, 2524 } 2525 2526 for _, state := range harness { 2527 alloc := Allocation{} 2528 alloc.DesiredStatus = state.DesiredStatus 2529 alloc.ClientStatus = state.ClientStatus 2530 if alloc.Terminated() != state.Terminated { 2531 t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated()) 2532 } 2533 } 2534 } 2535 2536 func TestAllocation_ShouldReschedule(t *testing.T) { 2537 type testCase struct { 2538 Desc string 2539 FailTime time.Time 2540 ClientStatus string 2541 DesiredStatus string 2542 ReschedulePolicy *ReschedulePolicy 2543 RescheduleTrackers []*RescheduleEvent 2544 ShouldReschedule bool 2545 } 2546 2547 fail := time.Now() 2548 2549 harness := []testCase{ 2550 { 2551 Desc: "Reschedule when desired state is stop", 2552 ClientStatus: AllocClientStatusPending, 2553 DesiredStatus: AllocDesiredStatusStop, 2554 FailTime: fail, 2555 ReschedulePolicy: nil, 2556 ShouldReschedule: false, 2557 }, 2558 { 2559 Desc: "Disabled rescheduling", 2560 ClientStatus: AllocClientStatusFailed, 2561 DesiredStatus: AllocDesiredStatusRun, 2562 FailTime: fail, 2563 ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute}, 2564 ShouldReschedule: false, 2565 }, 2566 { 2567 Desc: "Reschedule when client status is complete", 2568 ClientStatus: AllocClientStatusComplete, 2569 DesiredStatus: AllocDesiredStatusRun, 2570 FailTime: fail, 2571 ReschedulePolicy: nil, 2572 ShouldReschedule: false, 2573 }, 2574 { 2575 Desc: "Reschedule with nil reschedule policy", 2576 ClientStatus: AllocClientStatusFailed, 2577 DesiredStatus: AllocDesiredStatusRun, 2578 FailTime: fail, 2579 ReschedulePolicy: nil, 2580 ShouldReschedule: false, 2581 }, 2582 { 2583 Desc: "Reschedule with unlimited and attempts >0", 2584 ClientStatus: AllocClientStatusFailed, 2585 DesiredStatus: AllocDesiredStatusRun, 2586 FailTime: fail, 2587 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true}, 2588 ShouldReschedule: true, 2589 }, 2590 { 2591 Desc: "Reschedule when client status is complete", 2592 ClientStatus: AllocClientStatusComplete, 2593 DesiredStatus: AllocDesiredStatusRun, 2594 FailTime: fail, 2595 ReschedulePolicy: nil, 2596 ShouldReschedule: false, 2597 }, 2598 { 2599 Desc: "Reschedule with policy when client status complete", 2600 ClientStatus: AllocClientStatusComplete, 2601 DesiredStatus: AllocDesiredStatusRun, 2602 FailTime: fail, 2603 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 2604 ShouldReschedule: false, 2605 }, 2606 { 2607 Desc: "Reschedule with no previous attempts", 2608 ClientStatus: AllocClientStatusFailed, 2609 DesiredStatus: AllocDesiredStatusRun, 2610 FailTime: fail, 2611 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 2612 ShouldReschedule: true, 2613 }, 2614 { 2615 
Desc: "Reschedule with leftover attempts", 2616 ClientStatus: AllocClientStatusFailed, 2617 DesiredStatus: AllocDesiredStatusRun, 2618 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 2619 FailTime: fail, 2620 RescheduleTrackers: []*RescheduleEvent{ 2621 { 2622 RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(), 2623 }, 2624 }, 2625 ShouldReschedule: true, 2626 }, 2627 { 2628 Desc: "Reschedule with too old previous attempts", 2629 ClientStatus: AllocClientStatusFailed, 2630 DesiredStatus: AllocDesiredStatusRun, 2631 FailTime: fail, 2632 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute}, 2633 RescheduleTrackers: []*RescheduleEvent{ 2634 { 2635 RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(), 2636 }, 2637 }, 2638 ShouldReschedule: true, 2639 }, 2640 { 2641 Desc: "Reschedule with no leftover attempts", 2642 ClientStatus: AllocClientStatusFailed, 2643 DesiredStatus: AllocDesiredStatusRun, 2644 FailTime: fail, 2645 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 2646 RescheduleTrackers: []*RescheduleEvent{ 2647 { 2648 RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), 2649 }, 2650 { 2651 RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(), 2652 }, 2653 }, 2654 ShouldReschedule: false, 2655 }, 2656 } 2657 2658 for _, state := range harness { 2659 alloc := Allocation{} 2660 alloc.DesiredStatus = state.DesiredStatus 2661 alloc.ClientStatus = state.ClientStatus 2662 alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers} 2663 2664 t.Run(state.Desc, func(t *testing.T) { 2665 if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule { 2666 t.Fatalf("expected %v but got %v", state.ShouldReschedule, got) 2667 } 2668 }) 2669 2670 } 2671 } 2672 2673 func TestAllocation_LastEventTime(t *testing.T) { 2674 type testCase struct { 2675 desc string 2676 taskState map[string]*TaskState 2677 expectedLastEventTime time.Time 2678 } 2679 2680 t1 := time.Now().UTC() 2681 2682 testCases := []testCase{ 2683 { 2684 desc: "nil task state", 2685 expectedLastEventTime: t1, 2686 }, 2687 { 2688 desc: "empty task state", 2689 taskState: make(map[string]*TaskState), 2690 expectedLastEventTime: t1, 2691 }, 2692 { 2693 desc: "Finished At not set", 2694 taskState: map[string]*TaskState{"foo": {State: "start", 2695 StartedAt: t1.Add(-2 * time.Hour)}}, 2696 expectedLastEventTime: t1, 2697 }, 2698 { 2699 desc: "One finished ", 2700 taskState: map[string]*TaskState{"foo": {State: "start", 2701 StartedAt: t1.Add(-2 * time.Hour), 2702 FinishedAt: t1.Add(-1 * time.Hour)}}, 2703 expectedLastEventTime: t1.Add(-1 * time.Hour), 2704 }, 2705 { 2706 desc: "Multiple task groups", 2707 taskState: map[string]*TaskState{"foo": {State: "start", 2708 StartedAt: t1.Add(-2 * time.Hour), 2709 FinishedAt: t1.Add(-1 * time.Hour)}, 2710 "bar": {State: "start", 2711 StartedAt: t1.Add(-2 * time.Hour), 2712 FinishedAt: t1.Add(-40 * time.Minute)}}, 2713 expectedLastEventTime: t1.Add(-40 * time.Minute), 2714 }, 2715 { 2716 desc: "No finishedAt set, one task event, should use modify time", 2717 taskState: map[string]*TaskState{"foo": { 2718 State: "run", 2719 StartedAt: t1.Add(-2 * time.Hour), 2720 Events: []*TaskEvent{ 2721 {Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()}, 2722 }}, 2723 }, 2724 expectedLastEventTime: t1, 2725 }, 2726 } 2727 for _, tc := range testCases { 2728 t.Run(tc.desc, func(t *testing.T) { 2729 alloc := &Allocation{CreateTime: t1.UnixNano(), 
ModifyTime: t1.UnixNano()} 2730 alloc.TaskStates = tc.taskState 2731 require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime()) 2732 }) 2733 } 2734 } 2735 2736 func TestAllocation_NextDelay(t *testing.T) { 2737 type testCase struct { 2738 desc string 2739 reschedulePolicy *ReschedulePolicy 2740 alloc *Allocation 2741 expectedRescheduleTime time.Time 2742 expectedRescheduleEligible bool 2743 } 2744 now := time.Now() 2745 testCases := []testCase{ 2746 { 2747 desc: "Allocation hasn't failed yet", 2748 reschedulePolicy: &ReschedulePolicy{ 2749 DelayFunction: "constant", 2750 Delay: 5 * time.Second, 2751 }, 2752 alloc: &Allocation{}, 2753 expectedRescheduleTime: time.Time{}, 2754 expectedRescheduleEligible: false, 2755 }, 2756 { 2757 desc: "Allocation lacks task state", 2758 reschedulePolicy: &ReschedulePolicy{ 2759 DelayFunction: "constant", 2760 Delay: 5 * time.Second, 2761 Unlimited: true, 2762 }, 2763 alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()}, 2764 expectedRescheduleTime: now.UTC().Add(5 * time.Second), 2765 expectedRescheduleEligible: true, 2766 }, 2767 { 2768 desc: "linear delay, unlimited restarts, no reschedule tracker", 2769 reschedulePolicy: &ReschedulePolicy{ 2770 DelayFunction: "constant", 2771 Delay: 5 * time.Second, 2772 Unlimited: true, 2773 }, 2774 alloc: &Allocation{ 2775 ClientStatus: AllocClientStatusFailed, 2776 TaskStates: map[string]*TaskState{"foo": {State: "dead", 2777 StartedAt: now.Add(-1 * time.Hour), 2778 FinishedAt: now.Add(-2 * time.Second)}}, 2779 }, 2780 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 2781 expectedRescheduleEligible: true, 2782 }, 2783 { 2784 desc: "linear delay with reschedule tracker", 2785 reschedulePolicy: &ReschedulePolicy{ 2786 DelayFunction: "constant", 2787 Delay: 5 * time.Second, 2788 Interval: 10 * time.Minute, 2789 Attempts: 2, 2790 }, 2791 alloc: &Allocation{ 2792 ClientStatus: AllocClientStatusFailed, 2793 TaskStates: map[string]*TaskState{"foo": {State: "start", 2794 StartedAt: now.Add(-1 * time.Hour), 2795 FinishedAt: now.Add(-2 * time.Second)}}, 2796 RescheduleTracker: &RescheduleTracker{ 2797 Events: []*RescheduleEvent{{ 2798 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 2799 Delay: 5 * time.Second, 2800 }}, 2801 }}, 2802 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 2803 expectedRescheduleEligible: true, 2804 }, 2805 { 2806 desc: "linear delay with reschedule tracker, attempts exhausted", 2807 reschedulePolicy: &ReschedulePolicy{ 2808 DelayFunction: "constant", 2809 Delay: 5 * time.Second, 2810 Interval: 10 * time.Minute, 2811 Attempts: 2, 2812 }, 2813 alloc: &Allocation{ 2814 ClientStatus: AllocClientStatusFailed, 2815 TaskStates: map[string]*TaskState{"foo": {State: "start", 2816 StartedAt: now.Add(-1 * time.Hour), 2817 FinishedAt: now.Add(-2 * time.Second)}}, 2818 RescheduleTracker: &RescheduleTracker{ 2819 Events: []*RescheduleEvent{ 2820 { 2821 RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), 2822 Delay: 5 * time.Second, 2823 }, 2824 { 2825 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 2826 Delay: 5 * time.Second, 2827 }, 2828 }, 2829 }}, 2830 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 2831 expectedRescheduleEligible: false, 2832 }, 2833 { 2834 desc: "exponential delay - no reschedule tracker", 2835 reschedulePolicy: &ReschedulePolicy{ 2836 DelayFunction: "exponential", 2837 Delay: 5 * time.Second, 2838 MaxDelay: 90 * time.Second, 2839 Unlimited: true, 2840 
}, 2841 alloc: &Allocation{ 2842 ClientStatus: AllocClientStatusFailed, 2843 TaskStates: map[string]*TaskState{"foo": {State: "start", 2844 StartedAt: now.Add(-1 * time.Hour), 2845 FinishedAt: now.Add(-2 * time.Second)}}, 2846 }, 2847 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 2848 expectedRescheduleEligible: true, 2849 }, 2850 { 2851 desc: "exponential delay with reschedule tracker", 2852 reschedulePolicy: &ReschedulePolicy{ 2853 DelayFunction: "exponential", 2854 Delay: 5 * time.Second, 2855 MaxDelay: 90 * time.Second, 2856 Unlimited: true, 2857 }, 2858 alloc: &Allocation{ 2859 ClientStatus: AllocClientStatusFailed, 2860 TaskStates: map[string]*TaskState{"foo": {State: "start", 2861 StartedAt: now.Add(-1 * time.Hour), 2862 FinishedAt: now.Add(-2 * time.Second)}}, 2863 RescheduleTracker: &RescheduleTracker{ 2864 Events: []*RescheduleEvent{ 2865 { 2866 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 2867 Delay: 5 * time.Second, 2868 }, 2869 { 2870 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2871 Delay: 10 * time.Second, 2872 }, 2873 { 2874 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2875 Delay: 20 * time.Second, 2876 }, 2877 }, 2878 }}, 2879 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 2880 expectedRescheduleEligible: true, 2881 }, 2882 { 2883 desc: "exponential delay with delay ceiling reached", 2884 reschedulePolicy: &ReschedulePolicy{ 2885 DelayFunction: "exponential", 2886 Delay: 5 * time.Second, 2887 MaxDelay: 90 * time.Second, 2888 Unlimited: true, 2889 }, 2890 alloc: &Allocation{ 2891 ClientStatus: AllocClientStatusFailed, 2892 TaskStates: map[string]*TaskState{"foo": {State: "start", 2893 StartedAt: now.Add(-1 * time.Hour), 2894 FinishedAt: now.Add(-15 * time.Second)}}, 2895 RescheduleTracker: &RescheduleTracker{ 2896 Events: []*RescheduleEvent{ 2897 { 2898 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 2899 Delay: 5 * time.Second, 2900 }, 2901 { 2902 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2903 Delay: 10 * time.Second, 2904 }, 2905 { 2906 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2907 Delay: 20 * time.Second, 2908 }, 2909 { 2910 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2911 Delay: 40 * time.Second, 2912 }, 2913 { 2914 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 2915 Delay: 80 * time.Second, 2916 }, 2917 }, 2918 }}, 2919 expectedRescheduleTime: now.Add(-15 * time.Second).Add(90 * time.Second), 2920 expectedRescheduleEligible: true, 2921 }, 2922 { 2923 // Test case where most recent reschedule ran longer than delay ceiling 2924 desc: "exponential delay, delay ceiling reset condition met", 2925 reschedulePolicy: &ReschedulePolicy{ 2926 DelayFunction: "exponential", 2927 Delay: 5 * time.Second, 2928 MaxDelay: 90 * time.Second, 2929 Unlimited: true, 2930 }, 2931 alloc: &Allocation{ 2932 ClientStatus: AllocClientStatusFailed, 2933 TaskStates: map[string]*TaskState{"foo": {State: "start", 2934 StartedAt: now.Add(-1 * time.Hour), 2935 FinishedAt: now.Add(-15 * time.Minute)}}, 2936 RescheduleTracker: &RescheduleTracker{ 2937 Events: []*RescheduleEvent{ 2938 { 2939 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 2940 Delay: 5 * time.Second, 2941 }, 2942 { 2943 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2944 Delay: 10 * time.Second, 2945 }, 2946 { 2947 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2948 Delay: 20 * time.Second, 2949 }, 2950 { 2951 RescheduleTime: now.Add(-1 * 
time.Hour).UTC().UnixNano(), 2952 Delay: 40 * time.Second, 2953 }, 2954 { 2955 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2956 Delay: 80 * time.Second, 2957 }, 2958 { 2959 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2960 Delay: 90 * time.Second, 2961 }, 2962 { 2963 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2964 Delay: 90 * time.Second, 2965 }, 2966 }, 2967 }}, 2968 expectedRescheduleTime: now.Add(-15 * time.Minute).Add(5 * time.Second), 2969 expectedRescheduleEligible: true, 2970 }, 2971 { 2972 desc: "fibonacci delay - no reschedule tracker", 2973 reschedulePolicy: &ReschedulePolicy{ 2974 DelayFunction: "fibonacci", 2975 Delay: 5 * time.Second, 2976 MaxDelay: 90 * time.Second, 2977 Unlimited: true, 2978 }, 2979 alloc: &Allocation{ 2980 ClientStatus: AllocClientStatusFailed, 2981 TaskStates: map[string]*TaskState{"foo": {State: "start", 2982 StartedAt: now.Add(-1 * time.Hour), 2983 FinishedAt: now.Add(-2 * time.Second)}}}, 2984 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 2985 expectedRescheduleEligible: true, 2986 }, 2987 { 2988 desc: "fibonacci delay with reschedule tracker", 2989 reschedulePolicy: &ReschedulePolicy{ 2990 DelayFunction: "fibonacci", 2991 Delay: 5 * time.Second, 2992 MaxDelay: 90 * time.Second, 2993 Unlimited: true, 2994 }, 2995 alloc: &Allocation{ 2996 ClientStatus: AllocClientStatusFailed, 2997 TaskStates: map[string]*TaskState{"foo": {State: "start", 2998 StartedAt: now.Add(-1 * time.Hour), 2999 FinishedAt: now.Add(-2 * time.Second)}}, 3000 RescheduleTracker: &RescheduleTracker{ 3001 Events: []*RescheduleEvent{ 3002 { 3003 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3004 Delay: 5 * time.Second, 3005 }, 3006 { 3007 RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(), 3008 Delay: 5 * time.Second, 3009 }, 3010 }, 3011 }}, 3012 expectedRescheduleTime: now.Add(-2 * time.Second).Add(10 * time.Second), 3013 expectedRescheduleEligible: true, 3014 }, 3015 { 3016 desc: "fibonacci delay with more events", 3017 reschedulePolicy: &ReschedulePolicy{ 3018 DelayFunction: "fibonacci", 3019 Delay: 5 * time.Second, 3020 MaxDelay: 90 * time.Second, 3021 Unlimited: true, 3022 }, 3023 alloc: &Allocation{ 3024 ClientStatus: AllocClientStatusFailed, 3025 TaskStates: map[string]*TaskState{"foo": {State: "start", 3026 StartedAt: now.Add(-1 * time.Hour), 3027 FinishedAt: now.Add(-2 * time.Second)}}, 3028 RescheduleTracker: &RescheduleTracker{ 3029 Events: []*RescheduleEvent{ 3030 { 3031 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3032 Delay: 5 * time.Second, 3033 }, 3034 { 3035 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3036 Delay: 5 * time.Second, 3037 }, 3038 { 3039 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3040 Delay: 10 * time.Second, 3041 }, 3042 { 3043 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3044 Delay: 15 * time.Second, 3045 }, 3046 { 3047 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3048 Delay: 25 * time.Second, 3049 }, 3050 }, 3051 }}, 3052 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 3053 expectedRescheduleEligible: true, 3054 }, 3055 { 3056 desc: "fibonacci delay with delay ceiling reached", 3057 reschedulePolicy: &ReschedulePolicy{ 3058 DelayFunction: "fibonacci", 3059 Delay: 5 * time.Second, 3060 MaxDelay: 50 * time.Second, 3061 Unlimited: true, 3062 }, 3063 alloc: &Allocation{ 3064 ClientStatus: AllocClientStatusFailed, 3065 TaskStates: map[string]*TaskState{"foo": {State: "start", 3066 
StartedAt: now.Add(-1 * time.Hour), 3067 FinishedAt: now.Add(-15 * time.Second)}}, 3068 RescheduleTracker: &RescheduleTracker{ 3069 Events: []*RescheduleEvent{ 3070 { 3071 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3072 Delay: 5 * time.Second, 3073 }, 3074 { 3075 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3076 Delay: 5 * time.Second, 3077 }, 3078 { 3079 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3080 Delay: 10 * time.Second, 3081 }, 3082 { 3083 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3084 Delay: 15 * time.Second, 3085 }, 3086 { 3087 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3088 Delay: 25 * time.Second, 3089 }, 3090 { 3091 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 3092 Delay: 40 * time.Second, 3093 }, 3094 }, 3095 }}, 3096 expectedRescheduleTime: now.Add(-15 * time.Second).Add(50 * time.Second), 3097 expectedRescheduleEligible: true, 3098 }, 3099 { 3100 desc: "fibonacci delay with delay reset condition met", 3101 reschedulePolicy: &ReschedulePolicy{ 3102 DelayFunction: "fibonacci", 3103 Delay: 5 * time.Second, 3104 MaxDelay: 50 * time.Second, 3105 Unlimited: true, 3106 }, 3107 alloc: &Allocation{ 3108 ClientStatus: AllocClientStatusFailed, 3109 TaskStates: map[string]*TaskState{"foo": {State: "start", 3110 StartedAt: now.Add(-1 * time.Hour), 3111 FinishedAt: now.Add(-5 * time.Minute)}}, 3112 RescheduleTracker: &RescheduleTracker{ 3113 Events: []*RescheduleEvent{ 3114 { 3115 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3116 Delay: 5 * time.Second, 3117 }, 3118 { 3119 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3120 Delay: 5 * time.Second, 3121 }, 3122 { 3123 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3124 Delay: 10 * time.Second, 3125 }, 3126 { 3127 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3128 Delay: 15 * time.Second, 3129 }, 3130 { 3131 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3132 Delay: 25 * time.Second, 3133 }, 3134 { 3135 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3136 Delay: 40 * time.Second, 3137 }, 3138 }, 3139 }}, 3140 expectedRescheduleTime: now.Add(-5 * time.Minute).Add(5 * time.Second), 3141 expectedRescheduleEligible: true, 3142 }, 3143 { 3144 desc: "fibonacci delay with the most recent event that reset delay value", 3145 reschedulePolicy: &ReschedulePolicy{ 3146 DelayFunction: "fibonacci", 3147 Delay: 5 * time.Second, 3148 MaxDelay: 50 * time.Second, 3149 Unlimited: true, 3150 }, 3151 alloc: &Allocation{ 3152 ClientStatus: AllocClientStatusFailed, 3153 TaskStates: map[string]*TaskState{"foo": {State: "start", 3154 StartedAt: now.Add(-1 * time.Hour), 3155 FinishedAt: now.Add(-5 * time.Second)}}, 3156 RescheduleTracker: &RescheduleTracker{ 3157 Events: []*RescheduleEvent{ 3158 { 3159 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3160 Delay: 5 * time.Second, 3161 }, 3162 { 3163 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3164 Delay: 5 * time.Second, 3165 }, 3166 { 3167 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3168 Delay: 10 * time.Second, 3169 }, 3170 { 3171 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3172 Delay: 15 * time.Second, 3173 }, 3174 { 3175 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3176 Delay: 25 * time.Second, 3177 }, 3178 { 3179 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3180 Delay: 40 * time.Second, 3181 }, 3182 { 3183 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3184 Delay: 50 * 
time.Second, 3185 }, 3186 { 3187 RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(), 3188 Delay: 5 * time.Second, 3189 }, 3190 }, 3191 }}, 3192 expectedRescheduleTime: now.Add(-5 * time.Second).Add(5 * time.Second), 3193 expectedRescheduleEligible: true, 3194 }, 3195 } 3196 for _, tc := range testCases { 3197 t.Run(tc.desc, func(t *testing.T) { 3198 require := require.New(t) 3199 j := testJob() 3200 j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy 3201 tc.alloc.Job = j 3202 tc.alloc.TaskGroup = j.TaskGroups[0].Name 3203 reschedTime, allowed := tc.alloc.NextRescheduleTime() 3204 require.Equal(tc.expectedRescheduleEligible, allowed) 3205 require.Equal(tc.expectedRescheduleTime, reschedTime) 3206 }) 3207 } 3208 3209 } 3210 3211 func TestRescheduleTracker_Copy(t *testing.T) { 3212 type testCase struct { 3213 original *RescheduleTracker 3214 expected *RescheduleTracker 3215 } 3216 3217 cases := []testCase{ 3218 {nil, nil}, 3219 {&RescheduleTracker{Events: []*RescheduleEvent{ 3220 {RescheduleTime: 2, 3221 PrevAllocID: "12", 3222 PrevNodeID: "12", 3223 Delay: 30 * time.Second}, 3224 }}, &RescheduleTracker{Events: []*RescheduleEvent{ 3225 {RescheduleTime: 2, 3226 PrevAllocID: "12", 3227 PrevNodeID: "12", 3228 Delay: 30 * time.Second}, 3229 }}}, 3230 } 3231 3232 for _, tc := range cases { 3233 if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) { 3234 t.Fatalf("expected %v but got %v", *tc.expected, *got) 3235 } 3236 } 3237 } 3238 3239 func TestVault_Validate(t *testing.T) { 3240 v := &Vault{ 3241 Env: true, 3242 ChangeMode: VaultChangeModeNoop, 3243 } 3244 3245 if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") { 3246 t.Fatalf("Expected policy list empty error") 3247 } 3248 3249 v.Policies = []string{"foo", "root"} 3250 v.ChangeMode = VaultChangeModeSignal 3251 3252 err := v.Validate() 3253 if err == nil { 3254 t.Fatalf("Expected validation errors") 3255 } 3256 3257 if !strings.Contains(err.Error(), "Signal must") { 3258 t.Fatalf("Expected signal empty error") 3259 } 3260 if !strings.Contains(err.Error(), "root") { 3261 t.Fatalf("Expected root error") 3262 } 3263 } 3264 3265 func TestParameterizedJobConfig_Validate(t *testing.T) { 3266 d := &ParameterizedJobConfig{ 3267 Payload: "foo", 3268 } 3269 3270 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") { 3271 t.Fatalf("Expected unknown payload requirement: %v", err) 3272 } 3273 3274 d.Payload = DispatchPayloadOptional 3275 d.MetaOptional = []string{"foo", "bar"} 3276 d.MetaRequired = []string{"bar", "baz"} 3277 3278 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") { 3279 t.Fatalf("Expected meta not being disjoint error: %v", err) 3280 } 3281 } 3282 3283 func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { 3284 job := testJob() 3285 job.ParameterizedJob = &ParameterizedJobConfig{ 3286 Payload: DispatchPayloadOptional, 3287 } 3288 job.Type = JobTypeSystem 3289 3290 if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") { 3291 t.Fatalf("Expected bad scheduler type: %v", err) 3292 } 3293 } 3294 3295 func TestParameterizedJobConfig_Canonicalize(t *testing.T) { 3296 d := &ParameterizedJobConfig{} 3297 d.Canonicalize() 3298 if d.Payload != DispatchPayloadOptional { 3299 t.Fatalf("Canonicalize failed") 3300 } 3301 } 3302 3303 func TestDispatchPayloadConfig_Validate(t *testing.T) { 3304 d := &DispatchPayloadConfig{ 3305 File: "foo", 3306 } 3307 3308 // task/local/haha 3309 if err :=
d.Validate(); err != nil { 3310 t.Fatalf("bad: %v", err) 3311 } 3312 3313 // task/haha 3314 d.File = "../haha" 3315 if err := d.Validate(); err != nil { 3316 t.Fatalf("bad: %v", err) 3317 } 3318 3319 // ../haha 3320 d.File = "../../../haha" 3321 if err := d.Validate(); err == nil { 3322 t.Fatalf("bad: %v", err) 3323 } 3324 } 3325 3326 func TestIsRecoverable(t *testing.T) { 3327 if IsRecoverable(nil) { 3328 t.Errorf("nil should not be recoverable") 3329 } 3330 if IsRecoverable(NewRecoverableError(nil, true)) { 3331 t.Errorf("NewRecoverableError(nil, true) should not be recoverable") 3332 } 3333 if IsRecoverable(fmt.Errorf("i promise im recoverable")) { 3334 t.Errorf("Custom errors should not be recoverable") 3335 } 3336 if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) { 3337 t.Errorf("Explicitly unrecoverable errors should not be recoverable") 3338 } 3339 if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) { 3340 t.Errorf("Explicitly recoverable errors *should* be recoverable") 3341 } 3342 } 3343 3344 func TestACLTokenValidate(t *testing.T) { 3345 tk := &ACLToken{} 3346 3347 // Missing a type 3348 err := tk.Validate() 3349 assert.NotNil(t, err) 3350 if !strings.Contains(err.Error(), "client or management") { 3351 t.Fatalf("bad: %v", err) 3352 } 3353 3354 // Missing policies 3355 tk.Type = ACLClientToken 3356 err = tk.Validate() 3357 assert.NotNil(t, err) 3358 if !strings.Contains(err.Error(), "missing policies") { 3359 t.Fatalf("bad: %v", err) 3360 } 3361 3362 // Invalid policies 3363 tk.Type = ACLManagementToken 3364 tk.Policies = []string{"foo"} 3365 err = tk.Validate() 3366 assert.NotNil(t, err) 3367 if !strings.Contains(err.Error(), "associated with policies") { 3368 t.Fatalf("bad: %v", err) 3369 } 3370 3371 // Name too long policies 3372 tk.Name = "" 3373 for i := 0; i < 8; i++ { 3374 tk.Name += uuid.Generate() 3375 } 3376 tk.Policies = nil 3377 err = tk.Validate() 3378 assert.NotNil(t, err) 3379 if !strings.Contains(err.Error(), "too long") { 3380 t.Fatalf("bad: %v", err) 3381 } 3382 3383 // Make it valid 3384 tk.Name = "foo" 3385 err = tk.Validate() 3386 assert.Nil(t, err) 3387 } 3388 3389 func TestACLTokenPolicySubset(t *testing.T) { 3390 tk := &ACLToken{ 3391 Type: ACLClientToken, 3392 Policies: []string{"foo", "bar", "baz"}, 3393 } 3394 3395 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 3396 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 3397 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 3398 assert.Equal(t, true, tk.PolicySubset([]string{})) 3399 assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"})) 3400 assert.Equal(t, false, tk.PolicySubset([]string{"new"})) 3401 3402 tk = &ACLToken{ 3403 Type: ACLManagementToken, 3404 } 3405 3406 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 3407 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 3408 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 3409 assert.Equal(t, true, tk.PolicySubset([]string{})) 3410 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"})) 3411 assert.Equal(t, true, tk.PolicySubset([]string{"new"})) 3412 } 3413 3414 func TestACLTokenSetHash(t *testing.T) { 3415 tk := &ACLToken{ 3416 Name: "foo", 3417 Type: ACLClientToken, 3418 Policies: []string{"foo", "bar"}, 3419 Global: false, 3420 } 3421 out1 := tk.SetHash() 3422 assert.NotNil(t, out1) 3423 assert.NotNil(t, tk.Hash) 3424 assert.Equal(t, out1, tk.Hash) 3425 3426 tk.Policies = []string{"foo"} 3427 out2 := tk.SetHash() 3428 
assert.NotNil(t, out2) 3429 assert.NotNil(t, tk.Hash) 3430 assert.Equal(t, out2, tk.Hash) 3431 assert.NotEqual(t, out1, out2) 3432 } 3433 3434 func TestACLPolicySetHash(t *testing.T) { 3435 ap := &ACLPolicy{ 3436 Name: "foo", 3437 Description: "great policy", 3438 Rules: "node { policy = \"read\" }", 3439 } 3440 out1 := ap.SetHash() 3441 assert.NotNil(t, out1) 3442 assert.NotNil(t, ap.Hash) 3443 assert.Equal(t, out1, ap.Hash) 3444 3445 ap.Rules = "node { policy = \"write\" }" 3446 out2 := ap.SetHash() 3447 assert.NotNil(t, out2) 3448 assert.NotNil(t, ap.Hash) 3449 assert.Equal(t, out2, ap.Hash) 3450 assert.NotEqual(t, out1, out2) 3451 } 3452 3453 func TestTaskEventPopulate(t *testing.T) { 3454 prepopulatedEvent := NewTaskEvent(TaskSetup) 3455 prepopulatedEvent.DisplayMessage = "Hola" 3456 testcases := []struct { 3457 event *TaskEvent 3458 expectedMsg string 3459 }{ 3460 {nil, ""}, 3461 {prepopulatedEvent, "Hola"}, 3462 {NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"}, 3463 {NewTaskEvent(TaskStarted), "Task started by client"}, 3464 {NewTaskEvent(TaskReceived), "Task received by client"}, 3465 {NewTaskEvent(TaskFailedValidation), "Validation of task failed"}, 3466 {NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"}, 3467 {NewTaskEvent(TaskSetupFailure), "Task setup failed"}, 3468 {NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"}, 3469 {NewTaskEvent(TaskDriverFailure), "Failed to start task"}, 3470 {NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"}, 3471 {NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"}, 3472 {NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"}, 3473 {NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"}, 3474 {NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"}, 3475 {NewTaskEvent(TaskKilling), "Sent interrupt"}, 3476 {NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"}, 3477 {NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. 
Waiting 1s before force killing"}, 3478 {NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"}, 3479 {NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""}, 3480 {NewTaskEvent(TaskKilled), "Task successfully killed"}, 3481 {NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"}, 3482 {NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"}, 3483 {NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"}, 3484 {NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"}, 3485 {NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"}, 3486 {NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"}, 3487 {NewTaskEvent(TaskSignaling), "Task being sent a signal"}, 3488 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"}, 3489 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"}, 3490 {NewTaskEvent(TaskRestartSignal), "Task signaled to restart"}, 3491 {NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"}, 3492 {NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"}, 3493 {NewTaskEvent("Unknown Type, No message"), ""}, 3494 {NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"}, 3495 } 3496 3497 for _, tc := range testcases { 3498 tc.event.PopulateEventDisplayMessage() 3499 if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg { 3500 t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage) 3501 } 3502 } 3503 } 3504 3505 func TestNetworkResourcesEquals(t *testing.T) { 3506 require := require.New(t) 3507 var networkResourcesTest = []struct { 3508 input []*NetworkResource 3509 expected bool 3510 errorMsg string 3511 }{ 3512 { 3513 []*NetworkResource{ 3514 { 3515 IP: "10.0.0.1", 3516 MBits: 50, 3517 ReservedPorts: []Port{{"web", 80}}, 3518 }, 3519 { 3520 IP: "10.0.0.1", 3521 MBits: 50, 3522 ReservedPorts: []Port{{"web", 80}}, 3523 }, 3524 }, 3525 true, 3526 "Equal network resources should return true", 3527 }, 3528 { 3529 []*NetworkResource{ 3530 { 3531 IP: "10.0.0.0", 3532 MBits: 50, 3533 ReservedPorts: []Port{{"web", 80}}, 3534 }, 3535 { 3536 IP: "10.0.0.1", 3537 MBits: 50, 3538 ReservedPorts: []Port{{"web", 80}}, 3539 }, 3540 }, 3541 false, 3542 "Different IP addresses should return false", 3543 }, 3544 { 3545 []*NetworkResource{ 3546 { 3547 IP: "10.0.0.1", 3548 MBits: 40, 3549 ReservedPorts: []Port{{"web", 80}}, 3550 }, 3551 { 3552 IP: "10.0.0.1", 3553 MBits: 50, 3554 ReservedPorts: []Port{{"web", 80}}, 3555 }, 3556 }, 3557 false, 3558 "Different MBits values should return false", 3559 }, 3560 { 3561 []*NetworkResource{ 3562 { 3563 IP: "10.0.0.1", 3564 MBits: 50, 3565 ReservedPorts: []Port{{"web", 80}}, 3566 }, 3567 { 3568 IP: "10.0.0.1", 3569 MBits: 50, 3570 ReservedPorts: []Port{{"web", 80}, {"web", 80}}, 3571 }, 3572 }, 3573 false, 3574 "Different ReservedPorts lengths should return false", 3575 }, 3576 { 3577 []*NetworkResource{ 3578 { 3579 IP: "10.0.0.1", 3580 MBits: 50, 3581 ReservedPorts: []Port{{"web", 80}}, 3582 }, 3583 { 3584 IP: "10.0.0.1", 3585 MBits: 50, 3586 ReservedPorts: []Port{}, 3587 }, 3588 }, 3589 false, 3590 "Empty and non empty ReservedPorts values should return false", 3591 }, 3592 { 
3593 []*NetworkResource{ 3594 { 3595 IP: "10.0.0.1", 3596 MBits: 50, 3597 ReservedPorts: []Port{{"web", 80}}, 3598 }, 3599 { 3600 IP: "10.0.0.1", 3601 MBits: 50, 3602 ReservedPorts: []Port{{"notweb", 80}}, 3603 }, 3604 }, 3605 false, 3606 "Different valued ReservedPorts values should return false", 3607 }, 3608 { 3609 []*NetworkResource{ 3610 { 3611 IP: "10.0.0.1", 3612 MBits: 50, 3613 DynamicPorts: []Port{{"web", 80}}, 3614 }, 3615 { 3616 IP: "10.0.0.1", 3617 MBits: 50, 3618 DynamicPorts: []Port{{"web", 80}, {"web", 80}}, 3619 }, 3620 }, 3621 false, 3622 "Different DynamicPorts lengths should return false", 3623 }, 3624 { 3625 []*NetworkResource{ 3626 { 3627 IP: "10.0.0.1", 3628 MBits: 50, 3629 DynamicPorts: []Port{{"web", 80}}, 3630 }, 3631 { 3632 IP: "10.0.0.1", 3633 MBits: 50, 3634 DynamicPorts: []Port{}, 3635 }, 3636 }, 3637 false, 3638 "Empty and non empty DynamicPorts values should return false", 3639 }, 3640 { 3641 []*NetworkResource{ 3642 { 3643 IP: "10.0.0.1", 3644 MBits: 50, 3645 DynamicPorts: []Port{{"web", 80}}, 3646 }, 3647 { 3648 IP: "10.0.0.1", 3649 MBits: 50, 3650 DynamicPorts: []Port{{"notweb", 80}}, 3651 }, 3652 }, 3653 false, 3654 "Different valued DynamicPorts values should return false", 3655 }, 3656 } 3657 for _, testCase := range networkResourcesTest { 3658 first := testCase.input[0] 3659 second := testCase.input[1] 3660 require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg) 3661 } 3662 } 3663 3664 func TestNode_Canonicalize(t *testing.T) { 3665 t.Parallel() 3666 require := require.New(t) 3667 3668 // Make sure the eligibility is set properly 3669 node := &Node{} 3670 node.Canonicalize() 3671 require.Equal(NodeSchedulingEligible, node.SchedulingEligibility) 3672 3673 node = &Node{ 3674 Drain: true, 3675 } 3676 node.Canonicalize() 3677 require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility) 3678 } 3679 3680 func TestNode_Copy(t *testing.T) { 3681 t.Parallel() 3682 require := require.New(t) 3683 3684 node := &Node{ 3685 ID: uuid.Generate(), 3686 SecretID: uuid.Generate(), 3687 Datacenter: "dc1", 3688 Name: "foobar", 3689 Attributes: map[string]string{ 3690 "kernel.name": "linux", 3691 "arch": "x86", 3692 "nomad.version": "0.5.0", 3693 "driver.exec": "1", 3694 "driver.mock_driver": "1", 3695 }, 3696 Resources: &Resources{ 3697 CPU: 4000, 3698 MemoryMB: 8192, 3699 DiskMB: 100 * 1024, 3700 IOPS: 150, 3701 Networks: []*NetworkResource{ 3702 { 3703 Device: "eth0", 3704 CIDR: "192.168.0.100/32", 3705 MBits: 1000, 3706 }, 3707 }, 3708 }, 3709 Reserved: &Resources{ 3710 CPU: 100, 3711 MemoryMB: 256, 3712 DiskMB: 4 * 1024, 3713 Networks: []*NetworkResource{ 3714 { 3715 Device: "eth0", 3716 IP: "192.168.0.100", 3717 ReservedPorts: []Port{{Label: "ssh", Value: 22}}, 3718 MBits: 1, 3719 }, 3720 }, 3721 }, 3722 Links: map[string]string{ 3723 "consul": "foobar.dc1", 3724 }, 3725 Meta: map[string]string{ 3726 "pci-dss": "true", 3727 "database": "mysql", 3728 "version": "5.6", 3729 }, 3730 NodeClass: "linux-medium-pci", 3731 Status: NodeStatusReady, 3732 SchedulingEligibility: NodeSchedulingEligible, 3733 Drivers: map[string]*DriverInfo{ 3734 "mock_driver": { 3735 Attributes: map[string]string{"running": "1"}, 3736 Detected: true, 3737 Healthy: true, 3738 HealthDescription: "Currently active", 3739 UpdateTime: time.Now(), 3740 }, 3741 }, 3742 } 3743 node.ComputeClass() 3744 3745 node2 := node.Copy() 3746 3747 require.Equal(node.Attributes, node2.Attributes) 3748 require.Equal(node.Resources, node2.Resources) 3749 require.Equal(node.Reserved,
node2.Reserved) 3750 require.Equal(node.Links, node2.Links) 3751 require.Equal(node.Meta, node2.Meta) 3752 require.Equal(node.Events, node2.Events) 3753 require.Equal(node.DrainStrategy, node2.DrainStrategy) 3754 require.Equal(node.Drivers, node2.Drivers) 3755 }
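// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): a minimal illustration of
// the behavior exercised by TestAllocation_NextDelay above, using only
// identifiers that already appear in this file. With a "constant" delay
// function and an unlimited policy, the next reschedule time is expected to be
// the failed task's FinishedAt plus the configured Delay:
//
//	policy := &ReschedulePolicy{DelayFunction: "constant", Delay: 5 * time.Second, Unlimited: true}
//	finished := time.Now().Add(-2 * time.Second)
//	alloc := &Allocation{
//		ClientStatus: AllocClientStatusFailed,
//		TaskStates: map[string]*TaskState{"foo": {State: "dead", StartedAt: finished.Add(-1 * time.Hour), FinishedAt: finished}},
//	}
//	j := testJob()
//	j.TaskGroups[0].ReschedulePolicy = policy
//	alloc.Job, alloc.TaskGroup = j, j.TaskGroups[0].Name
//	reschedTime, eligible := alloc.NextRescheduleTime()
//	// expected: eligible == true, reschedTime == finished.Add(5 * time.Second)
// ---------------------------------------------------------------------------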