// github.com/quite/nomad@v0.8.6/nomad/structs/structs_test.go

package structs

import (
	"fmt"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestJob_Validate(t *testing.T) {
	j := &Job{}
	err := j.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "job region") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "job ID") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "job name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "namespace") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "job type") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[5].Error(), "priority") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[6].Error(), "datacenters") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[7].Error(), "task groups") {
		t.Fatalf("err: %s", err)
	}

	j = &Job{
		Type: "invalid-job-type",
	}
	err = j.Validate()
	if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	j = &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Error(), "Periodic") {
		t.Fatalf("err: %s", err)
	}

	j = &Job{
		Region: "global",
		ID: uuid.Generate(),
		Namespace: "test",
		Name: "my-job",
		Type: JobTypeService,
		Priority: 50,
		Datacenters: []string{"dc1"},
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay: 10 * time.Second,
					Attempts: 10,
				},
			},
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay: 10 * time.Second,
					Attempts: 10,
				},
			},
			{
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay: 10 * time.Second,
					Attempts: 10,
				},
			},
		},
	}
	err = j.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") {
		t.Fatalf("err: %s", err)
	}
}

func TestJob_Warnings(t *testing.T) {
	cases := []struct {
		Name string
		Job *Job
		Expected []string
	}{
		{
			Name: "Higher counts for update stanza",
			Expected: []string{"max parallel count is greater"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Name: "foo",
						Count: 2,
						Update: &UpdateStrategy{
							MaxParallel: 10,
						},
					},
				},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			warnings := c.Job.Warnings()
			if warnings == nil {
				if len(c.Expected) == 0 {
					return
				} else {
					t.Fatal("Got no warnings when they were expected")
				}
			}

			a := warnings.Error()
			for _, e := range c.Expected {
				if !strings.Contains(a, e) {
					t.Fatalf("Got warnings %q; didn't contain %q", a, e)
				}
			}
		})
	}
}

func TestJob_SpecChanged(t *testing.T) {
	// Get a base test job
	base := testJob()

	// Only modify the indexes/mutable state of the job
	mutatedBase := base.Copy()
	mutatedBase.Status = "foo"
	mutatedBase.ModifyIndex = base.ModifyIndex + 100

	// changed contains a spec change that should be detected
	change := base.Copy()
	change.Priority = 99

	cases := []struct {
		Name string
		Original *Job
		New *Job
		Changed bool
	}{
		{
			Name: "Same job except mutable indexes",
			Changed: false,
			Original: base,
			New: mutatedBase,
		},
		{
			Name: "Different",
			Changed: true,
			Original: base,
			New: change,
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			if actual := c.Original.SpecChanged(c.New); actual != c.Changed {
				t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed)
			}
		})
	}
}

func testJob() *Job {
	return &Job{
		Region: "global",
		ID: uuid.Generate(),
		Namespace: "test",
		Name: "my-job",
		Type: JobTypeService,
		Priority: 50,
		AllAtOnce: false,
		Datacenters: []string{"dc1"},
		Constraints: []*Constraint{
			{
				LTarget: "$attr.kernel.name",
				RTarget: "linux",
				Operand: "=",
			},
		},
		Periodic: &PeriodicConfig{
			Enabled: false,
		},
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				Count: 10,
				EphemeralDisk: DefaultEphemeralDisk(),
				RestartPolicy: &RestartPolicy{
					Mode: RestartPolicyModeFail,
					Attempts: 3,
					Interval: 10 * time.Minute,
					Delay: 1 * time.Minute,
				},
				ReschedulePolicy: &ReschedulePolicy{
					Interval: 5 * time.Minute,
					Attempts: 10,
					Delay: 5 * time.Second,
					DelayFunction: "constant",
				},
				Tasks: []*Task{
					{
						Name: "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{
							"FOO": "bar",
						},
						Artifacts: []*TaskArtifact{
							{
								GetterSource: "http://foo.com",
							},
						},
						Services: []*Service{
							{
								Name: "${TASK}-frontend",
								PortLabel: "http",
							},
						},
						Resources: &Resources{
							CPU: 500,
							MemoryMB: 256,
							Networks: []*NetworkResource{
								{
									MBits: 50,
									DynamicPorts: []Port{{Label: "http"}},
								},
							},
						},
						LogConfig: &LogConfig{
							MaxFiles: 10,
							MaxFileSizeMB: 1,
						},
					},
				},
				Meta: map[string]string{
					"elb_check_type": "http",
					"elb_check_interval": "30s",
					"elb_check_min": "3",
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
	}
}

func TestJob_Copy(t *testing.T) {
	j := testJob()
	c := j.Copy()
	if !reflect.DeepEqual(j, c) {
		t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
	}
}

func TestJob_IsPeriodic(t *testing.T) {
	j := &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	if !j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned false on periodic job")
	}

	j = &Job{
		Type: JobTypeService,
	}
	if j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned true on non-periodic job")
	}
}

func TestJob_IsPeriodicActive(t *testing.T) {
	cases := []struct {
		job *Job
		active bool
	}{
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
			},
			active: true,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
				Stop: true,
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
				ParameterizedJob: &ParameterizedJobConfig{},
			},
			active: false,
		},
	}

	for i, c := range cases {
		if act := c.job.IsPeriodicActive(); act != c.active {
			t.Fatalf("case %d failed: got %v; want %v", i, act, c.active)
		}
	}
}

func TestJob_SystemJob_Validate(t *testing.T) {
	j := testJob()
	j.Type = JobTypeSystem
	j.TaskGroups[0].ReschedulePolicy = nil
	j.Canonicalize()

	err := j.Validate()
	if err == nil || !strings.Contains(err.Error(), "exceed") {
		t.Fatalf("expect error due to count")
	}

	j.TaskGroups[0].Count = 0
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	j.TaskGroups[0].Count = 1
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}
}

func TestJob_VaultPolicies(t *testing.T) {
	j0 := &Job{}
	e0 := make(map[string]map[string]*Vault, 0)

	vj1 := &Vault{
		Policies: []string{
			"p1",
			"p2",
		},
	}
	vj2 := &Vault{
		Policies: []string{
			"p3",
			"p4",
		},
	}
	vj3 := &Vault{
		Policies: []string{
			"p5",
		},
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name: "t2",
						Vault: vj1,
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name: "t3",
						Vault: vj2,
					},
					{
						Name: "t4",
						Vault: vj3,
					},
				},
			},
		},
	}

	e1 := map[string]map[string]*Vault{
		"foo": {
			"t2": vj1,
		},
		"bar": {
			"t3": vj2,
			"t4": vj3,
		},
	}

	cases := []struct {
		Job *Job
		Expected map[string]map[string]*Vault
	}{
		{
			Job: j0,
			Expected: e0,
		},
		{
			Job: j1,
			Expected: e1,
		},
	}

	for i, c := range cases {
		got := c.Job.VaultPolicies()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

func TestJob_RequiredSignals(t *testing.T) {
	j0 := &Job{}
	e0 := make(map[string]map[string][]string, 0)

	vj1 := &Vault{
		Policies: []string{"p1"},
		ChangeMode: VaultChangeModeNoop,
	}
	vj2 := &Vault{
		Policies: []string{"p1"},
		ChangeMode: VaultChangeModeSignal,
		ChangeSignal: "SIGUSR1",
	}
	tj1 := &Template{
		SourcePath: "foo",
		DestPath: "bar",
		ChangeMode: TemplateChangeModeNoop,
	}
	tj2 := &Template{
		SourcePath: "foo",
		DestPath: "bar",
		ChangeMode: TemplateChangeModeSignal,
		ChangeSignal: "SIGUSR2",
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name: "t2",
						Vault: vj2,
						Templates: []*Template{tj2},
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name: "t3",
						Vault: vj1,
						Templates: []*Template{tj1},
					},
					{
						Name: "t4",
						Vault: vj2,
					},
				},
			},
		},
	}

	e1 := map[string]map[string][]string{
		"foo": {
			"t2": {"SIGUSR1", "SIGUSR2"},
		},
		"bar": {
			"t4": {"SIGUSR1"},
		},
	}

	j2 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
						KillSignal: "SIGQUIT",
					},
				},
			},
		},
	}

	e2 := map[string]map[string][]string{
		"foo": {
			"t1": {"SIGQUIT"},
		},
	}

	cases := []struct {
		Job *Job
		Expected map[string]map[string][]string
	}{
		{
			Job: j0,
			Expected: e0,
		},
		{
			Job: j1,
			Expected: e1,
		},
		{
			Job: j2,
			Expected: e2,
		},
	}

	for i, c := range cases {
		got := c.Job.RequiredSignals()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

func TestTaskGroup_Validate(t *testing.T) {
	j := testJob()
	tg := &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay: 10 * time.Second,
			Attempts: 10,
			Mode: RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay: 5 * time.Second,
		},
	}
	err := tg.Validate(j)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "group name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
			{
				Name: "task-b",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected := `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{
								{Label: "foo", Value: 123},
								{Label: "bar", Value: 123},
							},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Name: "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
			{Name: "web", Leader: true},
			{},
		},
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay: 10 * time.Second,
			Attempts: 10,
			Mode: RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 10,
			Delay: 5 * time.Second,
			DelayFunction: "constant",
		},
	}

	err = tg.Validate(j)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Name: "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
		},
		Update: DefaultUpdateStrategy.Copy(),
	}
	j.Type = JobTypeBatch
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "does not allow update block") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay: 10 * time.Second,
			Attempts: 10,
			Mode: RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay: 5 * time.Second,
		},
	}
	j.Type = JobTypeSystem
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate(t *testing.T) {
	task := &Task{}
	ephemeralDisk := DefaultEphemeralDisk()
	err := task.Validate(ephemeralDisk)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "task name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task driver") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "task resources") {
		t.Fatalf("err: %s", err)
	}

	task = &Task{Name: "web/foo"}
	err = task.Validate(ephemeralDisk)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "slashes") {
		t.Fatalf("err: %s", err)
	}

	task = &Task{
		Name: "web",
		Driver: "docker",
		Resources: &Resources{
			CPU: 100,
			MemoryMB: 100,
			IOPS: 10,
		},
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk.SizeMB = 200
	err = task.Validate(ephemeralDisk)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	task.Constraints = append(task.Constraints,
		&Constraint{
			Operand: ConstraintDistinctHosts,
		},
		&Constraint{
			Operand: ConstraintDistinctProperty,
			LTarget: "${meta.rack}",
		})

	err = task.Validate(ephemeralDisk)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate_Services(t *testing.T) {
	s1 := &Service{
		Name: "service-name",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name: "check-name",
				Type: ServiceCheckTCP,
				Interval: 0 * time.Second,
			},
			{
				Name: "check-name",
				Type: ServiceCheckTCP,
				Timeout: 2 * time.Second,
			},
			{
				Name: "check-name",
				Type: ServiceCheckTCP,
				Interval: 1 * time.Second,
			},
		},
	}

	s2 := &Service{
		Name: "service-name",
		PortLabel: "bar",
	}

	s3 := &Service{
		Name: "service-A",
		PortLabel: "a",
	}
	s4 := &Service{
		Name: "service-A",
		PortLabel: "b",
	}

	ephemeralDisk := DefaultEphemeralDisk()
	ephemeralDisk.SizeMB = 200
	task := &Task{
		Name: "web",
		Driver: "docker",
		Resources: &Resources{
			CPU: 100,
			MemoryMB: 100,
			IOPS: 10,
		},
		Services: []*Service{s1, s2},
	}

	task1 := &Task{
		Name: "web",
		Driver: "docker",
		Resources: DefaultResources(),
		Services: []*Service{s3, s4},
		LogConfig: DefaultLogConfig(),
	}
	task1.Resources.Networks = []*NetworkResource{
		{
			MBits: 10,
			DynamicPorts: []Port{
				{
					Label: "a",
					Value: 1000,
				},
				{
					Label: "b",
					Value: 2000,
				},
			},
		},
	}

	err := task.Validate(ephemeralDisk)
	if err == nil {
		t.Fatal("expected an error")
	}

	if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "missing required value interval") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "cannot be less than") {
		t.Fatalf("err: %v", err)
	}

	if err = task1.Validate(ephemeralDisk); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		task := &Task{
			Name: "web",
			Driver: "docker",
			Resources: DefaultResources(),
			Services: []*Service{s},
			LogConfig: DefaultLogConfig(),
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name: "DriverModeWithLabel",
			PortLabel: "http",
			AddressMode: AddressModeDriver,
		},
		{
			Name: "DriverModeWithPort",
			PortLabel: "80",
			AddressMode: AddressModeDriver,
		},
		{
			Name: "HostModeWithLabel",
			PortLabel: "http",
			AddressMode: AddressModeHost,
		},
		{
			Name: "HostModeWithoutLabel",
			AddressMode: AddressModeHost,
		},
		{
			Name: "DriverModeWithoutLabel",
			AddressMode: AddressModeDriver,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			if err := task.Validate(ephemeralDisk); err != nil {
				t.Fatalf("unexpected err: %v", err)
			}
		})
	}
}

func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		task := &Task{
			Name: "web",
			Driver: "docker",
			Resources: DefaultResources(),
			Services: []*Service{s},
			LogConfig: DefaultLogConfig(),
		}
		task.Resources.Networks = []*NetworkResource{
			{
				MBits: 10,
				DynamicPorts: []Port{
					{
						Label: "http",
						Value: 80,
					},
				},
			},
		}
		return task
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name: "DriverModeWithLabel",
			PortLabel: "asdf",
			AddressMode: AddressModeDriver,
		},
		{
			Name: "HostModeWithLabel",
			PortLabel: "asdf",
			AddressMode: AddressModeHost,
		},
		{
			Name: "HostModeWithPort",
			PortLabel: "80",
			AddressMode: AddressModeHost,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			err := task.Validate(ephemeralDisk)
			if err == nil {
				t.Fatalf("expected an error")
			}
			//t.Logf("err: %v", err)
		})
	}
}

func TestTask_Validate_Service_Check(t *testing.T) {

	invalidCheck := ServiceCheck{
		Name: "check-name",
		Command: "/bin/true",
		Type: ServiceCheckScript,
		Interval: 10 * time.Second,
	}

	err := invalidCheck.validate()
	if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") {
		t.Fatalf("expected a timeout validation error but received: %q", err)
	}

	check1 := ServiceCheck{
		Name: "check-name",
		Type: ServiceCheckTCP,
		Interval: 10 * time.Second,
		Timeout: 2 * time.Second,
	}

	if err := check1.validate(); err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = "foo"
	err = check1.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}

	if !strings.Contains(err.Error(), "invalid initial check state (foo)") {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthCritical
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthPassing
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = ""
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2 := ServiceCheck{
		Name: "check-name-2",
		Type: ServiceCheckHTTP,
		Interval: 10 * time.Second,
		Timeout: 2 * time.Second,
		Path: "/foo/bar",
	}

	err = check2.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2.Path = ""
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "valid http path") {
		t.Fatalf("err: %v", err)
	}

	check2.Path = "http://www.example.com"
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "relative http path") {
		t.Fatalf("err: %v", err)
	}
}

// TestTask_Validate_Service_Check_AddressMode asserts that checks do not
// inherit address mode but do inherit ports.
func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
	getTask := func(s *Service) *Task {
		return &Task{
			Resources: &Resources{
				Networks: []*NetworkResource{
					{
						DynamicPorts: []Port{
							{
								Label: "http",
								Value: 9999,
							},
						},
					},
				},
			},
			Services: []*Service{s},
		}
	}

	cases := []struct {
		Service *Service
		ErrContains string
	}{
		{
			Service: &Service{
				Name: "invalid-driver",
				PortLabel: "80",
				AddressMode: "host",
			},
			ErrContains: `port label "80" referenced`,
		},
		{
			Service: &Service{
				Name: "http-driver-fail-1",
				PortLabel: "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name: "invalid-check-1",
						Type: "tcp",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-1" cannot use a numeric port`,
		},
		{
			Service: &Service{
				Name: "http-driver-fail-2",
				PortLabel: "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name: "invalid-check-2",
						Type: "tcp",
						PortLabel: "80",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-2" cannot use a numeric port`,
		},
		{
			Service: &Service{
				Name: "http-driver-fail-3",
				PortLabel: "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name: "invalid-check-3",
						Type: "tcp",
						PortLabel: "missing-port-label",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
			ErrContains: `port label "missing-port-label" referenced`,
		},
		{
			Service: &Service{
				Name: "http-driver-passes",
				PortLabel: "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name: "valid-script-check",
						Type: "script",
						Command: "ok",
						Interval: time.Second,
						Timeout: time.Second,
					},
					{
						Name: "valid-host-check",
						Type: "tcp",
						PortLabel: "http",
						Interval: time.Second,
						Timeout: time.Second,
					},
					{
						Name: "valid-driver-check",
						Type: "tcp",
						AddressMode: "driver",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-passes-1",
				Checks: []*ServiceCheck{
					{
						Name: "valid-port-label",
						Type: "tcp",
						PortLabel: "http",
						Interval: time.Second,
						Timeout: time.Second,
					},
					{
						Name: "empty-is-ok",
						Type: "script",
						Command: "ok",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-passes-2",
			},
		},
		{
			Service: &Service{
				Name: "empty-address-3673-fails",
				Checks: []*ServiceCheck{
					{
						Name: "empty-is-not-ok",
						Type: "tcp",
						Interval: time.Second,
						Timeout: time.Second,
					},
				},
			},
			ErrContains: `invalid: check requires a port but neither check nor service`,
		},
	}

	for _, tc := range cases {
		tc := tc
		task := getTask(tc.Service)
		t.Run(tc.Service.Name, func(t *testing.T) {
			err := validateServices(task)
			if err == nil && tc.ErrContains == "" {
				// Ok!
				return
			}
			if err == nil {
				t.Fatalf("no error returned. expected: %s", tc.ErrContains)
			}
			if !strings.Contains(err.Error(), tc.ErrContains) {
				t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
			}
		})
	}
}

func TestTask_Validate_Service_Check_GRPC(t *testing.T) {
	t.Parallel()
	// Bad (no port)
	invalidGRPC := &ServiceCheck{
		Type: ServiceCheckGRPC,
		Interval: time.Second,
		Timeout: time.Second,
	}
	service := &Service{
		Name: "test",
		Checks: []*ServiceCheck{invalidGRPC},
	}

	assert.Error(t, service.Validate())

	// Good
	service.Checks[0] = &ServiceCheck{
		Type: ServiceCheckGRPC,
		Interval: time.Second,
		Timeout: time.Second,
		PortLabel: "some-port-label",
	}

	assert.NoError(t, service.Validate())
}

func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
	t.Parallel()
	invalidCheckRestart := &CheckRestart{
		Limit: -1,
		Grace: -1,
	}

	err := invalidCheckRestart.Validate()
	assert.NotNil(t, err, "invalidCheckRestart.Validate()")
	assert.Len(t, err.(*multierror.Error).Errors, 2)

	validCheckRestart := &CheckRestart{}
	assert.Nil(t, validCheckRestart.Validate())

	validCheckRestart.Limit = 1
	validCheckRestart.Grace = 1
	assert.Nil(t, validCheckRestart.Validate())
}

func TestTask_Validate_LogConfig(t *testing.T) {
	task := &Task{
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk := &EphemeralDisk{
		SizeMB: 1,
	}

	err := task.Validate(ephemeralDisk)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[3].Error(), "log storage") {
		t.Fatalf("err: %s", err)
	}
}

func TestTask_Validate_Template(t *testing.T) {

	bad := &Template{}
	task := &Task{
		Templates: []*Template{bad},
	}
	ephemeralDisk := &EphemeralDisk{
		SizeMB: 1,
	}

	err := task.Validate(ephemeralDisk)
	if !strings.Contains(err.Error(), "Template 1 validation failed") {
		t.Fatalf("err: %s", err)
	}

	// Have two templates that share the same destination
	good := &Template{
		SourcePath: "foo",
		DestPath: "local/foo",
		ChangeMode: "noop",
	}

	task.Templates = []*Template{good, good}
	err = task.Validate(ephemeralDisk)
	if !strings.Contains(err.Error(), "same destination as") {
		t.Fatalf("err: %s", err)
	}

	// Env templates can't use signals
	task.Templates = []*Template{
		{
			Envvars: true,
			ChangeMode: "signal",
		},
	}

	err = task.Validate(ephemeralDisk)
	if err == nil {
		t.Fatalf("expected error from Template.Validate")
	}
	if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) {
		t.Errorf("expected to find %q but found %v", expected, err)
	}
}

func TestTemplate_Validate(t *testing.T) {
	cases := []struct {
		Tmpl *Template
		Fail bool
		ContainsErrs []string
	}{
		{
			Tmpl: &Template{},
			Fail: true,
			ContainsErrs: []string{
				"specify a source path",
				"specify a destination",
				TemplateChangeModeInvalidError.Error(),
			},
		},
		{
			Tmpl: &Template{
				Splay: -100,
			},
			Fail: true,
			ContainsErrs: []string{
				"positive splay",
			},
		},
		{
			Tmpl: &Template{
				ChangeMode: "foo",
			},
			Fail: true,
			ContainsErrs: []string{
				TemplateChangeModeInvalidError.Error(),
			},
		},
		{
			Tmpl: &Template{
				ChangeMode: "signal",
			},
			Fail: true,
			ContainsErrs: []string{
				"specify signal value",
			},
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath: "../../root",
				ChangeMode: "noop",
			},
			Fail: true,
			ContainsErrs: []string{
				"destination escapes",
			},
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath: "local/foo",
				ChangeMode: "noop",
			},
			Fail: false,
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath: "local/foo",
				ChangeMode: "noop",
				Perms: "0444",
			},
			Fail: false,
		},
		{
			Tmpl: &Template{
				SourcePath: "foo",
				DestPath: "local/foo",
				ChangeMode: "noop",
				Perms: "zza",
			},
			Fail: true,
			ContainsErrs: []string{
				"as octal",
			},
		},
	}

	for i, c := range cases {
		err := c.Tmpl.Validate()
		if err != nil {
			if !c.Fail {
				t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err)
			}

			e := err.Error()
			for _, exp := range c.ContainsErrs {
				if !strings.Contains(e, exp) {
					t.Fatalf("Case %d: should have contained error %q: %q", i+1, exp, e)
				}
			}
		} else if c.Fail {
			t.Fatalf("Case %d: should have failed: %v", i+1, err)
		}
	}
}

func TestConstraint_Validate(t *testing.T) {
	c := &Constraint{}
	err := c.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") {
		t.Fatalf("err: %s", err)
	}

	c = &Constraint{
		LTarget: "$attr.kernel.name",
		RTarget: "linux",
		Operand: "=",
	}
	err = c.Validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Perform additional regexp validation
	c.Operand = ConstraintRegex
	c.RTarget = "(foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "missing closing") {
		t.Fatalf("err: %s", err)
	}

	// Perform version validation
	c.Operand = ConstraintVersion
	c.RTarget = "~> foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") {
		t.Fatalf("err: %s", err)
	}

	// Perform distinct_property validation
	c.Operand = ConstraintDistinctProperty
	c.RTarget = "0"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") {
		t.Fatalf("err: %s", err)
	}

	c.RTarget = "-1"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "to uint64") {
		t.Fatalf("err: %s", err)
	}

	// Perform distinct_hosts validation
	c.Operand = ConstraintDistinctHosts
	c.LTarget = ""
	c.RTarget = ""
	if err := c.Validate(); err != nil {
		t.Fatalf("expected valid constraint: %v", err)
	}

	// Perform set_contains validation
	c.Operand = ConstraintSetContains
	c.RTarget = ""
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") {
		t.Fatalf("err: %s", err)
	}

	// Perform LTarget validation
	c.Operand = ConstraintRegex
	c.RTarget = "foo"
	c.LTarget = ""
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") {
		t.Fatalf("err: %s", err)
	}

	// Perform constraint type validation
	c.Operand = "foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") {
		t.Fatalf("err: %s", err)
	}
}

func TestUpdateStrategy_Validate(t *testing.T) {
	u := &UpdateStrategy{
		MaxParallel: 0,
		HealthCheck: "foo",
		MinHealthyTime: -10,
		HealthyDeadline: -15,
		ProgressDeadline: -25,
		AutoRevert: false,
		Canary: -1,
	}

	err := u.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than one") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[5].Error(), "Progress deadline must be zero or greater") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[6].Error(), "Minimum healthy time must be less than healthy deadline") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[7].Error(), "Healthy deadline must be less than progress deadline") {
		t.Fatalf("err: %s", err)
	}
}

func TestResource_NetIndex(t *testing.T) {
	r := &Resources{
		Networks: []*NetworkResource{
			{Device: "eth0"},
			{Device: "lo0"},
			{Device: ""},
		},
	}
	if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 {
		t.Fatalf("Bad: %d", idx)
	}
	if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 {
		t.Fatalf("Bad: %d", idx)
	}
	if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 {
		t.Fatalf("Bad: %d", idx)
	}
}

func TestResource_Superset(t *testing.T) {
	r1 := &Resources{
		CPU: 2000,
		MemoryMB: 2048,
		DiskMB: 10000,
		IOPS: 100,
	}
	r2 := &Resources{
		CPU: 2000,
		MemoryMB: 1024,
		DiskMB: 5000,
		IOPS: 50,
	}

	if s, _ := r1.Superset(r1); !s {
		t.Fatalf("bad")
	}
	if s, _ := r1.Superset(r2); !s {
		t.Fatalf("bad")
	}
	if s, _ := r2.Superset(r1); s {
		t.Fatalf("bad")
	}
	if s, _ := r2.Superset(r2); !s {
		t.Fatalf("bad")
	}
}

func TestResource_Add(t *testing.T) {
	r1 := &Resources{
		CPU: 2000,
		MemoryMB: 2048,
		DiskMB: 10000,
		IOPS: 100,
		Networks: []*NetworkResource{
			{
				CIDR: "10.0.0.0/8",
				MBits: 100,
				ReservedPorts: []Port{{"ssh", 22}},
			},
		},
	}
	r2 := &Resources{
		CPU: 2000,
		MemoryMB: 1024,
		DiskMB: 5000,
		IOPS: 50,
		Networks: []*NetworkResource{
			{
				IP: "10.0.0.1",
				MBits: 50,
				ReservedPorts: []Port{{"web", 80}},
			},
		},
	}

	err := r1.Add(r2)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	expect := &Resources{
		CPU: 3000,
		MemoryMB: 3072,
		DiskMB: 15000,
		IOPS: 150,
		Networks: []*NetworkResource{
			{
				CIDR: "10.0.0.0/8",
				MBits: 150,
				ReservedPorts: []Port{{"ssh", 22}, {"web", 80}},
			},
		},
	}

	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
		t.Fatalf("bad: %#v %#v", expect, r1)
	}
}

func TestResource_Add_Network(t *testing.T) {
	r1 := &Resources{}
	r2 := &Resources{
		Networks: []*NetworkResource{
			{
				MBits: 50,
				DynamicPorts: []Port{{"http", 0}, {"https", 0}},
			},
		},
	}
	r3 := &Resources{
		Networks: []*NetworkResource{
			{
				MBits: 25,
				DynamicPorts: []Port{{"admin", 0}},
			},
		},
	}

	err := r1.Add(r2)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	err = r1.Add(r3)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	expect := &Resources{
		Networks: []*NetworkResource{
			{
				MBits: 75,
				DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
			},
		},
	}

	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
		t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0])
	}
}

func TestEncodeDecode(t *testing.T) {
	type FooRequest struct {
		Foo string
		Bar int
		Baz bool
	}
	arg := &FooRequest{
		Foo: "test",
		Bar: 42,
		Baz: true,
	}
	buf, err := Encode(1, arg)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out FooRequest
	err = Decode(buf[1:], &out)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(arg, &out) {
		t.Fatalf("bad: %#v %#v", arg, out)
	}
}

func BenchmarkEncodeDecode(b *testing.B) {
	job := testJob()

	for i := 0; i < b.N; i++ {
		buf, err := Encode(1, job)
		if err != nil {
			b.Fatalf("err: %v", err)
		}

		var out Job
		err = Decode(buf[1:], &out)
		if err != nil {
			b.Fatalf("err: %v", err)
		}
	}
}

func TestInvalidServiceCheck(t *testing.T) {
	s := Service{
		Name: "service-name",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name: "check-name",
				Type: "lol",
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (invalid type)")
	}

	s = Service{
		Name: "service.name",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (contains a dot): %v", err)
	}

	s = Service{
		Name: "-my-service",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (begins with a hyphen): %v", err)
	}

	s = Service{
		Name: "my-service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("Service should be valid: %v", err)
	}

	s = Service{
		Name: "my_service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err)
	}

	s = Service{
		Name: "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (too long): %v", err)
	}

	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name: "check-tcp",
				Type: ServiceCheckTCP,
				Interval: 5 * time.Second,
				Timeout: 2 * time.Second,
			},
			{
				Name: "check-http",
				Type: ServiceCheckHTTP,
				Path: "/foo",
				Interval: 5 * time.Second,
				Timeout: 2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err)
	}

	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name: "check-script",
				Type: ServiceCheckScript,
				Command: "/bin/date",
				Interval: 5 * time.Second,
				Timeout: 2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}

func TestDistinctCheckID(t *testing.T) {
	c1 := ServiceCheck{
		Name: "web-health",
		Type: "http",
		Path: "/health",
		Interval: 2 * time.Second,
		Timeout: 3 * time.Second,
	}
	c2 := ServiceCheck{
		Name: "web-health",
		Type: "http",
		Path: "/health1",
		Interval: 2 * time.Second,
		Timeout: 3 * time.Second,
	}

	c3 := ServiceCheck{
		Name: "web-health",
		Type: "http",
		Path: "/health",
		Interval: 4 * time.Second,
		Timeout: 3 * time.Second,
	}
	serviceID := "123"
	c1Hash := c1.Hash(serviceID)
	c2Hash := c2.Hash(serviceID)
	c3Hash := c3.Hash(serviceID)

	if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash {
		t.Fatalf("Checks need to be unique c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash)
	}

}

func TestService_Canonicalize(t *testing.T) {
	job := "example"
	taskGroup := "cache"
	task := "redis"

	s := Service{
		Name: "${TASK}-db",
	}

	s.Canonicalize(job, taskGroup, task)
	if s.Name != "redis-db" {
		t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name)
	}

	s.Name = "db"
	s.Canonicalize(job, taskGroup, task)
	if s.Name != "db" {
		t.Fatalf("Expected name: %v, Actual: %v", "db", s.Name)
	}

	s.Name = "${JOB}-${TASKGROUP}-${TASK}-db"
	s.Canonicalize(job, taskGroup, task)
	if s.Name != "example-cache-redis-db" {
		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
	}

	s.Name = "${BASE}-db"
	s.Canonicalize(job, taskGroup, task)
	if s.Name != "example-cache-redis-db" {
		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
	}

}

func TestJob_ExpandServiceNames(t *testing.T) {
	j := &Job{
		Name: "my-job",
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				Tasks: []*Task{
					{
						Name: "frontend",
						Services: []*Service{
							{
								Name: "${BASE}-default",
							},
							{
								Name: "jmx",
							},
						},
					},
				},
			},
			{
				Name: "admin",
				Tasks: []*Task{
					{
						Name: "admin-web",
					},
				},
			},
		},
	}

	j.Canonicalize()

	service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name
	if service1Name != "my-job-web-frontend-default" {
		t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name)
	}

	service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name
	if service2Name != "jmx" {
		t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name)
	}

}

func TestPeriodicConfig_EnabledInvalid(t *testing.T) {
	// Create a config that is enabled but with no interval specified.
	p := &PeriodicConfig{Enabled: true}
	if err := p.Validate(); err == nil {
		t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid")
	}

	// Create a config that is enabled, with a spec but no type specified.
	p = &PeriodicConfig{Enabled: true, Spec: "foo"}
	if err := p.Validate(); err == nil {
		t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid")
	}

	// Create a config that is enabled, with a spec type but no spec specified.
	p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron}
	if err := p.Validate(); err == nil {
		t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid")
	}

	// Create a config that is enabled, with a bad time zone.
	p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") {
		t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
	}
}

func TestPeriodicConfig_InvalidCron(t *testing.T) {
	specs := []string{"foo", "* *", "@foo"}
	for _, spec := range specs {
		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
		p.Canonicalize()
		if err := p.Validate(); err == nil {
			t.Fatal("Invalid cron spec")
		}
	}
}

func TestPeriodicConfig_ValidCron(t *testing.T) {
	specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"}
	for _, spec := range specs {
		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
		p.Canonicalize()
		if err := p.Validate(); err != nil {
			t.Fatal("Passed valid cron")
		}
	}
}

func TestPeriodicConfig_NextCron(t *testing.T) {
	require := require.New(t)

	type testExpectation struct {
		Time time.Time
		HasError bool
		ErrorMsg string
	}

	from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC)
	specs := []string{"0 0 29 2 * 1980",
		"*/5 * * * *",
		"1 15-0 * * 1-5"}
	expected := []*testExpectation{
		{
			Time: time.Time{},
			HasError: false,
		},
		{
			Time: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC),
			HasError: false,
		},
		{
			Time: time.Time{},
			HasError: true,
			ErrorMsg: "failed parsing cron expression",
		},
	}

	for i, spec := range specs {
		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
		p.Canonicalize()
		n, err := p.Next(from)
		nextExpected := expected[i]

		require.Equal(nextExpected.Time, n)
		require.Equal(err != nil, nextExpected.HasError)
		if err != nil {
			require.True(strings.Contains(err.Error(), nextExpected.ErrorMsg))
		}
	}
}

func TestPeriodicConfig_ValidTimeZone(t *testing.T) {
	zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"}
	for _, zone := range zones {
		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone}
		p.Canonicalize()
		if err := p.Validate(); err != nil {
			t.Fatalf("Valid tz errored: %v", err)
		}
	}
}

func TestPeriodicConfig_DST(t *testing.T) {
	require := require.New(t)

	// On Sun, Mar 12, 2:00 am 2017: +1 hour UTC
	p := &PeriodicConfig{
		Enabled: true,
		SpecType: PeriodicSpecCron,
		Spec: "0 2 11-12 3 * 2017",
		TimeZone: "America/Los_Angeles",
	}
	p.Canonicalize()

	t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
	t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)

	// E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
	e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
	e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC)

	n1, err := p.Next(t1)
	require.Nil(err)

	n2, err := p.Next(t2)
	require.Nil(err)

	require.Equal(e1, n1.UTC())
	require.Equal(e2, n2.UTC())
}

func TestRestartPolicy_Validate(t *testing.T) {
	// Policy with acceptable restart options passes
	p := &RestartPolicy{
		Mode: RestartPolicyModeFail,
		Attempts: 0,
		Interval: 5 * time.Second,
	}
	if err := p.Validate(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Policy with ambiguous restart options fails
	p = &RestartPolicy{
		Mode: RestartPolicyModeDelay,
		Attempts: 0,
		Interval: 5 * time.Second,
	}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") {
		t.Fatalf("expect ambiguity error, got: %v", err)
	}

	// Bad policy mode fails
	p = &RestartPolicy{
		Mode: "nope",
		Attempts: 1,
		Interval: 5 * time.Second,
	}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") {
		t.Fatalf("expect mode error, got: %v", err)
	}

	// Fails when attempts*delay does not fit inside interval
	p = &RestartPolicy{
		Mode: RestartPolicyModeDelay,
		Attempts: 3,
		Delay: 5 * time.Second,
		Interval: 5 * time.Second,
	}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") {
		t.Fatalf("expect restart interval error, got: %v", err)
	}

	// Fails when interval is too small
	p = &RestartPolicy{
		Mode: RestartPolicyModeDelay,
		Attempts: 3,
		Delay: 5 * time.Second,
		Interval: 2 * time.Second,
	}
	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") {
		t.Fatalf("expect interval too small error, got: %v", err)
	}
}

func TestReschedulePolicy_Validate(t *testing.T) {
	type testCase struct {
		desc string
		ReschedulePolicy *ReschedulePolicy
		errors []error
	}

	testCases := []testCase{
		{
			desc: "Nil",
		},
		{
			desc: "Disabled",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 0,
				Interval: 0 * time.Second},
		},
		{
			desc: "Disabled",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: -1,
				Interval: 5 * time.Minute},
		},
		{
			desc: "Valid Linear Delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 5 * time.Minute,
				Delay: 10 * time.Second,
				DelayFunction: "constant"},
		},
		{
			desc: "Valid Exponential Delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 5,
				Interval: 1 * time.Hour,
				Delay: 30 * time.Second,
				MaxDelay: 5 * time.Minute,
				DelayFunction: "exponential"},
		},
		{
			desc: "Valid Fibonacci Delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 5,
				Interval: 15 * time.Minute,
				Delay: 10 * time.Second,
				MaxDelay: 5 * time.Minute,
				DelayFunction: "fibonacci"},
		},
		{
			desc: "Invalid delay function",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 1 * time.Second,
				DelayFunction: "blah"},
			errors: []error{
				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
				fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions),
			},
		},
		{
			desc: "Invalid delay ceiling",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 8 * time.Second,
				DelayFunction: "exponential",
				Delay: 15 * time.Second,
				MaxDelay: 5 * time.Second},
			errors: []error{
				fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)",
					15*time.Second, 5*time.Second),
			},
		},
		{
			desc: "Invalid delay and interval",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 1 * time.Second,
				DelayFunction: "constant"},
			errors: []error{
				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
			},
		}, {
			// Should suggest 2h40m as the interval
			desc: "Invalid Attempts - linear delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 10,
				Interval: 1 * time.Hour,
				Delay: 20 * time.Minute,
				DelayFunction: "constant",
			},
			errors: []error{
				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+
					" delay function %q", 3, time.Hour, 20*time.Minute, "constant"),
				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
					200*time.Minute, 10),
			},
		},
		{
			// Should suggest 4h40m as the interval
			// Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
			desc: "Invalid Attempts - exponential delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 10,
				Interval: 30 * time.Minute,
				Delay: 5 * time.Minute,
				MaxDelay: 40 * time.Minute,
				DelayFunction: "exponential",
			},
			errors: []error{
				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
					"delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute,
					"exponential", 40*time.Minute),
				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
					280*time.Minute, 10),
			},
		},
		{
			// Should suggest 8h as the interval
			// Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
			desc: "Invalid Attempts - fibonacci delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 10,
				Interval: 1 * time.Hour,
				Delay: 20 * time.Minute,
				MaxDelay: 80 * time.Minute,
				DelayFunction: "fibonacci",
			},
			errors: []error{
				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
					"delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute,
					"fibonacci", 80*time.Minute),
				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
					480*time.Minute, 10),
			},
		},
		{
			desc: "Ambiguous Unlimited config, has both attempts and unlimited set",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Unlimited: true,
				DelayFunction: "exponential",
				Delay: 5 * time.Minute,
				MaxDelay: 1 * time.Hour,
			},
			errors: []error{
				fmt.Errorf("Interval must be a non zero value if Attempts > 0"),
				fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true),
			},
		},
		{
			desc: "Invalid Unlimited config",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 1,
				Interval: 1 * time.Second,
				Unlimited: true,
				DelayFunction: "exponential",
			},
			errors: []error{
				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
				fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
			},
		},
		{
			desc: "Valid Unlimited config",
			ReschedulePolicy: &ReschedulePolicy{
				Unlimited: true,
				DelayFunction: "exponential",
				Delay: 5 * time.Second,
				MaxDelay: 1 * time.Hour,
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			require := require.New(t)
			gotErr := tc.ReschedulePolicy.Validate()
			if tc.errors != nil {
				// Validate all errors
				for _, err := range tc.errors {
					require.Contains(gotErr.Error(), err.Error())
				}
			} else {
				require.Nil(gotErr)
			}
		})
	}
}

func TestAllocation_Index(t *testing.T) {
	a1 := Allocation{
		Name: "example.cache[1]",
		TaskGroup: "cache",
		JobID: "example",
		Job: &Job{
			ID: "example",
			TaskGroups: []*TaskGroup{{Name: "cache"}}},
	}
	e1 := uint(1)
	a2 := a1.Copy()
	a2.Name = "example.cache[713127]"
	e2 := uint(713127)

	if a1.Index() != e1 || a2.Index() != e2 {
		t.Fatalf("Got %d and %d", a1.Index(), a2.Index())
	}
}

func TestTaskArtifact_Validate_Source(t *testing.T) {
	valid := &TaskArtifact{GetterSource: "google.com"}
	if err := valid.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}

func TestTaskArtifact_Validate_Dest(t *testing.T) {
	valid := &TaskArtifact{GetterSource: "google.com"}
	if err := valid.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valid.RelativeDest = "local/"
	if err := valid.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valid.RelativeDest = "local/.."
	if err := valid.Validate(); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	valid.RelativeDest = "local/../../.."
2417 if err := valid.Validate(); err == nil { 2418 t.Fatalf("expected error: %v", err) 2419 } 2420 } 2421 2422 func TestAllocation_ShouldMigrate(t *testing.T) { 2423 alloc := Allocation{ 2424 PreviousAllocation: "123", 2425 TaskGroup: "foo", 2426 Job: &Job{ 2427 TaskGroups: []*TaskGroup{ 2428 { 2429 Name: "foo", 2430 EphemeralDisk: &EphemeralDisk{ 2431 Migrate: true, 2432 Sticky: true, 2433 }, 2434 }, 2435 }, 2436 }, 2437 } 2438 2439 if !alloc.ShouldMigrate() { 2440 t.Fatalf("bad: %v", alloc) 2441 } 2442 2443 alloc1 := Allocation{ 2444 PreviousAllocation: "123", 2445 TaskGroup: "foo", 2446 Job: &Job{ 2447 TaskGroups: []*TaskGroup{ 2448 { 2449 Name: "foo", 2450 EphemeralDisk: &EphemeralDisk{}, 2451 }, 2452 }, 2453 }, 2454 } 2455 2456 if alloc1.ShouldMigrate() { 2457 t.Fatalf("bad: %v", alloc) 2458 } 2459 2460 alloc2 := Allocation{ 2461 PreviousAllocation: "123", 2462 TaskGroup: "foo", 2463 Job: &Job{ 2464 TaskGroups: []*TaskGroup{ 2465 { 2466 Name: "foo", 2467 EphemeralDisk: &EphemeralDisk{ 2468 Sticky: false, 2469 Migrate: true, 2470 }, 2471 }, 2472 }, 2473 }, 2474 } 2475 2476 if alloc2.ShouldMigrate() { 2477 t.Fatalf("bad: %v", alloc) 2478 } 2479 2480 alloc3 := Allocation{ 2481 PreviousAllocation: "123", 2482 TaskGroup: "foo", 2483 Job: &Job{ 2484 TaskGroups: []*TaskGroup{ 2485 { 2486 Name: "foo", 2487 }, 2488 }, 2489 }, 2490 } 2491 2492 if alloc3.ShouldMigrate() { 2493 t.Fatalf("bad: %v", alloc) 2494 } 2495 2496 // No previous 2497 alloc4 := Allocation{ 2498 TaskGroup: "foo", 2499 Job: &Job{ 2500 TaskGroups: []*TaskGroup{ 2501 { 2502 Name: "foo", 2503 EphemeralDisk: &EphemeralDisk{ 2504 Migrate: true, 2505 Sticky: true, 2506 }, 2507 }, 2508 }, 2509 }, 2510 } 2511 2512 if alloc4.ShouldMigrate() { 2513 t.Fatalf("bad: %v", alloc4) 2514 } 2515 } 2516 2517 func TestTaskArtifact_Validate_Checksum(t *testing.T) { 2518 cases := []struct { 2519 Input *TaskArtifact 2520 Err bool 2521 }{ 2522 { 2523 &TaskArtifact{ 2524 GetterSource: "foo.com", 2525 GetterOptions: map[string]string{ 2526 "checksum": "no-type", 2527 }, 2528 }, 2529 true, 2530 }, 2531 { 2532 &TaskArtifact{ 2533 GetterSource: "foo.com", 2534 GetterOptions: map[string]string{ 2535 "checksum": "md5:toosmall", 2536 }, 2537 }, 2538 true, 2539 }, 2540 { 2541 &TaskArtifact{ 2542 GetterSource: "foo.com", 2543 GetterOptions: map[string]string{ 2544 "checksum": "invalid:type", 2545 }, 2546 }, 2547 true, 2548 }, 2549 } 2550 2551 for i, tc := range cases { 2552 err := tc.Input.Validate() 2553 if (err != nil) != tc.Err { 2554 t.Fatalf("case %d: %v", i, err) 2555 continue 2556 } 2557 } 2558 } 2559 2560 func TestAllocation_Terminated(t *testing.T) { 2561 type desiredState struct { 2562 ClientStatus string 2563 DesiredStatus string 2564 Terminated bool 2565 } 2566 2567 harness := []desiredState{ 2568 { 2569 ClientStatus: AllocClientStatusPending, 2570 DesiredStatus: AllocDesiredStatusStop, 2571 Terminated: false, 2572 }, 2573 { 2574 ClientStatus: AllocClientStatusRunning, 2575 DesiredStatus: AllocDesiredStatusStop, 2576 Terminated: false, 2577 }, 2578 { 2579 ClientStatus: AllocClientStatusFailed, 2580 DesiredStatus: AllocDesiredStatusStop, 2581 Terminated: true, 2582 }, 2583 { 2584 ClientStatus: AllocClientStatusFailed, 2585 DesiredStatus: AllocDesiredStatusRun, 2586 Terminated: true, 2587 }, 2588 } 2589 2590 for _, state := range harness { 2591 alloc := Allocation{} 2592 alloc.DesiredStatus = state.DesiredStatus 2593 alloc.ClientStatus = state.ClientStatus 2594 if alloc.Terminated() != state.Terminated { 2595 t.Fatalf("expected: %v, actual: %v", 
state.Terminated, alloc.Terminated()) 2596 } 2597 } 2598 } 2599 2600 func TestAllocation_ShouldReschedule(t *testing.T) { 2601 type testCase struct { 2602 Desc string 2603 FailTime time.Time 2604 ClientStatus string 2605 DesiredStatus string 2606 ReschedulePolicy *ReschedulePolicy 2607 RescheduleTrackers []*RescheduleEvent 2608 ShouldReschedule bool 2609 } 2610 2611 fail := time.Now() 2612 2613 harness := []testCase{ 2614 { 2615 Desc: "Reschedule when desired state is stop", 2616 ClientStatus: AllocClientStatusPending, 2617 DesiredStatus: AllocDesiredStatusStop, 2618 FailTime: fail, 2619 ReschedulePolicy: nil, 2620 ShouldReschedule: false, 2621 }, 2622 { 2623 Desc: "Disabled rescheduling", 2624 ClientStatus: AllocClientStatusFailed, 2625 DesiredStatus: AllocDesiredStatusRun, 2626 FailTime: fail, 2627 ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute}, 2628 ShouldReschedule: false, 2629 }, 2630 { 2631 Desc: "Reschedule when client status is complete", 2632 ClientStatus: AllocClientStatusComplete, 2633 DesiredStatus: AllocDesiredStatusRun, 2634 FailTime: fail, 2635 ReschedulePolicy: nil, 2636 ShouldReschedule: false, 2637 }, 2638 { 2639 Desc: "Reschedule with nil reschedule policy", 2640 ClientStatus: AllocClientStatusFailed, 2641 DesiredStatus: AllocDesiredStatusRun, 2642 FailTime: fail, 2643 ReschedulePolicy: nil, 2644 ShouldReschedule: false, 2645 }, 2646 { 2647 Desc: "Reschedule with unlimited and attempts >0", 2648 ClientStatus: AllocClientStatusFailed, 2649 DesiredStatus: AllocDesiredStatusRun, 2650 FailTime: fail, 2651 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true}, 2652 ShouldReschedule: true, 2653 }, 2654 { 2655 Desc: "Reschedule when client status is complete", 2656 ClientStatus: AllocClientStatusComplete, 2657 DesiredStatus: AllocDesiredStatusRun, 2658 FailTime: fail, 2659 ReschedulePolicy: nil, 2660 ShouldReschedule: false, 2661 }, 2662 { 2663 Desc: "Reschedule with policy when client status complete", 2664 ClientStatus: AllocClientStatusComplete, 2665 DesiredStatus: AllocDesiredStatusRun, 2666 FailTime: fail, 2667 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 2668 ShouldReschedule: false, 2669 }, 2670 { 2671 Desc: "Reschedule with no previous attempts", 2672 ClientStatus: AllocClientStatusFailed, 2673 DesiredStatus: AllocDesiredStatusRun, 2674 FailTime: fail, 2675 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 2676 ShouldReschedule: true, 2677 }, 2678 { 2679 Desc: "Reschedule with leftover attempts", 2680 ClientStatus: AllocClientStatusFailed, 2681 DesiredStatus: AllocDesiredStatusRun, 2682 ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 2683 FailTime: fail, 2684 RescheduleTrackers: []*RescheduleEvent{ 2685 { 2686 RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(), 2687 }, 2688 }, 2689 ShouldReschedule: true, 2690 }, 2691 { 2692 Desc: "Reschedule with too old previous attempts", 2693 ClientStatus: AllocClientStatusFailed, 2694 DesiredStatus: AllocDesiredStatusRun, 2695 FailTime: fail, 2696 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute}, 2697 RescheduleTrackers: []*RescheduleEvent{ 2698 { 2699 RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(), 2700 }, 2701 }, 2702 ShouldReschedule: true, 2703 }, 2704 { 2705 Desc: "Reschedule with no leftover attempts", 2706 ClientStatus: AllocClientStatusFailed, 2707 DesiredStatus: AllocDesiredStatusRun, 2708 FailTime: fail, 2709 ReschedulePolicy: 
&ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute}, 2710 RescheduleTrackers: []*RescheduleEvent{ 2711 { 2712 RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(), 2713 }, 2714 { 2715 RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(), 2716 }, 2717 }, 2718 ShouldReschedule: false, 2719 }, 2720 } 2721 2722 for _, state := range harness { 2723 alloc := Allocation{} 2724 alloc.DesiredStatus = state.DesiredStatus 2725 alloc.ClientStatus = state.ClientStatus 2726 alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers} 2727 2728 t.Run(state.Desc, func(t *testing.T) { 2729 if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule { 2730 t.Fatalf("expected %v but got %v", state.ShouldReschedule, got) 2731 } 2732 }) 2733 2734 } 2735 } 2736 2737 func TestAllocation_LastEventTime(t *testing.T) { 2738 type testCase struct { 2739 desc string 2740 taskState map[string]*TaskState 2741 expectedLastEventTime time.Time 2742 } 2743 2744 t1 := time.Now().UTC() 2745 2746 testCases := []testCase{ 2747 { 2748 desc: "nil task state", 2749 expectedLastEventTime: t1, 2750 }, 2751 { 2752 desc: "empty task state", 2753 taskState: make(map[string]*TaskState), 2754 expectedLastEventTime: t1, 2755 }, 2756 { 2757 desc: "Finished At not set", 2758 taskState: map[string]*TaskState{"foo": {State: "start", 2759 StartedAt: t1.Add(-2 * time.Hour)}}, 2760 expectedLastEventTime: t1, 2761 }, 2762 { 2763 desc: "One finished ", 2764 taskState: map[string]*TaskState{"foo": {State: "start", 2765 StartedAt: t1.Add(-2 * time.Hour), 2766 FinishedAt: t1.Add(-1 * time.Hour)}}, 2767 expectedLastEventTime: t1.Add(-1 * time.Hour), 2768 }, 2769 { 2770 desc: "Multiple task groups", 2771 taskState: map[string]*TaskState{"foo": {State: "start", 2772 StartedAt: t1.Add(-2 * time.Hour), 2773 FinishedAt: t1.Add(-1 * time.Hour)}, 2774 "bar": {State: "start", 2775 StartedAt: t1.Add(-2 * time.Hour), 2776 FinishedAt: t1.Add(-40 * time.Minute)}}, 2777 expectedLastEventTime: t1.Add(-40 * time.Minute), 2778 }, 2779 { 2780 desc: "No finishedAt set, one task event, should use modify time", 2781 taskState: map[string]*TaskState{"foo": { 2782 State: "run", 2783 StartedAt: t1.Add(-2 * time.Hour), 2784 Events: []*TaskEvent{ 2785 {Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()}, 2786 }}, 2787 }, 2788 expectedLastEventTime: t1, 2789 }, 2790 } 2791 for _, tc := range testCases { 2792 t.Run(tc.desc, func(t *testing.T) { 2793 alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()} 2794 alloc.TaskStates = tc.taskState 2795 require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime()) 2796 }) 2797 } 2798 } 2799 2800 func TestAllocation_NextDelay(t *testing.T) { 2801 type testCase struct { 2802 desc string 2803 reschedulePolicy *ReschedulePolicy 2804 alloc *Allocation 2805 expectedRescheduleTime time.Time 2806 expectedRescheduleEligible bool 2807 } 2808 now := time.Now() 2809 testCases := []testCase{ 2810 { 2811 desc: "Allocation hasn't failed yet", 2812 reschedulePolicy: &ReschedulePolicy{ 2813 DelayFunction: "constant", 2814 Delay: 5 * time.Second, 2815 }, 2816 alloc: &Allocation{}, 2817 expectedRescheduleTime: time.Time{}, 2818 expectedRescheduleEligible: false, 2819 }, 2820 { 2821 desc: "Allocation has no reschedule policy", 2822 alloc: &Allocation{}, 2823 expectedRescheduleTime: time.Time{}, 2824 expectedRescheduleEligible: false, 2825 }, 2826 { 2827 desc: "Allocation lacks task state", 2828 reschedulePolicy: &ReschedulePolicy{ 2829 DelayFunction: 
"constant", 2830 Delay: 5 * time.Second, 2831 Unlimited: true, 2832 }, 2833 alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()}, 2834 expectedRescheduleTime: now.UTC().Add(5 * time.Second), 2835 expectedRescheduleEligible: true, 2836 }, 2837 { 2838 desc: "linear delay, unlimited restarts, no reschedule tracker", 2839 reschedulePolicy: &ReschedulePolicy{ 2840 DelayFunction: "constant", 2841 Delay: 5 * time.Second, 2842 Unlimited: true, 2843 }, 2844 alloc: &Allocation{ 2845 ClientStatus: AllocClientStatusFailed, 2846 TaskStates: map[string]*TaskState{"foo": {State: "dead", 2847 StartedAt: now.Add(-1 * time.Hour), 2848 FinishedAt: now.Add(-2 * time.Second)}}, 2849 }, 2850 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 2851 expectedRescheduleEligible: true, 2852 }, 2853 { 2854 desc: "linear delay with reschedule tracker", 2855 reschedulePolicy: &ReschedulePolicy{ 2856 DelayFunction: "constant", 2857 Delay: 5 * time.Second, 2858 Interval: 10 * time.Minute, 2859 Attempts: 2, 2860 }, 2861 alloc: &Allocation{ 2862 ClientStatus: AllocClientStatusFailed, 2863 TaskStates: map[string]*TaskState{"foo": {State: "start", 2864 StartedAt: now.Add(-1 * time.Hour), 2865 FinishedAt: now.Add(-2 * time.Second)}}, 2866 RescheduleTracker: &RescheduleTracker{ 2867 Events: []*RescheduleEvent{{ 2868 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 2869 Delay: 5 * time.Second, 2870 }}, 2871 }}, 2872 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 2873 expectedRescheduleEligible: true, 2874 }, 2875 { 2876 desc: "linear delay with reschedule tracker, attempts exhausted", 2877 reschedulePolicy: &ReschedulePolicy{ 2878 DelayFunction: "constant", 2879 Delay: 5 * time.Second, 2880 Interval: 10 * time.Minute, 2881 Attempts: 2, 2882 }, 2883 alloc: &Allocation{ 2884 ClientStatus: AllocClientStatusFailed, 2885 TaskStates: map[string]*TaskState{"foo": {State: "start", 2886 StartedAt: now.Add(-1 * time.Hour), 2887 FinishedAt: now.Add(-2 * time.Second)}}, 2888 RescheduleTracker: &RescheduleTracker{ 2889 Events: []*RescheduleEvent{ 2890 { 2891 RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(), 2892 Delay: 5 * time.Second, 2893 }, 2894 { 2895 RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(), 2896 Delay: 5 * time.Second, 2897 }, 2898 }, 2899 }}, 2900 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 2901 expectedRescheduleEligible: false, 2902 }, 2903 { 2904 desc: "exponential delay - no reschedule tracker", 2905 reschedulePolicy: &ReschedulePolicy{ 2906 DelayFunction: "exponential", 2907 Delay: 5 * time.Second, 2908 MaxDelay: 90 * time.Second, 2909 Unlimited: true, 2910 }, 2911 alloc: &Allocation{ 2912 ClientStatus: AllocClientStatusFailed, 2913 TaskStates: map[string]*TaskState{"foo": {State: "start", 2914 StartedAt: now.Add(-1 * time.Hour), 2915 FinishedAt: now.Add(-2 * time.Second)}}, 2916 }, 2917 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 2918 expectedRescheduleEligible: true, 2919 }, 2920 { 2921 desc: "exponential delay with reschedule tracker", 2922 reschedulePolicy: &ReschedulePolicy{ 2923 DelayFunction: "exponential", 2924 Delay: 5 * time.Second, 2925 MaxDelay: 90 * time.Second, 2926 Unlimited: true, 2927 }, 2928 alloc: &Allocation{ 2929 ClientStatus: AllocClientStatusFailed, 2930 TaskStates: map[string]*TaskState{"foo": {State: "start", 2931 StartedAt: now.Add(-1 * time.Hour), 2932 FinishedAt: now.Add(-2 * time.Second)}}, 2933 RescheduleTracker: 
&RescheduleTracker{ 2934 Events: []*RescheduleEvent{ 2935 { 2936 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 2937 Delay: 5 * time.Second, 2938 }, 2939 { 2940 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2941 Delay: 10 * time.Second, 2942 }, 2943 { 2944 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2945 Delay: 20 * time.Second, 2946 }, 2947 }, 2948 }}, 2949 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 2950 expectedRescheduleEligible: true, 2951 }, 2952 { 2953 desc: "exponential delay with delay ceiling reached", 2954 reschedulePolicy: &ReschedulePolicy{ 2955 DelayFunction: "exponential", 2956 Delay: 5 * time.Second, 2957 MaxDelay: 90 * time.Second, 2958 Unlimited: true, 2959 }, 2960 alloc: &Allocation{ 2961 ClientStatus: AllocClientStatusFailed, 2962 TaskStates: map[string]*TaskState{"foo": {State: "start", 2963 StartedAt: now.Add(-1 * time.Hour), 2964 FinishedAt: now.Add(-15 * time.Second)}}, 2965 RescheduleTracker: &RescheduleTracker{ 2966 Events: []*RescheduleEvent{ 2967 { 2968 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 2969 Delay: 5 * time.Second, 2970 }, 2971 { 2972 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2973 Delay: 10 * time.Second, 2974 }, 2975 { 2976 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2977 Delay: 20 * time.Second, 2978 }, 2979 { 2980 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 2981 Delay: 40 * time.Second, 2982 }, 2983 { 2984 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 2985 Delay: 80 * time.Second, 2986 }, 2987 }, 2988 }}, 2989 expectedRescheduleTime: now.Add(-15 * time.Second).Add(90 * time.Second), 2990 expectedRescheduleEligible: true, 2991 }, 2992 { 2993 // Test case where most recent reschedule ran longer than delay ceiling 2994 desc: "exponential delay, delay ceiling reset condition met", 2995 reschedulePolicy: &ReschedulePolicy{ 2996 DelayFunction: "exponential", 2997 Delay: 5 * time.Second, 2998 MaxDelay: 90 * time.Second, 2999 Unlimited: true, 3000 }, 3001 alloc: &Allocation{ 3002 ClientStatus: AllocClientStatusFailed, 3003 TaskStates: map[string]*TaskState{"foo": {State: "start", 3004 StartedAt: now.Add(-1 * time.Hour), 3005 FinishedAt: now.Add(-15 * time.Minute)}}, 3006 RescheduleTracker: &RescheduleTracker{ 3007 Events: []*RescheduleEvent{ 3008 { 3009 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3010 Delay: 5 * time.Second, 3011 }, 3012 { 3013 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3014 Delay: 10 * time.Second, 3015 }, 3016 { 3017 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3018 Delay: 20 * time.Second, 3019 }, 3020 { 3021 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3022 Delay: 40 * time.Second, 3023 }, 3024 { 3025 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3026 Delay: 80 * time.Second, 3027 }, 3028 { 3029 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3030 Delay: 90 * time.Second, 3031 }, 3032 { 3033 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3034 Delay: 90 * time.Second, 3035 }, 3036 }, 3037 }}, 3038 expectedRescheduleTime: now.Add(-15 * time.Minute).Add(5 * time.Second), 3039 expectedRescheduleEligible: true, 3040 }, 3041 { 3042 desc: "fibonacci delay - no reschedule tracker", 3043 reschedulePolicy: &ReschedulePolicy{ 3044 DelayFunction: "fibonacci", 3045 Delay: 5 * time.Second, 3046 MaxDelay: 90 * time.Second, 3047 Unlimited: true, 3048 }, 3049 alloc: &Allocation{ 3050 ClientStatus: AllocClientStatusFailed, 3051 
TaskStates: map[string]*TaskState{"foo": {State: "start", 3052 StartedAt: now.Add(-1 * time.Hour), 3053 FinishedAt: now.Add(-2 * time.Second)}}}, 3054 expectedRescheduleTime: now.Add(-2 * time.Second).Add(5 * time.Second), 3055 expectedRescheduleEligible: true, 3056 }, 3057 { 3058 desc: "fibonacci delay with reschedule tracker", 3059 reschedulePolicy: &ReschedulePolicy{ 3060 DelayFunction: "fibonacci", 3061 Delay: 5 * time.Second, 3062 MaxDelay: 90 * time.Second, 3063 Unlimited: true, 3064 }, 3065 alloc: &Allocation{ 3066 ClientStatus: AllocClientStatusFailed, 3067 TaskStates: map[string]*TaskState{"foo": {State: "start", 3068 StartedAt: now.Add(-1 * time.Hour), 3069 FinishedAt: now.Add(-2 * time.Second)}}, 3070 RescheduleTracker: &RescheduleTracker{ 3071 Events: []*RescheduleEvent{ 3072 { 3073 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3074 Delay: 5 * time.Second, 3075 }, 3076 { 3077 RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(), 3078 Delay: 5 * time.Second, 3079 }, 3080 }, 3081 }}, 3082 expectedRescheduleTime: now.Add(-2 * time.Second).Add(10 * time.Second), 3083 expectedRescheduleEligible: true, 3084 }, 3085 { 3086 desc: "fibonacci delay with more events", 3087 reschedulePolicy: &ReschedulePolicy{ 3088 DelayFunction: "fibonacci", 3089 Delay: 5 * time.Second, 3090 MaxDelay: 90 * time.Second, 3091 Unlimited: true, 3092 }, 3093 alloc: &Allocation{ 3094 ClientStatus: AllocClientStatusFailed, 3095 TaskStates: map[string]*TaskState{"foo": {State: "start", 3096 StartedAt: now.Add(-1 * time.Hour), 3097 FinishedAt: now.Add(-2 * time.Second)}}, 3098 RescheduleTracker: &RescheduleTracker{ 3099 Events: []*RescheduleEvent{ 3100 { 3101 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3102 Delay: 5 * time.Second, 3103 }, 3104 { 3105 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3106 Delay: 5 * time.Second, 3107 }, 3108 { 3109 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3110 Delay: 10 * time.Second, 3111 }, 3112 { 3113 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3114 Delay: 15 * time.Second, 3115 }, 3116 { 3117 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3118 Delay: 25 * time.Second, 3119 }, 3120 }, 3121 }}, 3122 expectedRescheduleTime: now.Add(-2 * time.Second).Add(40 * time.Second), 3123 expectedRescheduleEligible: true, 3124 }, 3125 { 3126 desc: "fibonacci delay with delay ceiling reached", 3127 reschedulePolicy: &ReschedulePolicy{ 3128 DelayFunction: "fibonacci", 3129 Delay: 5 * time.Second, 3130 MaxDelay: 50 * time.Second, 3131 Unlimited: true, 3132 }, 3133 alloc: &Allocation{ 3134 ClientStatus: AllocClientStatusFailed, 3135 TaskStates: map[string]*TaskState{"foo": {State: "start", 3136 StartedAt: now.Add(-1 * time.Hour), 3137 FinishedAt: now.Add(-15 * time.Second)}}, 3138 RescheduleTracker: &RescheduleTracker{ 3139 Events: []*RescheduleEvent{ 3140 { 3141 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3142 Delay: 5 * time.Second, 3143 }, 3144 { 3145 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3146 Delay: 5 * time.Second, 3147 }, 3148 { 3149 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3150 Delay: 10 * time.Second, 3151 }, 3152 { 3153 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3154 Delay: 15 * time.Second, 3155 }, 3156 { 3157 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3158 Delay: 25 * time.Second, 3159 }, 3160 { 3161 RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(), 3162 Delay: 40 * time.Second, 3163 }, 3164 }, 3165 }}, 3166 
expectedRescheduleTime: now.Add(-15 * time.Second).Add(50 * time.Second), 3167 expectedRescheduleEligible: true, 3168 }, 3169 { 3170 desc: "fibonacci delay with delay reset condition met", 3171 reschedulePolicy: &ReschedulePolicy{ 3172 DelayFunction: "fibonacci", 3173 Delay: 5 * time.Second, 3174 MaxDelay: 50 * time.Second, 3175 Unlimited: true, 3176 }, 3177 alloc: &Allocation{ 3178 ClientStatus: AllocClientStatusFailed, 3179 TaskStates: map[string]*TaskState{"foo": {State: "start", 3180 StartedAt: now.Add(-1 * time.Hour), 3181 FinishedAt: now.Add(-5 * time.Minute)}}, 3182 RescheduleTracker: &RescheduleTracker{ 3183 Events: []*RescheduleEvent{ 3184 { 3185 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3186 Delay: 5 * time.Second, 3187 }, 3188 { 3189 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3190 Delay: 5 * time.Second, 3191 }, 3192 { 3193 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3194 Delay: 10 * time.Second, 3195 }, 3196 { 3197 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3198 Delay: 15 * time.Second, 3199 }, 3200 { 3201 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3202 Delay: 25 * time.Second, 3203 }, 3204 { 3205 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3206 Delay: 40 * time.Second, 3207 }, 3208 }, 3209 }}, 3210 expectedRescheduleTime: now.Add(-5 * time.Minute).Add(5 * time.Second), 3211 expectedRescheduleEligible: true, 3212 }, 3213 { 3214 desc: "fibonacci delay with the most recent event that reset delay value", 3215 reschedulePolicy: &ReschedulePolicy{ 3216 DelayFunction: "fibonacci", 3217 Delay: 5 * time.Second, 3218 MaxDelay: 50 * time.Second, 3219 Unlimited: true, 3220 }, 3221 alloc: &Allocation{ 3222 ClientStatus: AllocClientStatusFailed, 3223 TaskStates: map[string]*TaskState{"foo": {State: "start", 3224 StartedAt: now.Add(-1 * time.Hour), 3225 FinishedAt: now.Add(-5 * time.Second)}}, 3226 RescheduleTracker: &RescheduleTracker{ 3227 Events: []*RescheduleEvent{ 3228 { 3229 RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(), 3230 Delay: 5 * time.Second, 3231 }, 3232 { 3233 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3234 Delay: 5 * time.Second, 3235 }, 3236 { 3237 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3238 Delay: 10 * time.Second, 3239 }, 3240 { 3241 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3242 Delay: 15 * time.Second, 3243 }, 3244 { 3245 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3246 Delay: 25 * time.Second, 3247 }, 3248 { 3249 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3250 Delay: 40 * time.Second, 3251 }, 3252 { 3253 RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(), 3254 Delay: 50 * time.Second, 3255 }, 3256 { 3257 RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(), 3258 Delay: 5 * time.Second, 3259 }, 3260 }, 3261 }}, 3262 expectedRescheduleTime: now.Add(-5 * time.Second).Add(5 * time.Second), 3263 expectedRescheduleEligible: true, 3264 }, 3265 } 3266 for _, tc := range testCases { 3267 t.Run(tc.desc, func(t *testing.T) { 3268 require := require.New(t) 3269 j := testJob() 3270 if tc.reschedulePolicy != nil { 3271 j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy 3272 } 3273 tc.alloc.Job = j 3274 tc.alloc.TaskGroup = j.TaskGroups[0].Name 3275 reschedTime, allowed := tc.alloc.NextRescheduleTime() 3276 require.Equal(tc.expectedRescheduleEligible, allowed) 3277 require.Equal(tc.expectedRescheduleTime, reschedTime) 3278 }) 3279 } 3280 3281 } 3282 3283 func TestRescheduleTracker_Copy(t 
*testing.T) {
3284 type testCase struct {
3285 original *RescheduleTracker
3286 expected *RescheduleTracker
3287 }
3288
3289 cases := []testCase{
3290 {nil, nil},
3291 {&RescheduleTracker{Events: []*RescheduleEvent{
3292 {RescheduleTime: 2,
3293 PrevAllocID: "12",
3294 PrevNodeID: "12",
3295 Delay: 30 * time.Second},
3296 }}, &RescheduleTracker{Events: []*RescheduleEvent{
3297 {RescheduleTime: 2,
3298 PrevAllocID: "12",
3299 PrevNodeID: "12",
3300 Delay: 30 * time.Second},
3301 }}},
3302 }
3303
3304 for _, tc := range cases {
3305 if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) {
3306 t.Fatalf("expected %v but got %v", *tc.expected, *got)
3307 }
3308 }
3309 }
3310
3311 func TestVault_Validate(t *testing.T) {
3312 v := &Vault{
3313 Env: true,
3314 ChangeMode: VaultChangeModeNoop,
3315 }
3316
3317 if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") {
3318 t.Fatalf("Expected policy list empty error")
3319 }
3320
3321 v.Policies = []string{"foo", "root"}
3322 v.ChangeMode = VaultChangeModeSignal
3323
3324 err := v.Validate()
3325 if err == nil {
3326 t.Fatalf("Expected validation errors")
3327 }
3328
3329 if !strings.Contains(err.Error(), "Signal must") {
3330 t.Fatalf("Expected signal empty error")
3331 }
3332 if !strings.Contains(err.Error(), "root") {
3333 t.Fatalf("Expected root error")
3334 }
3335 }
3336
3337 func TestParameterizedJobConfig_Validate(t *testing.T) {
3338 d := &ParameterizedJobConfig{
3339 Payload: "foo",
3340 }
3341
3342 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") {
3343 t.Fatalf("Expected unknown payload requirement: %v", err)
3344 }
3345
3346 d.Payload = DispatchPayloadOptional
3347 d.MetaOptional = []string{"foo", "bar"}
3348 d.MetaRequired = []string{"bar", "baz"}
3349
3350 if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") {
3351 t.Fatalf("Expected meta not being disjoint error: %v", err)
3352 }
3353 }
3354
3355 func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) {
3356 job := testJob()
3357 job.ParameterizedJob = &ParameterizedJobConfig{
3358 Payload: DispatchPayloadOptional,
3359 }
3360 job.Type = JobTypeSystem
3361
3362 if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") {
3363 t.Fatalf("Expected bad scheduler type: %v", err)
3364 }
3365 }
3366
3367 func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
3368 d := &ParameterizedJobConfig{}
3369 d.Canonicalize()
3370 if d.Payload != DispatchPayloadOptional {
3371 t.Fatalf("Canonicalize failed")
3372 }
3373 }
3374
3375 func TestDispatchPayloadConfig_Validate(t *testing.T) {
3376 d := &DispatchPayloadConfig{
3377 File: "foo",
3378 }
3379
3380 // task/local/haha
3381 if err := d.Validate(); err != nil {
3382 t.Fatalf("bad: %v", err)
3383 }
3384
3385 // task/haha
3386 d.File = "../haha"
3387 if err := d.Validate(); err != nil {
3388 t.Fatalf("bad: %v", err)
3389 }
3390
3391 // ../haha
3392 d.File = "../../../haha"
3393 if err := d.Validate(); err == nil {
3394 t.Fatalf("bad: %v", err)
3395 }
3396 }
3397
3398 func TestIsRecoverable(t *testing.T) {
3399 if IsRecoverable(nil) {
3400 t.Errorf("nil should not be recoverable")
3401 }
3402 if IsRecoverable(NewRecoverableError(nil, true)) {
3403 t.Errorf("NewRecoverableError(nil, true) should not be recoverable")
3404 }
3405 if IsRecoverable(fmt.Errorf("i promise im recoverable")) {
3406 t.Errorf("Custom errors should not be recoverable")
3407 }
3408 if IsRecoverable(NewRecoverableError(fmt.Errorf(""), 
false)) { 3409 t.Errorf("Explicitly unrecoverable errors should not be recoverable") 3410 } 3411 if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) { 3412 t.Errorf("Explicitly recoverable errors *should* be recoverable") 3413 } 3414 } 3415 3416 func TestACLTokenValidate(t *testing.T) { 3417 tk := &ACLToken{} 3418 3419 // Missing a type 3420 err := tk.Validate() 3421 assert.NotNil(t, err) 3422 if !strings.Contains(err.Error(), "client or management") { 3423 t.Fatalf("bad: %v", err) 3424 } 3425 3426 // Missing policies 3427 tk.Type = ACLClientToken 3428 err = tk.Validate() 3429 assert.NotNil(t, err) 3430 if !strings.Contains(err.Error(), "missing policies") { 3431 t.Fatalf("bad: %v", err) 3432 } 3433 3434 // Invalid policies 3435 tk.Type = ACLManagementToken 3436 tk.Policies = []string{"foo"} 3437 err = tk.Validate() 3438 assert.NotNil(t, err) 3439 if !strings.Contains(err.Error(), "associated with policies") { 3440 t.Fatalf("bad: %v", err) 3441 } 3442 3443 // Name too long policies 3444 tk.Name = "" 3445 for i := 0; i < 8; i++ { 3446 tk.Name += uuid.Generate() 3447 } 3448 tk.Policies = nil 3449 err = tk.Validate() 3450 assert.NotNil(t, err) 3451 if !strings.Contains(err.Error(), "too long") { 3452 t.Fatalf("bad: %v", err) 3453 } 3454 3455 // Make it valid 3456 tk.Name = "foo" 3457 err = tk.Validate() 3458 assert.Nil(t, err) 3459 } 3460 3461 func TestACLTokenPolicySubset(t *testing.T) { 3462 tk := &ACLToken{ 3463 Type: ACLClientToken, 3464 Policies: []string{"foo", "bar", "baz"}, 3465 } 3466 3467 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 3468 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 3469 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 3470 assert.Equal(t, true, tk.PolicySubset([]string{})) 3471 assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"})) 3472 assert.Equal(t, false, tk.PolicySubset([]string{"new"})) 3473 3474 tk = &ACLToken{ 3475 Type: ACLManagementToken, 3476 } 3477 3478 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"})) 3479 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"})) 3480 assert.Equal(t, true, tk.PolicySubset([]string{"foo"})) 3481 assert.Equal(t, true, tk.PolicySubset([]string{})) 3482 assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"})) 3483 assert.Equal(t, true, tk.PolicySubset([]string{"new"})) 3484 } 3485 3486 func TestACLTokenSetHash(t *testing.T) { 3487 tk := &ACLToken{ 3488 Name: "foo", 3489 Type: ACLClientToken, 3490 Policies: []string{"foo", "bar"}, 3491 Global: false, 3492 } 3493 out1 := tk.SetHash() 3494 assert.NotNil(t, out1) 3495 assert.NotNil(t, tk.Hash) 3496 assert.Equal(t, out1, tk.Hash) 3497 3498 tk.Policies = []string{"foo"} 3499 out2 := tk.SetHash() 3500 assert.NotNil(t, out2) 3501 assert.NotNil(t, tk.Hash) 3502 assert.Equal(t, out2, tk.Hash) 3503 assert.NotEqual(t, out1, out2) 3504 } 3505 3506 func TestACLPolicySetHash(t *testing.T) { 3507 ap := &ACLPolicy{ 3508 Name: "foo", 3509 Description: "great policy", 3510 Rules: "node { policy = \"read\" }", 3511 } 3512 out1 := ap.SetHash() 3513 assert.NotNil(t, out1) 3514 assert.NotNil(t, ap.Hash) 3515 assert.Equal(t, out1, ap.Hash) 3516 3517 ap.Rules = "node { policy = \"write\" }" 3518 out2 := ap.SetHash() 3519 assert.NotNil(t, out2) 3520 assert.NotNil(t, ap.Hash) 3521 assert.Equal(t, out2, ap.Hash) 3522 assert.NotEqual(t, out1, out2) 3523 } 3524 3525 func TestTaskEventPopulate(t *testing.T) { 3526 prepopulatedEvent := NewTaskEvent(TaskSetup) 3527 
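// The display message is pre-set here so the table below can verify that
// PopulateEventDisplayMessage leaves an existing DisplayMessage untouched.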
prepopulatedEvent.DisplayMessage = "Hola" 3528 testcases := []struct { 3529 event *TaskEvent 3530 expectedMsg string 3531 }{ 3532 {nil, ""}, 3533 {prepopulatedEvent, "Hola"}, 3534 {NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"}, 3535 {NewTaskEvent(TaskStarted), "Task started by client"}, 3536 {NewTaskEvent(TaskReceived), "Task received by client"}, 3537 {NewTaskEvent(TaskFailedValidation), "Validation of task failed"}, 3538 {NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"}, 3539 {NewTaskEvent(TaskSetupFailure), "Task setup failed"}, 3540 {NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"}, 3541 {NewTaskEvent(TaskDriverFailure), "Failed to start task"}, 3542 {NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"}, 3543 {NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"}, 3544 {NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"}, 3545 {NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"}, 3546 {NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"}, 3547 {NewTaskEvent(TaskKilling), "Sent interrupt"}, 3548 {NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"}, 3549 {NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"}, 3550 {NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"}, 3551 {NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""}, 3552 {NewTaskEvent(TaskKilled), "Task successfully killed"}, 3553 {NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"}, 3554 {NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"}, 3555 {NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"}, 3556 {NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"}, 3557 {NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"}, 3558 {NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"}, 3559 {NewTaskEvent(TaskSignaling), "Task being sent a signal"}, 3560 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"}, 3561 {NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"}, 3562 {NewTaskEvent(TaskRestartSignal), "Task signaled to restart"}, 3563 {NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"}, 3564 {NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"}, 3565 {NewTaskEvent("Unknown Type, No message"), ""}, 3566 {NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"}, 3567 } 3568 3569 for _, tc := range testcases { 3570 tc.event.PopulateEventDisplayMessage() 3571 if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg { 3572 t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage) 3573 } 3574 } 3575 } 3576 3577 func TestNetworkResourcesEquals(t *testing.T) { 3578 require := require.New(t) 3579 var networkResourcesTest = []struct { 3580 input 
[]*NetworkResource 3581 expected bool 3582 errorMsg string 3583 }{ 3584 { 3585 []*NetworkResource{ 3586 { 3587 IP: "10.0.0.1", 3588 MBits: 50, 3589 ReservedPorts: []Port{{"web", 80}}, 3590 }, 3591 { 3592 IP: "10.0.0.1", 3593 MBits: 50, 3594 ReservedPorts: []Port{{"web", 80}}, 3595 }, 3596 }, 3597 true, 3598 "Equal network resources should return true", 3599 }, 3600 { 3601 []*NetworkResource{ 3602 { 3603 IP: "10.0.0.0", 3604 MBits: 50, 3605 ReservedPorts: []Port{{"web", 80}}, 3606 }, 3607 { 3608 IP: "10.0.0.1", 3609 MBits: 50, 3610 ReservedPorts: []Port{{"web", 80}}, 3611 }, 3612 }, 3613 false, 3614 "Different IP addresses should return false", 3615 }, 3616 { 3617 []*NetworkResource{ 3618 { 3619 IP: "10.0.0.1", 3620 MBits: 40, 3621 ReservedPorts: []Port{{"web", 80}}, 3622 }, 3623 { 3624 IP: "10.0.0.1", 3625 MBits: 50, 3626 ReservedPorts: []Port{{"web", 80}}, 3627 }, 3628 }, 3629 false, 3630 "Different MBits values should return false", 3631 }, 3632 { 3633 []*NetworkResource{ 3634 { 3635 IP: "10.0.0.1", 3636 MBits: 50, 3637 ReservedPorts: []Port{{"web", 80}}, 3638 }, 3639 { 3640 IP: "10.0.0.1", 3641 MBits: 50, 3642 ReservedPorts: []Port{{"web", 80}, {"web", 80}}, 3643 }, 3644 }, 3645 false, 3646 "Different ReservedPorts lengths should return false", 3647 }, 3648 { 3649 []*NetworkResource{ 3650 { 3651 IP: "10.0.0.1", 3652 MBits: 50, 3653 ReservedPorts: []Port{{"web", 80}}, 3654 }, 3655 { 3656 IP: "10.0.0.1", 3657 MBits: 50, 3658 ReservedPorts: []Port{}, 3659 }, 3660 }, 3661 false, 3662 "Empty and non empty ReservedPorts values should return false", 3663 }, 3664 { 3665 []*NetworkResource{ 3666 { 3667 IP: "10.0.0.1", 3668 MBits: 50, 3669 ReservedPorts: []Port{{"web", 80}}, 3670 }, 3671 { 3672 IP: "10.0.0.1", 3673 MBits: 50, 3674 ReservedPorts: []Port{{"notweb", 80}}, 3675 }, 3676 }, 3677 false, 3678 "Different valued ReservedPorts values should return false", 3679 }, 3680 { 3681 []*NetworkResource{ 3682 { 3683 IP: "10.0.0.1", 3684 MBits: 50, 3685 DynamicPorts: []Port{{"web", 80}}, 3686 }, 3687 { 3688 IP: "10.0.0.1", 3689 MBits: 50, 3690 DynamicPorts: []Port{{"web", 80}, {"web", 80}}, 3691 }, 3692 }, 3693 false, 3694 "Different DynamicPorts lengths should return false", 3695 }, 3696 { 3697 []*NetworkResource{ 3698 { 3699 IP: "10.0.0.1", 3700 MBits: 50, 3701 DynamicPorts: []Port{{"web", 80}}, 3702 }, 3703 { 3704 IP: "10.0.0.1", 3705 MBits: 50, 3706 DynamicPorts: []Port{}, 3707 }, 3708 }, 3709 false, 3710 "Empty and non empty DynamicPorts values should return false", 3711 }, 3712 { 3713 []*NetworkResource{ 3714 { 3715 IP: "10.0.0.1", 3716 MBits: 50, 3717 DynamicPorts: []Port{{"web", 80}}, 3718 }, 3719 { 3720 IP: "10.0.0.1", 3721 MBits: 50, 3722 DynamicPorts: []Port{{"notweb", 80}}, 3723 }, 3724 }, 3725 false, 3726 "Different valued DynamicPorts values should return false", 3727 }, 3728 } 3729 for _, testCase := range networkResourcesTest { 3730 first := testCase.input[0] 3731 second := testCase.input[1] 3732 require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg) 3733 } 3734 } 3735 3736 func TestNode_Canonicalize(t *testing.T) { 3737 t.Parallel() 3738 require := require.New(t) 3739 3740 // Make sure the eligiblity is set properly 3741 node := &Node{} 3742 node.Canonicalize() 3743 require.Equal(NodeSchedulingEligible, node.SchedulingEligibility) 3744 3745 node = &Node{ 3746 Drain: true, 3747 } 3748 node.Canonicalize() 3749 require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility) 3750 } 3751 3752 func TestNode_Copy(t *testing.T) { 3753 t.Parallel() 3754 require := 
require.New(t) 3755 3756 node := &Node{ 3757 ID: uuid.Generate(), 3758 SecretID: uuid.Generate(), 3759 Datacenter: "dc1", 3760 Name: "foobar", 3761 Attributes: map[string]string{ 3762 "kernel.name": "linux", 3763 "arch": "x86", 3764 "nomad.version": "0.5.0", 3765 "driver.exec": "1", 3766 "driver.mock_driver": "1", 3767 }, 3768 Resources: &Resources{ 3769 CPU: 4000, 3770 MemoryMB: 8192, 3771 DiskMB: 100 * 1024, 3772 IOPS: 150, 3773 Networks: []*NetworkResource{ 3774 { 3775 Device: "eth0", 3776 CIDR: "192.168.0.100/32", 3777 MBits: 1000, 3778 }, 3779 }, 3780 }, 3781 Reserved: &Resources{ 3782 CPU: 100, 3783 MemoryMB: 256, 3784 DiskMB: 4 * 1024, 3785 Networks: []*NetworkResource{ 3786 { 3787 Device: "eth0", 3788 IP: "192.168.0.100", 3789 ReservedPorts: []Port{{Label: "ssh", Value: 22}}, 3790 MBits: 1, 3791 }, 3792 }, 3793 }, 3794 Links: map[string]string{ 3795 "consul": "foobar.dc1", 3796 }, 3797 Meta: map[string]string{ 3798 "pci-dss": "true", 3799 "database": "mysql", 3800 "version": "5.6", 3801 }, 3802 NodeClass: "linux-medium-pci", 3803 Status: NodeStatusReady, 3804 SchedulingEligibility: NodeSchedulingEligible, 3805 Drivers: map[string]*DriverInfo{ 3806 "mock_driver": { 3807 Attributes: map[string]string{"running": "1"}, 3808 Detected: true, 3809 Healthy: true, 3810 HealthDescription: "Currently active", 3811 UpdateTime: time.Now(), 3812 }, 3813 }, 3814 } 3815 node.ComputeClass() 3816 3817 node2 := node.Copy() 3818 3819 require.Equal(node.Attributes, node2.Attributes) 3820 require.Equal(node.Resources, node2.Resources) 3821 require.Equal(node.Reserved, node2.Reserved) 3822 require.Equal(node.Links, node2.Links) 3823 require.Equal(node.Meta, node2.Meta) 3824 require.Equal(node.Events, node2.Events) 3825 require.Equal(node.DrainStrategy, node2.DrainStrategy) 3826 require.Equal(node.Drivers, node2.Drivers) 3827 }
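
// ---------------------------------------------------------------------------
// Illustrative sketch (added for clarity; not part of the upstream test
// suite). It restates, in code, the delay progressions that the reschedule
// test cases above assume: "constant" repeats the initial delay,
// "exponential" doubles the previous delay, and "fibonacci" adds the two
// previous delays, with the growing functions capped at the policy's
// MaxDelay. It deliberately does not model the reset behaviour exercised by
// the "reset condition met" cases, where a long-running attempt drops the
// delay back to its initial value. The function name and signature are
// invented for this example only.
//
// For instance, replaying the "fibonacci delay with delay ceiling reached"
// case: after delays of 5s, 5s, 10s, 15s, 25s and 40s with a 50s ceiling,
// exampleNextDelay("fibonacci", 5*time.Second, 50*time.Second,
// 40*time.Second, 25*time.Second) yields 50s, matching that case's expected
// reschedule time.
func exampleNextDelay(delayFn string, initial, maxDelay, prev, prevPrev time.Duration) time.Duration {
	var next time.Duration
	switch delayFn {
	case "constant":
		// A constant delay repeats the initial value, so N attempts need an
		// interval of roughly N*initial (10 attempts at 20m -> 200m = 3h20m).
		next = initial
	case "exponential":
		if prev == 0 {
			next = initial
		} else {
			next = prev * 2
		}
	case "fibonacci":
		switch {
		case prev == 0:
			next = initial
		case prevPrev == 0:
			next = prev
		default:
			next = prev + prevPrev
		}
	default:
		next = initial
	}
	// Cap the growing delay functions at the configured ceiling.
	if maxDelay > 0 && next > maxDelay {
		next = maxDelay
	}
	return next
}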