github.com/emate/nomad@v0.8.2-wo-binpacking/nomad/structs/structs_test.go (about) 1 package structs 2 3 import ( 4 "fmt" 5 "os" 6 "reflect" 7 "strings" 8 "testing" 9 "time" 10 11 "github.com/hashicorp/consul/api" 12 "github.com/hashicorp/go-multierror" 13 "github.com/hashicorp/nomad/helper/uuid" 14 "github.com/stretchr/testify/assert" 15 "github.com/stretchr/testify/require" 16 ) 17 18 func TestJob_Validate(t *testing.T) { 19 j := &Job{} 20 err := j.Validate() 21 mErr := err.(*multierror.Error) 22 if !strings.Contains(mErr.Errors[0].Error(), "job region") { 23 t.Fatalf("err: %s", err) 24 } 25 if !strings.Contains(mErr.Errors[1].Error(), "job ID") { 26 t.Fatalf("err: %s", err) 27 } 28 if !strings.Contains(mErr.Errors[2].Error(), "job name") { 29 t.Fatalf("err: %s", err) 30 } 31 if !strings.Contains(mErr.Errors[3].Error(), "namespace") { 32 t.Fatalf("err: %s", err) 33 } 34 if !strings.Contains(mErr.Errors[4].Error(), "job type") { 35 t.Fatalf("err: %s", err) 36 } 37 if !strings.Contains(mErr.Errors[5].Error(), "priority") { 38 t.Fatalf("err: %s", err) 39 } 40 if !strings.Contains(mErr.Errors[6].Error(), "datacenters") { 41 t.Fatalf("err: %s", err) 42 } 43 if !strings.Contains(mErr.Errors[7].Error(), "task groups") { 44 t.Fatalf("err: %s", err) 45 } 46 47 j = &Job{ 48 Type: "invalid-job-type", 49 } 50 err = j.Validate() 51 if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) { 52 t.Errorf("expected %s but found: %v", expected, err) 53 } 54 55 j = &Job{ 56 Type: JobTypeService, 57 Periodic: &PeriodicConfig{ 58 Enabled: true, 59 }, 60 } 61 err = j.Validate() 62 mErr = err.(*multierror.Error) 63 if !strings.Contains(mErr.Error(), "Periodic") { 64 t.Fatalf("err: %s", err) 65 } 66 67 j = &Job{ 68 Region: "global", 69 ID: uuid.Generate(), 70 Namespace: "test", 71 Name: "my-job", 72 Type: JobTypeService, 73 Priority: 50, 74 Datacenters: []string{"dc1"}, 75 TaskGroups: []*TaskGroup{ 76 { 77 Name: "web", 78 RestartPolicy: 
&RestartPolicy{ 79 Interval: 5 * time.Minute, 80 Delay: 10 * time.Second, 81 Attempts: 10, 82 }, 83 }, 84 { 85 Name: "web", 86 RestartPolicy: &RestartPolicy{ 87 Interval: 5 * time.Minute, 88 Delay: 10 * time.Second, 89 Attempts: 10, 90 }, 91 }, 92 { 93 RestartPolicy: &RestartPolicy{ 94 Interval: 5 * time.Minute, 95 Delay: 10 * time.Second, 96 Attempts: 10, 97 }, 98 }, 99 }, 100 } 101 err = j.Validate() 102 mErr = err.(*multierror.Error) 103 if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") { 104 t.Fatalf("err: %s", err) 105 } 106 if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") { 107 t.Fatalf("err: %s", err) 108 } 109 if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") { 110 t.Fatalf("err: %s", err) 111 } 112 } 113 114 func TestJob_Warnings(t *testing.T) { 115 cases := []struct { 116 Name string 117 Job *Job 118 Expected []string 119 }{ 120 { 121 Name: "Higher counts for update stanza", 122 Expected: []string{"max parallel count is greater"}, 123 Job: &Job{ 124 Type: JobTypeService, 125 TaskGroups: []*TaskGroup{ 126 { 127 Name: "foo", 128 Count: 2, 129 Update: &UpdateStrategy{ 130 MaxParallel: 10, 131 }, 132 }, 133 }, 134 }, 135 }, 136 } 137 138 for _, c := range cases { 139 t.Run(c.Name, func(t *testing.T) { 140 warnings := c.Job.Warnings() 141 if warnings == nil { 142 if len(c.Expected) == 0 { 143 return 144 } else { 145 t.Fatal("Got no warnings when they were expected") 146 } 147 } 148 149 a := warnings.Error() 150 for _, e := range c.Expected { 151 if !strings.Contains(a, e) { 152 t.Fatalf("Got warnings %q; didn't contain %q", a, e) 153 } 154 } 155 }) 156 } 157 } 158 159 func TestJob_SpecChanged(t *testing.T) { 160 // Get a base test job 161 base := testJob() 162 163 // Only modify the indexes/mutable state of the job 164 mutatedBase := base.Copy() 165 mutatedBase.Status = "foo" 166 mutatedBase.ModifyIndex = base.ModifyIndex + 100 167 168 // changed contains a spec change that should 
be detected 169 change := base.Copy() 170 change.Priority = 99 171 172 cases := []struct { 173 Name string 174 Original *Job 175 New *Job 176 Changed bool 177 }{ 178 { 179 Name: "Same job except mutable indexes", 180 Changed: false, 181 Original: base, 182 New: mutatedBase, 183 }, 184 { 185 Name: "Different", 186 Changed: true, 187 Original: base, 188 New: change, 189 }, 190 } 191 192 for _, c := range cases { 193 t.Run(c.Name, func(t *testing.T) { 194 if actual := c.Original.SpecChanged(c.New); actual != c.Changed { 195 t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed) 196 } 197 }) 198 } 199 } 200 201 func testJob() *Job { 202 return &Job{ 203 Region: "global", 204 ID: uuid.Generate(), 205 Namespace: "test", 206 Name: "my-job", 207 Type: JobTypeService, 208 Priority: 50, 209 AllAtOnce: false, 210 Datacenters: []string{"dc1"}, 211 Constraints: []*Constraint{ 212 { 213 LTarget: "$attr.kernel.name", 214 RTarget: "linux", 215 Operand: "=", 216 }, 217 }, 218 Periodic: &PeriodicConfig{ 219 Enabled: false, 220 }, 221 TaskGroups: []*TaskGroup{ 222 { 223 Name: "web", 224 Count: 10, 225 EphemeralDisk: DefaultEphemeralDisk(), 226 RestartPolicy: &RestartPolicy{ 227 Mode: RestartPolicyModeFail, 228 Attempts: 3, 229 Interval: 10 * time.Minute, 230 Delay: 1 * time.Minute, 231 }, 232 ReschedulePolicy: &ReschedulePolicy{ 233 Interval: 5 * time.Minute, 234 Attempts: 10, 235 Delay: 5 * time.Second, 236 DelayFunction: "constant", 237 }, 238 Tasks: []*Task{ 239 { 240 Name: "web", 241 Driver: "exec", 242 Config: map[string]interface{}{ 243 "command": "/bin/date", 244 }, 245 Env: map[string]string{ 246 "FOO": "bar", 247 }, 248 Artifacts: []*TaskArtifact{ 249 { 250 GetterSource: "http://foo.com", 251 }, 252 }, 253 Services: []*Service{ 254 { 255 Name: "${TASK}-frontend", 256 PortLabel: "http", 257 }, 258 }, 259 Resources: &Resources{ 260 CPU: 500, 261 MemoryMB: 256, 262 Networks: []*NetworkResource{ 263 { 264 MBits: 50, 265 DynamicPorts: []Port{{Label: "http"}}, 266 }, 
267 }, 268 }, 269 LogConfig: &LogConfig{ 270 MaxFiles: 10, 271 MaxFileSizeMB: 1, 272 }, 273 }, 274 }, 275 Meta: map[string]string{ 276 "elb_check_type": "http", 277 "elb_check_interval": "30s", 278 "elb_check_min": "3", 279 }, 280 }, 281 }, 282 Meta: map[string]string{ 283 "owner": "armon", 284 }, 285 } 286 } 287 288 func TestJob_Copy(t *testing.T) { 289 j := testJob() 290 c := j.Copy() 291 if !reflect.DeepEqual(j, c) { 292 t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j) 293 } 294 } 295 296 func TestJob_IsPeriodic(t *testing.T) { 297 j := &Job{ 298 Type: JobTypeService, 299 Periodic: &PeriodicConfig{ 300 Enabled: true, 301 }, 302 } 303 if !j.IsPeriodic() { 304 t.Fatalf("IsPeriodic() returned false on periodic job") 305 } 306 307 j = &Job{ 308 Type: JobTypeService, 309 } 310 if j.IsPeriodic() { 311 t.Fatalf("IsPeriodic() returned true on non-periodic job") 312 } 313 } 314 315 func TestJob_IsPeriodicActive(t *testing.T) { 316 cases := []struct { 317 job *Job 318 active bool 319 }{ 320 { 321 job: &Job{ 322 Type: JobTypeService, 323 Periodic: &PeriodicConfig{ 324 Enabled: true, 325 }, 326 }, 327 active: true, 328 }, 329 { 330 job: &Job{ 331 Type: JobTypeService, 332 Periodic: &PeriodicConfig{ 333 Enabled: false, 334 }, 335 }, 336 active: false, 337 }, 338 { 339 job: &Job{ 340 Type: JobTypeService, 341 Periodic: &PeriodicConfig{ 342 Enabled: true, 343 }, 344 Stop: true, 345 }, 346 active: false, 347 }, 348 { 349 job: &Job{ 350 Type: JobTypeService, 351 Periodic: &PeriodicConfig{ 352 Enabled: false, 353 }, 354 ParameterizedJob: &ParameterizedJobConfig{}, 355 }, 356 active: false, 357 }, 358 } 359 360 for i, c := range cases { 361 if act := c.job.IsPeriodicActive(); act != c.active { 362 t.Fatalf("case %d failed: got %v; want %v", i, act, c.active) 363 } 364 } 365 } 366 367 func TestJob_SystemJob_Validate(t *testing.T) { 368 j := testJob() 369 j.Type = JobTypeSystem 370 j.TaskGroups[0].ReschedulePolicy = nil 371 j.Canonicalize() 372 373 err := 
j.Validate() 374 if err == nil || !strings.Contains(err.Error(), "exceed") { 375 t.Fatalf("expect error due to count") 376 } 377 378 j.TaskGroups[0].Count = 0 379 if err := j.Validate(); err != nil { 380 t.Fatalf("unexpected err: %v", err) 381 } 382 383 j.TaskGroups[0].Count = 1 384 if err := j.Validate(); err != nil { 385 t.Fatalf("unexpected err: %v", err) 386 } 387 } 388 389 func TestJob_VaultPolicies(t *testing.T) { 390 j0 := &Job{} 391 e0 := make(map[string]map[string]*Vault, 0) 392 393 vj1 := &Vault{ 394 Policies: []string{ 395 "p1", 396 "p2", 397 }, 398 } 399 vj2 := &Vault{ 400 Policies: []string{ 401 "p3", 402 "p4", 403 }, 404 } 405 vj3 := &Vault{ 406 Policies: []string{ 407 "p5", 408 }, 409 } 410 j1 := &Job{ 411 TaskGroups: []*TaskGroup{ 412 { 413 Name: "foo", 414 Tasks: []*Task{ 415 { 416 Name: "t1", 417 }, 418 { 419 Name: "t2", 420 Vault: vj1, 421 }, 422 }, 423 }, 424 { 425 Name: "bar", 426 Tasks: []*Task{ 427 { 428 Name: "t3", 429 Vault: vj2, 430 }, 431 { 432 Name: "t4", 433 Vault: vj3, 434 }, 435 }, 436 }, 437 }, 438 } 439 440 e1 := map[string]map[string]*Vault{ 441 "foo": { 442 "t2": vj1, 443 }, 444 "bar": { 445 "t3": vj2, 446 "t4": vj3, 447 }, 448 } 449 450 cases := []struct { 451 Job *Job 452 Expected map[string]map[string]*Vault 453 }{ 454 { 455 Job: j0, 456 Expected: e0, 457 }, 458 { 459 Job: j1, 460 Expected: e1, 461 }, 462 } 463 464 for i, c := range cases { 465 got := c.Job.VaultPolicies() 466 if !reflect.DeepEqual(got, c.Expected) { 467 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 468 } 469 } 470 } 471 472 func TestJob_RequiredSignals(t *testing.T) { 473 j0 := &Job{} 474 e0 := make(map[string]map[string][]string, 0) 475 476 vj1 := &Vault{ 477 Policies: []string{"p1"}, 478 ChangeMode: VaultChangeModeNoop, 479 } 480 vj2 := &Vault{ 481 Policies: []string{"p1"}, 482 ChangeMode: VaultChangeModeSignal, 483 ChangeSignal: "SIGUSR1", 484 } 485 tj1 := &Template{ 486 SourcePath: "foo", 487 DestPath: "bar", 488 ChangeMode: 
TemplateChangeModeNoop, 489 } 490 tj2 := &Template{ 491 SourcePath: "foo", 492 DestPath: "bar", 493 ChangeMode: TemplateChangeModeSignal, 494 ChangeSignal: "SIGUSR2", 495 } 496 j1 := &Job{ 497 TaskGroups: []*TaskGroup{ 498 { 499 Name: "foo", 500 Tasks: []*Task{ 501 { 502 Name: "t1", 503 }, 504 { 505 Name: "t2", 506 Vault: vj2, 507 Templates: []*Template{tj2}, 508 }, 509 }, 510 }, 511 { 512 Name: "bar", 513 Tasks: []*Task{ 514 { 515 Name: "t3", 516 Vault: vj1, 517 Templates: []*Template{tj1}, 518 }, 519 { 520 Name: "t4", 521 Vault: vj2, 522 }, 523 }, 524 }, 525 }, 526 } 527 528 e1 := map[string]map[string][]string{ 529 "foo": { 530 "t2": {"SIGUSR1", "SIGUSR2"}, 531 }, 532 "bar": { 533 "t4": {"SIGUSR1"}, 534 }, 535 } 536 537 j2 := &Job{ 538 TaskGroups: []*TaskGroup{ 539 { 540 Name: "foo", 541 Tasks: []*Task{ 542 { 543 Name: "t1", 544 KillSignal: "SIGQUIT", 545 }, 546 }, 547 }, 548 }, 549 } 550 551 e2 := map[string]map[string][]string{ 552 "foo": { 553 "t1": {"SIGQUIT"}, 554 }, 555 } 556 557 cases := []struct { 558 Job *Job 559 Expected map[string]map[string][]string 560 }{ 561 { 562 Job: j0, 563 Expected: e0, 564 }, 565 { 566 Job: j1, 567 Expected: e1, 568 }, 569 { 570 Job: j2, 571 Expected: e2, 572 }, 573 } 574 575 for i, c := range cases { 576 got := c.Job.RequiredSignals() 577 if !reflect.DeepEqual(got, c.Expected) { 578 t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected) 579 } 580 } 581 } 582 583 func TestTaskGroup_Validate(t *testing.T) { 584 j := testJob() 585 tg := &TaskGroup{ 586 Count: -1, 587 RestartPolicy: &RestartPolicy{ 588 Interval: 5 * time.Minute, 589 Delay: 10 * time.Second, 590 Attempts: 10, 591 Mode: RestartPolicyModeDelay, 592 }, 593 ReschedulePolicy: &ReschedulePolicy{ 594 Interval: 5 * time.Minute, 595 Attempts: 5, 596 Delay: 5 * time.Second, 597 }, 598 } 599 err := tg.Validate(j) 600 mErr := err.(*multierror.Error) 601 if !strings.Contains(mErr.Errors[0].Error(), "group name") { 602 t.Fatalf("err: %s", err) 603 } 604 if 
!strings.Contains(mErr.Errors[1].Error(), "count can't be negative") { 605 t.Fatalf("err: %s", err) 606 } 607 if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") { 608 t.Fatalf("err: %s", err) 609 } 610 611 tg = &TaskGroup{ 612 Tasks: []*Task{ 613 { 614 Name: "task-a", 615 Resources: &Resources{ 616 Networks: []*NetworkResource{ 617 { 618 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 619 }, 620 }, 621 }, 622 }, 623 { 624 Name: "task-b", 625 Resources: &Resources{ 626 Networks: []*NetworkResource{ 627 { 628 ReservedPorts: []Port{{Label: "foo", Value: 123}}, 629 }, 630 }, 631 }, 632 }, 633 }, 634 } 635 err = tg.Validate(&Job{}) 636 expected := `Static port 123 already reserved by task-a:foo` 637 if !strings.Contains(err.Error(), expected) { 638 t.Errorf("expected %s but found: %v", expected, err) 639 } 640 641 tg = &TaskGroup{ 642 Tasks: []*Task{ 643 { 644 Name: "task-a", 645 Resources: &Resources{ 646 Networks: []*NetworkResource{ 647 { 648 ReservedPorts: []Port{ 649 {Label: "foo", Value: 123}, 650 {Label: "bar", Value: 123}, 651 }, 652 }, 653 }, 654 }, 655 }, 656 }, 657 } 658 err = tg.Validate(&Job{}) 659 expected = `Static port 123 already reserved by task-a:foo` 660 if !strings.Contains(err.Error(), expected) { 661 t.Errorf("expected %s but found: %v", expected, err) 662 } 663 664 tg = &TaskGroup{ 665 Name: "web", 666 Count: 1, 667 Tasks: []*Task{ 668 {Name: "web", Leader: true}, 669 {Name: "web", Leader: true}, 670 {}, 671 }, 672 RestartPolicy: &RestartPolicy{ 673 Interval: 5 * time.Minute, 674 Delay: 10 * time.Second, 675 Attempts: 10, 676 Mode: RestartPolicyModeDelay, 677 }, 678 ReschedulePolicy: &ReschedulePolicy{ 679 Interval: 5 * time.Minute, 680 Attempts: 10, 681 Delay: 5 * time.Second, 682 DelayFunction: "constant", 683 }, 684 } 685 686 err = tg.Validate(j) 687 mErr = err.(*multierror.Error) 688 if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") { 689 t.Fatalf("err: %s", err) 690 } 691 if 
!strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") { 692 t.Fatalf("err: %s", err) 693 } 694 if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") { 695 t.Fatalf("err: %s", err) 696 } 697 if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") { 698 t.Fatalf("err: %s", err) 699 } 700 if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") { 701 t.Fatalf("err: %s", err) 702 } 703 704 tg = &TaskGroup{ 705 Name: "web", 706 Count: 1, 707 Tasks: []*Task{ 708 {Name: "web", Leader: true}, 709 }, 710 Update: DefaultUpdateStrategy.Copy(), 711 } 712 j.Type = JobTypeBatch 713 err = tg.Validate(j) 714 if !strings.Contains(err.Error(), "does not allow update block") { 715 t.Fatalf("err: %s", err) 716 } 717 718 tg = &TaskGroup{ 719 Count: -1, 720 RestartPolicy: &RestartPolicy{ 721 Interval: 5 * time.Minute, 722 Delay: 10 * time.Second, 723 Attempts: 10, 724 Mode: RestartPolicyModeDelay, 725 }, 726 ReschedulePolicy: &ReschedulePolicy{ 727 Interval: 5 * time.Minute, 728 Attempts: 5, 729 Delay: 5 * time.Second, 730 }, 731 } 732 j.Type = JobTypeSystem 733 err = tg.Validate(j) 734 if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") { 735 t.Fatalf("err: %s", err) 736 } 737 } 738 739 func TestTask_Validate(t *testing.T) { 740 task := &Task{} 741 ephemeralDisk := DefaultEphemeralDisk() 742 err := task.Validate(ephemeralDisk) 743 mErr := err.(*multierror.Error) 744 if !strings.Contains(mErr.Errors[0].Error(), "task name") { 745 t.Fatalf("err: %s", err) 746 } 747 if !strings.Contains(mErr.Errors[1].Error(), "task driver") { 748 t.Fatalf("err: %s", err) 749 } 750 if !strings.Contains(mErr.Errors[2].Error(), "task resources") { 751 t.Fatalf("err: %s", err) 752 } 753 754 task = &Task{Name: "web/foo"} 755 err = task.Validate(ephemeralDisk) 756 mErr = err.(*multierror.Error) 757 if !strings.Contains(mErr.Errors[0].Error(), "slashes") { 758 t.Fatalf("err: %s", err) 759 } 760 
761 task = &Task{ 762 Name: "web", 763 Driver: "docker", 764 Resources: &Resources{ 765 CPU: 100, 766 MemoryMB: 100, 767 IOPS: 10, 768 }, 769 LogConfig: DefaultLogConfig(), 770 } 771 ephemeralDisk.SizeMB = 200 772 err = task.Validate(ephemeralDisk) 773 if err != nil { 774 t.Fatalf("err: %s", err) 775 } 776 777 task.Constraints = append(task.Constraints, 778 &Constraint{ 779 Operand: ConstraintDistinctHosts, 780 }, 781 &Constraint{ 782 Operand: ConstraintDistinctProperty, 783 LTarget: "${meta.rack}", 784 }) 785 786 err = task.Validate(ephemeralDisk) 787 mErr = err.(*multierror.Error) 788 if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") { 789 t.Fatalf("err: %s", err) 790 } 791 if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") { 792 t.Fatalf("err: %s", err) 793 } 794 } 795 796 func TestTask_Validate_Services(t *testing.T) { 797 s1 := &Service{ 798 Name: "service-name", 799 PortLabel: "bar", 800 Checks: []*ServiceCheck{ 801 { 802 Name: "check-name", 803 Type: ServiceCheckTCP, 804 Interval: 0 * time.Second, 805 }, 806 { 807 Name: "check-name", 808 Type: ServiceCheckTCP, 809 Timeout: 2 * time.Second, 810 }, 811 { 812 Name: "check-name", 813 Type: ServiceCheckTCP, 814 Interval: 1 * time.Second, 815 }, 816 }, 817 } 818 819 s2 := &Service{ 820 Name: "service-name", 821 PortLabel: "bar", 822 } 823 824 s3 := &Service{ 825 Name: "service-A", 826 PortLabel: "a", 827 } 828 s4 := &Service{ 829 Name: "service-A", 830 PortLabel: "b", 831 } 832 833 ephemeralDisk := DefaultEphemeralDisk() 834 ephemeralDisk.SizeMB = 200 835 task := &Task{ 836 Name: "web", 837 Driver: "docker", 838 Resources: &Resources{ 839 CPU: 100, 840 MemoryMB: 100, 841 IOPS: 10, 842 }, 843 Services: []*Service{s1, s2}, 844 } 845 846 task1 := &Task{ 847 Name: "web", 848 Driver: "docker", 849 Resources: DefaultResources(), 850 Services: []*Service{s3, s4}, 851 LogConfig: DefaultLogConfig(), 852 } 853 task1.Resources.Networks = []*NetworkResource{ 854 { 855 MBits: 
10, 856 DynamicPorts: []Port{ 857 { 858 Label: "a", 859 Value: 1000, 860 }, 861 { 862 Label: "b", 863 Value: 2000, 864 }, 865 }, 866 }, 867 } 868 869 err := task.Validate(ephemeralDisk) 870 if err == nil { 871 t.Fatal("expected an error") 872 } 873 874 if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") { 875 t.Fatalf("err: %v", err) 876 } 877 878 if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") { 879 t.Fatalf("err: %v", err) 880 } 881 882 if !strings.Contains(err.Error(), "missing required value interval") { 883 t.Fatalf("err: %v", err) 884 } 885 886 if !strings.Contains(err.Error(), "cannot be less than") { 887 t.Fatalf("err: %v", err) 888 } 889 890 if err = task1.Validate(ephemeralDisk); err != nil { 891 t.Fatalf("err : %v", err) 892 } 893 } 894 895 func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) { 896 ephemeralDisk := DefaultEphemeralDisk() 897 getTask := func(s *Service) *Task { 898 task := &Task{ 899 Name: "web", 900 Driver: "docker", 901 Resources: DefaultResources(), 902 Services: []*Service{s}, 903 LogConfig: DefaultLogConfig(), 904 } 905 task.Resources.Networks = []*NetworkResource{ 906 { 907 MBits: 10, 908 DynamicPorts: []Port{ 909 { 910 Label: "http", 911 Value: 80, 912 }, 913 }, 914 }, 915 } 916 return task 917 } 918 919 cases := []*Service{ 920 { 921 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 922 Name: "DriverModeWithLabel", 923 PortLabel: "http", 924 AddressMode: AddressModeDriver, 925 }, 926 { 927 Name: "DriverModeWithPort", 928 PortLabel: "80", 929 AddressMode: AddressModeDriver, 930 }, 931 { 932 Name: "HostModeWithLabel", 933 PortLabel: "http", 934 AddressMode: AddressModeHost, 935 }, 936 { 937 Name: "HostModeWithoutLabel", 938 AddressMode: AddressModeHost, 939 }, 940 { 941 Name: "DriverModeWithoutLabel", 942 AddressMode: AddressModeDriver, 943 }, 944 } 945 946 for _, service := range cases { 947 task := getTask(service) 948 t.Run(service.Name, func(t 
*testing.T) { 949 if err := task.Validate(ephemeralDisk); err != nil { 950 t.Fatalf("unexpected err: %v", err) 951 } 952 }) 953 } 954 } 955 956 func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) { 957 ephemeralDisk := DefaultEphemeralDisk() 958 getTask := func(s *Service) *Task { 959 task := &Task{ 960 Name: "web", 961 Driver: "docker", 962 Resources: DefaultResources(), 963 Services: []*Service{s}, 964 LogConfig: DefaultLogConfig(), 965 } 966 task.Resources.Networks = []*NetworkResource{ 967 { 968 MBits: 10, 969 DynamicPorts: []Port{ 970 { 971 Label: "http", 972 Value: 80, 973 }, 974 }, 975 }, 976 } 977 return task 978 } 979 980 cases := []*Service{ 981 { 982 // https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177 983 Name: "DriverModeWithLabel", 984 PortLabel: "asdf", 985 AddressMode: AddressModeDriver, 986 }, 987 { 988 Name: "HostModeWithLabel", 989 PortLabel: "asdf", 990 AddressMode: AddressModeHost, 991 }, 992 { 993 Name: "HostModeWithPort", 994 PortLabel: "80", 995 AddressMode: AddressModeHost, 996 }, 997 } 998 999 for _, service := range cases { 1000 task := getTask(service) 1001 t.Run(service.Name, func(t *testing.T) { 1002 err := task.Validate(ephemeralDisk) 1003 if err == nil { 1004 t.Fatalf("expected an error") 1005 } 1006 //t.Logf("err: %v", err) 1007 }) 1008 } 1009 } 1010 1011 func TestTask_Validate_Service_Check(t *testing.T) { 1012 1013 invalidCheck := ServiceCheck{ 1014 Name: "check-name", 1015 Command: "/bin/true", 1016 Type: ServiceCheckScript, 1017 Interval: 10 * time.Second, 1018 } 1019 1020 err := invalidCheck.validate() 1021 if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") { 1022 t.Fatalf("expected a timeout validation error but received: %q", err) 1023 } 1024 1025 check1 := ServiceCheck{ 1026 Name: "check-name", 1027 Type: ServiceCheckTCP, 1028 Interval: 10 * time.Second, 1029 Timeout: 2 * time.Second, 1030 } 1031 1032 if err := check1.validate(); err != nil { 1033 t.Fatalf("err: %v", err) 
1034 } 1035 1036 check1.InitialStatus = "foo" 1037 err = check1.validate() 1038 if err == nil { 1039 t.Fatal("Expected an error") 1040 } 1041 1042 if !strings.Contains(err.Error(), "invalid initial check state (foo)") { 1043 t.Fatalf("err: %v", err) 1044 } 1045 1046 check1.InitialStatus = api.HealthCritical 1047 err = check1.validate() 1048 if err != nil { 1049 t.Fatalf("err: %v", err) 1050 } 1051 1052 check1.InitialStatus = api.HealthPassing 1053 err = check1.validate() 1054 if err != nil { 1055 t.Fatalf("err: %v", err) 1056 } 1057 1058 check1.InitialStatus = "" 1059 err = check1.validate() 1060 if err != nil { 1061 t.Fatalf("err: %v", err) 1062 } 1063 1064 check2 := ServiceCheck{ 1065 Name: "check-name-2", 1066 Type: ServiceCheckHTTP, 1067 Interval: 10 * time.Second, 1068 Timeout: 2 * time.Second, 1069 Path: "/foo/bar", 1070 } 1071 1072 err = check2.validate() 1073 if err != nil { 1074 t.Fatalf("err: %v", err) 1075 } 1076 1077 check2.Path = "" 1078 err = check2.validate() 1079 if err == nil { 1080 t.Fatal("Expected an error") 1081 } 1082 if !strings.Contains(err.Error(), "valid http path") { 1083 t.Fatalf("err: %v", err) 1084 } 1085 1086 check2.Path = "http://www.example.com" 1087 err = check2.validate() 1088 if err == nil { 1089 t.Fatal("Expected an error") 1090 } 1091 if !strings.Contains(err.Error(), "relative http path") { 1092 t.Fatalf("err: %v", err) 1093 } 1094 } 1095 1096 // TestTask_Validate_Service_Check_AddressMode asserts that checks do not 1097 // inherit address mode but do inherit ports. 
1098 func TestTask_Validate_Service_Check_AddressMode(t *testing.T) { 1099 getTask := func(s *Service) *Task { 1100 return &Task{ 1101 Resources: &Resources{ 1102 Networks: []*NetworkResource{ 1103 { 1104 DynamicPorts: []Port{ 1105 { 1106 Label: "http", 1107 Value: 9999, 1108 }, 1109 }, 1110 }, 1111 }, 1112 }, 1113 Services: []*Service{s}, 1114 } 1115 } 1116 1117 cases := []struct { 1118 Service *Service 1119 ErrContains string 1120 }{ 1121 { 1122 Service: &Service{ 1123 Name: "invalid-driver", 1124 PortLabel: "80", 1125 AddressMode: "host", 1126 }, 1127 ErrContains: `port label "80" referenced`, 1128 }, 1129 { 1130 Service: &Service{ 1131 Name: "http-driver-fail-1", 1132 PortLabel: "80", 1133 AddressMode: "driver", 1134 Checks: []*ServiceCheck{ 1135 { 1136 Name: "invalid-check-1", 1137 Type: "tcp", 1138 Interval: time.Second, 1139 Timeout: time.Second, 1140 }, 1141 }, 1142 }, 1143 ErrContains: `check "invalid-check-1" cannot use a numeric port`, 1144 }, 1145 { 1146 Service: &Service{ 1147 Name: "http-driver-fail-2", 1148 PortLabel: "80", 1149 AddressMode: "driver", 1150 Checks: []*ServiceCheck{ 1151 { 1152 Name: "invalid-check-2", 1153 Type: "tcp", 1154 PortLabel: "80", 1155 Interval: time.Second, 1156 Timeout: time.Second, 1157 }, 1158 }, 1159 }, 1160 ErrContains: `check "invalid-check-2" cannot use a numeric port`, 1161 }, 1162 { 1163 Service: &Service{ 1164 Name: "http-driver-fail-3", 1165 PortLabel: "80", 1166 AddressMode: "driver", 1167 Checks: []*ServiceCheck{ 1168 { 1169 Name: "invalid-check-3", 1170 Type: "tcp", 1171 PortLabel: "missing-port-label", 1172 Interval: time.Second, 1173 Timeout: time.Second, 1174 }, 1175 }, 1176 }, 1177 ErrContains: `port label "missing-port-label" referenced`, 1178 }, 1179 { 1180 Service: &Service{ 1181 Name: "http-driver-passes", 1182 PortLabel: "80", 1183 AddressMode: "driver", 1184 Checks: []*ServiceCheck{ 1185 { 1186 Name: "valid-script-check", 1187 Type: "script", 1188 Command: "ok", 1189 Interval: time.Second, 1190 
Timeout: time.Second, 1191 }, 1192 { 1193 Name: "valid-host-check", 1194 Type: "tcp", 1195 PortLabel: "http", 1196 Interval: time.Second, 1197 Timeout: time.Second, 1198 }, 1199 { 1200 Name: "valid-driver-check", 1201 Type: "tcp", 1202 AddressMode: "driver", 1203 Interval: time.Second, 1204 Timeout: time.Second, 1205 }, 1206 }, 1207 }, 1208 }, 1209 { 1210 Service: &Service{ 1211 Name: "empty-address-3673-passes-1", 1212 Checks: []*ServiceCheck{ 1213 { 1214 Name: "valid-port-label", 1215 Type: "tcp", 1216 PortLabel: "http", 1217 Interval: time.Second, 1218 Timeout: time.Second, 1219 }, 1220 { 1221 Name: "empty-is-ok", 1222 Type: "script", 1223 Command: "ok", 1224 Interval: time.Second, 1225 Timeout: time.Second, 1226 }, 1227 }, 1228 }, 1229 }, 1230 { 1231 Service: &Service{ 1232 Name: "empty-address-3673-passes-2", 1233 }, 1234 }, 1235 { 1236 Service: &Service{ 1237 Name: "empty-address-3673-fails", 1238 Checks: []*ServiceCheck{ 1239 { 1240 Name: "empty-is-not-ok", 1241 Type: "tcp", 1242 Interval: time.Second, 1243 Timeout: time.Second, 1244 }, 1245 }, 1246 }, 1247 ErrContains: `invalid: check requires a port but neither check nor service`, 1248 }, 1249 } 1250 1251 for _, tc := range cases { 1252 tc := tc 1253 task := getTask(tc.Service) 1254 t.Run(tc.Service.Name, func(t *testing.T) { 1255 err := validateServices(task) 1256 if err == nil && tc.ErrContains == "" { 1257 // Ok! 1258 return 1259 } 1260 if err == nil { 1261 t.Fatalf("no error returned. 
expected: %s", tc.ErrContains) 1262 } 1263 if !strings.Contains(err.Error(), tc.ErrContains) { 1264 t.Fatalf("expected %q but found: %v", tc.ErrContains, err) 1265 } 1266 }) 1267 } 1268 } 1269 1270 func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) { 1271 invalidCheckRestart := &CheckRestart{ 1272 Limit: -1, 1273 Grace: -1, 1274 } 1275 1276 err := invalidCheckRestart.Validate() 1277 assert.NotNil(t, err, "invalidateCheckRestart.Validate()") 1278 assert.Len(t, err.(*multierror.Error).Errors, 2) 1279 1280 validCheckRestart := &CheckRestart{} 1281 assert.Nil(t, validCheckRestart.Validate()) 1282 1283 validCheckRestart.Limit = 1 1284 validCheckRestart.Grace = 1 1285 assert.Nil(t, validCheckRestart.Validate()) 1286 } 1287 1288 func TestTask_Validate_LogConfig(t *testing.T) { 1289 task := &Task{ 1290 LogConfig: DefaultLogConfig(), 1291 } 1292 ephemeralDisk := &EphemeralDisk{ 1293 SizeMB: 1, 1294 } 1295 1296 err := task.Validate(ephemeralDisk) 1297 mErr := err.(*multierror.Error) 1298 if !strings.Contains(mErr.Errors[3].Error(), "log storage") { 1299 t.Fatalf("err: %s", err) 1300 } 1301 } 1302 1303 func TestTask_Validate_Template(t *testing.T) { 1304 1305 bad := &Template{} 1306 task := &Task{ 1307 Templates: []*Template{bad}, 1308 } 1309 ephemeralDisk := &EphemeralDisk{ 1310 SizeMB: 1, 1311 } 1312 1313 err := task.Validate(ephemeralDisk) 1314 if !strings.Contains(err.Error(), "Template 1 validation failed") { 1315 t.Fatalf("err: %s", err) 1316 } 1317 1318 // Have two templates that share the same destination 1319 good := &Template{ 1320 SourcePath: "foo", 1321 DestPath: "local/foo", 1322 ChangeMode: "noop", 1323 } 1324 1325 task.Templates = []*Template{good, good} 1326 err = task.Validate(ephemeralDisk) 1327 if !strings.Contains(err.Error(), "same destination as") { 1328 t.Fatalf("err: %s", err) 1329 } 1330 1331 // Env templates can't use signals 1332 task.Templates = []*Template{ 1333 { 1334 Envvars: true, 1335 ChangeMode: "signal", 1336 }, 1337 } 1338 1339 
err = task.Validate(ephemeralDisk) 1340 if err == nil { 1341 t.Fatalf("expected error from Template.Validate") 1342 } 1343 if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) { 1344 t.Errorf("expected to find %q but found %v", expected, err) 1345 } 1346 } 1347 1348 func TestTemplate_Validate(t *testing.T) { 1349 cases := []struct { 1350 Tmpl *Template 1351 Fail bool 1352 ContainsErrs []string 1353 }{ 1354 { 1355 Tmpl: &Template{}, 1356 Fail: true, 1357 ContainsErrs: []string{ 1358 "specify a source path", 1359 "specify a destination", 1360 TemplateChangeModeInvalidError.Error(), 1361 }, 1362 }, 1363 { 1364 Tmpl: &Template{ 1365 Splay: -100, 1366 }, 1367 Fail: true, 1368 ContainsErrs: []string{ 1369 "positive splay", 1370 }, 1371 }, 1372 { 1373 Tmpl: &Template{ 1374 ChangeMode: "foo", 1375 }, 1376 Fail: true, 1377 ContainsErrs: []string{ 1378 TemplateChangeModeInvalidError.Error(), 1379 }, 1380 }, 1381 { 1382 Tmpl: &Template{ 1383 ChangeMode: "signal", 1384 }, 1385 Fail: true, 1386 ContainsErrs: []string{ 1387 "specify signal value", 1388 }, 1389 }, 1390 { 1391 Tmpl: &Template{ 1392 SourcePath: "foo", 1393 DestPath: "../../root", 1394 ChangeMode: "noop", 1395 }, 1396 Fail: true, 1397 ContainsErrs: []string{ 1398 "destination escapes", 1399 }, 1400 }, 1401 { 1402 Tmpl: &Template{ 1403 SourcePath: "foo", 1404 DestPath: "local/foo", 1405 ChangeMode: "noop", 1406 }, 1407 Fail: false, 1408 }, 1409 { 1410 Tmpl: &Template{ 1411 SourcePath: "foo", 1412 DestPath: "local/foo", 1413 ChangeMode: "noop", 1414 Perms: "0444", 1415 }, 1416 Fail: false, 1417 }, 1418 { 1419 Tmpl: &Template{ 1420 SourcePath: "foo", 1421 DestPath: "local/foo", 1422 ChangeMode: "noop", 1423 Perms: "zza", 1424 }, 1425 Fail: true, 1426 ContainsErrs: []string{ 1427 "as octal", 1428 }, 1429 }, 1430 } 1431 1432 for i, c := range cases { 1433 err := c.Tmpl.Validate() 1434 if err != nil { 1435 if !c.Fail { 1436 t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err) 1437 } 1438 1439 
e := err.Error() 1440 for _, exp := range c.ContainsErrs { 1441 if !strings.Contains(e, exp) { 1442 t.Fatalf("Cased %d: should have contained error %q: %q", i+1, exp, e) 1443 } 1444 } 1445 } else if c.Fail { 1446 t.Fatalf("Case %d: should have failed: %v", i+1, err) 1447 } 1448 } 1449 } 1450 1451 func TestConstraint_Validate(t *testing.T) { 1452 c := &Constraint{} 1453 err := c.Validate() 1454 mErr := err.(*multierror.Error) 1455 if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") { 1456 t.Fatalf("err: %s", err) 1457 } 1458 1459 c = &Constraint{ 1460 LTarget: "$attr.kernel.name", 1461 RTarget: "linux", 1462 Operand: "=", 1463 } 1464 err = c.Validate() 1465 if err != nil { 1466 t.Fatalf("err: %v", err) 1467 } 1468 1469 // Perform additional regexp validation 1470 c.Operand = ConstraintRegex 1471 c.RTarget = "(foo" 1472 err = c.Validate() 1473 mErr = err.(*multierror.Error) 1474 if !strings.Contains(mErr.Errors[0].Error(), "missing closing") { 1475 t.Fatalf("err: %s", err) 1476 } 1477 1478 // Perform version validation 1479 c.Operand = ConstraintVersion 1480 c.RTarget = "~> foo" 1481 err = c.Validate() 1482 mErr = err.(*multierror.Error) 1483 if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") { 1484 t.Fatalf("err: %s", err) 1485 } 1486 1487 // Perform distinct_property validation 1488 c.Operand = ConstraintDistinctProperty 1489 c.RTarget = "0" 1490 err = c.Validate() 1491 mErr = err.(*multierror.Error) 1492 if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") { 1493 t.Fatalf("err: %s", err) 1494 } 1495 1496 c.RTarget = "-1" 1497 err = c.Validate() 1498 mErr = err.(*multierror.Error) 1499 if !strings.Contains(mErr.Errors[0].Error(), "to uint64") { 1500 t.Fatalf("err: %s", err) 1501 } 1502 1503 // Perform distinct_hosts validation 1504 c.Operand = ConstraintDistinctHosts 1505 c.LTarget = "" 1506 c.RTarget = "" 1507 if err := c.Validate(); err != nil { 1508 t.Fatalf("expected valid constraint: %v", err) 1509 } 
1510 1511 // Perform set_contains validation 1512 c.Operand = ConstraintSetContains 1513 c.RTarget = "" 1514 err = c.Validate() 1515 mErr = err.(*multierror.Error) 1516 if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") { 1517 t.Fatalf("err: %s", err) 1518 } 1519 1520 // Perform LTarget validation 1521 c.Operand = ConstraintRegex 1522 c.RTarget = "foo" 1523 c.LTarget = "" 1524 err = c.Validate() 1525 mErr = err.(*multierror.Error) 1526 if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") { 1527 t.Fatalf("err: %s", err) 1528 } 1529 1530 // Perform constraint type validation 1531 c.Operand = "foo" 1532 err = c.Validate() 1533 mErr = err.(*multierror.Error) 1534 if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") { 1535 t.Fatalf("err: %s", err) 1536 } 1537 } 1538 1539 func TestUpdateStrategy_Validate(t *testing.T) { 1540 u := &UpdateStrategy{ 1541 MaxParallel: 0, 1542 HealthCheck: "foo", 1543 MinHealthyTime: -10, 1544 HealthyDeadline: -15, 1545 AutoRevert: false, 1546 Canary: -1, 1547 } 1548 1549 err := u.Validate() 1550 mErr := err.(*multierror.Error) 1551 if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") { 1552 t.Fatalf("err: %s", err) 1553 } 1554 if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than one") { 1555 t.Fatalf("err: %s", err) 1556 } 1557 if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") { 1558 t.Fatalf("err: %s", err) 1559 } 1560 if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") { 1561 t.Fatalf("err: %s", err) 1562 } 1563 if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") { 1564 t.Fatalf("err: %s", err) 1565 } 1566 if !strings.Contains(mErr.Errors[5].Error(), "Minimum healthy time must be less than healthy deadline") { 1567 t.Fatalf("err: %s", err) 1568 } 1569 } 1570 1571 func TestResource_NetIndex(t *testing.T) { 1572 r := &Resources{ 1573 
Networks: []*NetworkResource{ 1574 {Device: "eth0"}, 1575 {Device: "lo0"}, 1576 {Device: ""}, 1577 }, 1578 } 1579 if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 { 1580 t.Fatalf("Bad: %d", idx) 1581 } 1582 if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 { 1583 t.Fatalf("Bad: %d", idx) 1584 } 1585 if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 { 1586 t.Fatalf("Bad: %d", idx) 1587 } 1588 } 1589 1590 func TestResource_Superset(t *testing.T) { 1591 r1 := &Resources{ 1592 CPU: 2000, 1593 MemoryMB: 2048, 1594 DiskMB: 10000, 1595 IOPS: 100, 1596 } 1597 r2 := &Resources{ 1598 CPU: 2000, 1599 MemoryMB: 1024, 1600 DiskMB: 5000, 1601 IOPS: 50, 1602 } 1603 1604 if s, _ := r1.Superset(r1); !s { 1605 t.Fatalf("bad") 1606 } 1607 if s, _ := r1.Superset(r2); !s { 1608 t.Fatalf("bad") 1609 } 1610 if s, _ := r2.Superset(r1); s { 1611 t.Fatalf("bad") 1612 } 1613 if s, _ := r2.Superset(r2); !s { 1614 t.Fatalf("bad") 1615 } 1616 } 1617 1618 func TestResource_Add(t *testing.T) { 1619 r1 := &Resources{ 1620 CPU: 2000, 1621 MemoryMB: 2048, 1622 DiskMB: 10000, 1623 IOPS: 100, 1624 Networks: []*NetworkResource{ 1625 { 1626 CIDR: "10.0.0.0/8", 1627 MBits: 100, 1628 ReservedPorts: []Port{{"ssh", 22}}, 1629 }, 1630 }, 1631 } 1632 r2 := &Resources{ 1633 CPU: 2000, 1634 MemoryMB: 1024, 1635 DiskMB: 5000, 1636 IOPS: 50, 1637 Networks: []*NetworkResource{ 1638 { 1639 IP: "10.0.0.1", 1640 MBits: 50, 1641 ReservedPorts: []Port{{"web", 80}}, 1642 }, 1643 }, 1644 } 1645 1646 err := r1.Add(r2) 1647 if err != nil { 1648 t.Fatalf("Err: %v", err) 1649 } 1650 1651 expect := &Resources{ 1652 CPU: 3000, 1653 MemoryMB: 3072, 1654 DiskMB: 15000, 1655 IOPS: 150, 1656 Networks: []*NetworkResource{ 1657 { 1658 CIDR: "10.0.0.0/8", 1659 MBits: 150, 1660 ReservedPorts: []Port{{"ssh", 22}, {"web", 80}}, 1661 }, 1662 }, 1663 } 1664 1665 if !reflect.DeepEqual(expect.Networks, r1.Networks) { 1666 t.Fatalf("bad: %#v %#v", expect, r1) 1667 } 1668 } 1669 1670 func 
TestResource_Add_Network(t *testing.T) {
	// Adding networks without a device should accumulate MBits and
	// concatenate dynamic ports onto a single network entry.
	r1 := &Resources{}
	r2 := &Resources{
		Networks: []*NetworkResource{
			{
				MBits:        50,
				DynamicPorts: []Port{{"http", 0}, {"https", 0}},
			},
		},
	}
	r3 := &Resources{
		Networks: []*NetworkResource{
			{
				MBits:        25,
				DynamicPorts: []Port{{"admin", 0}},
			},
		},
	}

	err := r1.Add(r2)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}
	err = r1.Add(r3)
	if err != nil {
		t.Fatalf("Err: %v", err)
	}

	expect := &Resources{
		Networks: []*NetworkResource{
			{
				MBits:        75,
				DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
			},
		},
	}

	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
		t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0])
	}
}

// TestEncodeDecode round-trips a struct through Encode/Decode; buf[0] carries
// the message type byte and is skipped on decode.
func TestEncodeDecode(t *testing.T) {
	type FooRequest struct {
		Foo string
		Bar int
		Baz bool
	}
	arg := &FooRequest{
		Foo: "test",
		Bar: 42,
		Baz: true,
	}
	buf, err := Encode(1, arg)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	var out FooRequest
	err = Decode(buf[1:], &out)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if !reflect.DeepEqual(arg, &out) {
		t.Fatalf("bad: %#v %#v", arg, out)
	}
}

// BenchmarkEncodeDecode measures an Encode+Decode round trip of a test job.
func BenchmarkEncodeDecode(b *testing.B) {
	job := testJob()

	for i := 0; i < b.N; i++ {
		buf, err := Encode(1, job)
		if err != nil {
			b.Fatalf("err: %v", err)
		}

		var out Job
		err = Decode(buf[1:], &out)
		if err != nil {
			b.Fatalf("err: %v", err)
		}
	}
}

// TestInvalidServiceCheck exercises Service.Validate and Service.ValidateName
// across invalid names, interpolated names, and check configurations.
func TestInvalidServiceCheck(t *testing.T) {
	s := Service{
		Name:      "service-name",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name: "check-name",
				Type: "lol",
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (invalid type)")
	}

	s = Service{
		Name:      "service.name",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (contains a dot): %v", err)
	}

	s = Service{
		Name:      "-my-service",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (begins with a hyphen): %v", err)
	}

	s = Service{
		Name:      "my-service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("Service should be valid: %v", err)
	}

	s = Service{
		Name:      "my_service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err)
	}

	s = Service{
		Name:      "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (too long): %v", err)
	}

	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name:     "check-tcp",
				Type:     ServiceCheckTCP,
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
			{
				Name:     "check-http",
				Type:     ServiceCheckHTTP,
				Path:     "/foo",
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err)
	}

	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name:     "check-script",
				Type:     ServiceCheckScript,
				Command:  "/bin/date",
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("un-expected error: %v", err)
1847 } 1848 } 1849 1850 func TestDistinctCheckID(t *testing.T) { 1851 c1 := ServiceCheck{ 1852 Name: "web-health", 1853 Type: "http", 1854 Path: "/health", 1855 Interval: 2 * time.Second, 1856 Timeout: 3 * time.Second, 1857 } 1858 c2 := ServiceCheck{ 1859 Name: "web-health", 1860 Type: "http", 1861 Path: "/health1", 1862 Interval: 2 * time.Second, 1863 Timeout: 3 * time.Second, 1864 } 1865 1866 c3 := ServiceCheck{ 1867 Name: "web-health", 1868 Type: "http", 1869 Path: "/health", 1870 Interval: 4 * time.Second, 1871 Timeout: 3 * time.Second, 1872 } 1873 serviceID := "123" 1874 c1Hash := c1.Hash(serviceID) 1875 c2Hash := c2.Hash(serviceID) 1876 c3Hash := c3.Hash(serviceID) 1877 1878 if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash { 1879 t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash) 1880 } 1881 1882 } 1883 1884 func TestService_Canonicalize(t *testing.T) { 1885 job := "example" 1886 taskGroup := "cache" 1887 task := "redis" 1888 1889 s := Service{ 1890 Name: "${TASK}-db", 1891 } 1892 1893 s.Canonicalize(job, taskGroup, task) 1894 if s.Name != "redis-db" { 1895 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 1896 } 1897 1898 s.Name = "db" 1899 s.Canonicalize(job, taskGroup, task) 1900 if s.Name != "db" { 1901 t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name) 1902 } 1903 1904 s.Name = "${JOB}-${TASKGROUP}-${TASK}-db" 1905 s.Canonicalize(job, taskGroup, task) 1906 if s.Name != "example-cache-redis-db" { 1907 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 1908 } 1909 1910 s.Name = "${BASE}-db" 1911 s.Canonicalize(job, taskGroup, task) 1912 if s.Name != "example-cache-redis-db" { 1913 t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name) 1914 } 1915 1916 } 1917 1918 func TestJob_ExpandServiceNames(t *testing.T) { 1919 j := &Job{ 1920 Name: "my-job", 1921 TaskGroups: []*TaskGroup{ 1922 { 1923 Name: "web", 1924 Tasks: []*Task{ 1925 { 1926 Name: 
"frontend", 1927 Services: []*Service{ 1928 { 1929 Name: "${BASE}-default", 1930 }, 1931 { 1932 Name: "jmx", 1933 }, 1934 }, 1935 }, 1936 }, 1937 }, 1938 { 1939 Name: "admin", 1940 Tasks: []*Task{ 1941 { 1942 Name: "admin-web", 1943 }, 1944 }, 1945 }, 1946 }, 1947 } 1948 1949 j.Canonicalize() 1950 1951 service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name 1952 if service1Name != "my-job-web-frontend-default" { 1953 t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name) 1954 } 1955 1956 service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name 1957 if service2Name != "jmx" { 1958 t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name) 1959 } 1960 1961 } 1962 1963 func TestPeriodicConfig_EnabledInvalid(t *testing.T) { 1964 // Create a config that is enabled but with no interval specified. 1965 p := &PeriodicConfig{Enabled: true} 1966 if err := p.Validate(); err == nil { 1967 t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid") 1968 } 1969 1970 // Create a config that is enabled, with a spec but no type specified. 1971 p = &PeriodicConfig{Enabled: true, Spec: "foo"} 1972 if err := p.Validate(); err == nil { 1973 t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid") 1974 } 1975 1976 // Create a config that is enabled, with a spec type but no spec specified. 1977 p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron} 1978 if err := p.Validate(); err == nil { 1979 t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid") 1980 } 1981 1982 // Create a config that is enabled, with a bad time zone. 
1983 p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"} 1984 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") { 1985 t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err) 1986 } 1987 } 1988 1989 func TestPeriodicConfig_InvalidCron(t *testing.T) { 1990 specs := []string{"foo", "* *", "@foo"} 1991 for _, spec := range specs { 1992 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 1993 p.Canonicalize() 1994 if err := p.Validate(); err == nil { 1995 t.Fatal("Invalid cron spec") 1996 } 1997 } 1998 } 1999 2000 func TestPeriodicConfig_ValidCron(t *testing.T) { 2001 specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"} 2002 for _, spec := range specs { 2003 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2004 p.Canonicalize() 2005 if err := p.Validate(); err != nil { 2006 t.Fatal("Passed valid cron") 2007 } 2008 } 2009 } 2010 2011 func TestPeriodicConfig_NextCron(t *testing.T) { 2012 require := require.New(t) 2013 2014 type testExpectation struct { 2015 Time time.Time 2016 HasError bool 2017 ErrorMsg string 2018 } 2019 2020 from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC) 2021 specs := []string{"0 0 29 2 * 1980", 2022 "*/5 * * * *", 2023 "1 15-0 * * 1-5"} 2024 expected := []*testExpectation{ 2025 { 2026 Time: time.Time{}, 2027 HasError: false, 2028 }, 2029 { 2030 Time: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC), 2031 HasError: false, 2032 }, 2033 { 2034 Time: time.Time{}, 2035 HasError: true, 2036 ErrorMsg: "failed parsing cron expression", 2037 }, 2038 } 2039 2040 for i, spec := range specs { 2041 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec} 2042 p.Canonicalize() 2043 n, err := p.Next(from) 2044 nextExpected := expected[i] 2045 2046 require.Equal(nextExpected.Time, n) 2047 require.Equal(err != nil, nextExpected.HasError) 2048 if err != nil { 2049 require.True(strings.Contains(err.Error(), 
nextExpected.ErrorMsg)) 2050 } 2051 } 2052 } 2053 2054 func TestPeriodicConfig_ValidTimeZone(t *testing.T) { 2055 zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"} 2056 for _, zone := range zones { 2057 p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone} 2058 p.Canonicalize() 2059 if err := p.Validate(); err != nil { 2060 t.Fatalf("Valid tz errored: %v", err) 2061 } 2062 } 2063 } 2064 2065 func TestPeriodicConfig_DST(t *testing.T) { 2066 require := require.New(t) 2067 2068 // On Sun, Mar 12, 2:00 am 2017: +1 hour UTC 2069 p := &PeriodicConfig{ 2070 Enabled: true, 2071 SpecType: PeriodicSpecCron, 2072 Spec: "0 2 11-12 3 * 2017", 2073 TimeZone: "America/Los_Angeles", 2074 } 2075 p.Canonicalize() 2076 2077 t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location) 2078 t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location) 2079 2080 // E1 is an 8 hour adjustment, E2 is a 7 hour adjustment 2081 e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC) 2082 e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC) 2083 2084 n1, err := p.Next(t1) 2085 require.Nil(err) 2086 2087 n2, err := p.Next(t2) 2088 require.Nil(err) 2089 2090 require.Equal(e1, n1.UTC()) 2091 require.Equal(e2, n2.UTC()) 2092 } 2093 2094 func TestRestartPolicy_Validate(t *testing.T) { 2095 // Policy with acceptable restart options passes 2096 p := &RestartPolicy{ 2097 Mode: RestartPolicyModeFail, 2098 Attempts: 0, 2099 Interval: 5 * time.Second, 2100 } 2101 if err := p.Validate(); err != nil { 2102 t.Fatalf("err: %v", err) 2103 } 2104 2105 // Policy with ambiguous restart options fails 2106 p = &RestartPolicy{ 2107 Mode: RestartPolicyModeDelay, 2108 Attempts: 0, 2109 Interval: 5 * time.Second, 2110 } 2111 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") { 2112 t.Fatalf("expect ambiguity error, got: %v", err) 2113 } 2114 2115 // Bad policy mode fails 2116 p = &RestartPolicy{ 
2117 Mode: "nope", 2118 Attempts: 1, 2119 Interval: 5 * time.Second, 2120 } 2121 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") { 2122 t.Fatalf("expect mode error, got: %v", err) 2123 } 2124 2125 // Fails when attempts*delay does not fit inside interval 2126 p = &RestartPolicy{ 2127 Mode: RestartPolicyModeDelay, 2128 Attempts: 3, 2129 Delay: 5 * time.Second, 2130 Interval: 5 * time.Second, 2131 } 2132 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") { 2133 t.Fatalf("expect restart interval error, got: %v", err) 2134 } 2135 2136 // Fails when interval is to small 2137 p = &RestartPolicy{ 2138 Mode: RestartPolicyModeDelay, 2139 Attempts: 3, 2140 Delay: 5 * time.Second, 2141 Interval: 2 * time.Second, 2142 } 2143 if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") { 2144 t.Fatalf("expect interval too small error, got: %v", err) 2145 } 2146 } 2147 2148 func TestReschedulePolicy_Validate(t *testing.T) { 2149 type testCase struct { 2150 desc string 2151 ReschedulePolicy *ReschedulePolicy 2152 errors []error 2153 } 2154 2155 testCases := []testCase{ 2156 { 2157 desc: "Nil", 2158 }, 2159 { 2160 desc: "Disabled", 2161 ReschedulePolicy: &ReschedulePolicy{ 2162 Attempts: 0, 2163 Interval: 0 * time.Second}, 2164 }, 2165 { 2166 desc: "Disabled", 2167 ReschedulePolicy: &ReschedulePolicy{ 2168 Attempts: -1, 2169 Interval: 5 * time.Minute}, 2170 }, 2171 { 2172 desc: "Valid Linear Delay", 2173 ReschedulePolicy: &ReschedulePolicy{ 2174 Attempts: 1, 2175 Interval: 5 * time.Minute, 2176 Delay: 10 * time.Second, 2177 DelayFunction: "constant"}, 2178 }, 2179 { 2180 desc: "Valid Exponential Delay", 2181 ReschedulePolicy: &ReschedulePolicy{ 2182 Attempts: 5, 2183 Interval: 1 * time.Hour, 2184 Delay: 30 * time.Second, 2185 MaxDelay: 5 * time.Minute, 2186 DelayFunction: "exponential"}, 2187 }, 2188 { 2189 desc: "Valid Fibonacci Delay", 2190 ReschedulePolicy: 
&ReschedulePolicy{ 2191 Attempts: 5, 2192 Interval: 15 * time.Minute, 2193 Delay: 10 * time.Second, 2194 MaxDelay: 5 * time.Minute, 2195 DelayFunction: "fibonacci"}, 2196 }, 2197 { 2198 desc: "Invalid delay function", 2199 ReschedulePolicy: &ReschedulePolicy{ 2200 Attempts: 1, 2201 Interval: 1 * time.Second, 2202 DelayFunction: "blah"}, 2203 errors: []error{ 2204 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 2205 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2206 fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions), 2207 }, 2208 }, 2209 { 2210 desc: "Invalid delay ceiling", 2211 ReschedulePolicy: &ReschedulePolicy{ 2212 Attempts: 1, 2213 Interval: 8 * time.Second, 2214 DelayFunction: "exponential", 2215 Delay: 15 * time.Second, 2216 MaxDelay: 5 * time.Second}, 2217 errors: []error{ 2218 fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)", 2219 15*time.Second, 5*time.Second), 2220 }, 2221 }, 2222 { 2223 desc: "Invalid delay and interval", 2224 ReschedulePolicy: &ReschedulePolicy{ 2225 Attempts: 1, 2226 Interval: 1 * time.Second, 2227 DelayFunction: "constant"}, 2228 errors: []error{ 2229 fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second), 2230 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2231 }, 2232 }, { 2233 // Should suggest 2h40m as the interval 2234 desc: "Invalid Attempts - linear delay", 2235 ReschedulePolicy: &ReschedulePolicy{ 2236 Attempts: 10, 2237 Interval: 1 * time.Hour, 2238 Delay: 20 * time.Minute, 2239 DelayFunction: "constant", 2240 }, 2241 errors: []error{ 2242 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+ 2243 " delay function %q", 3, time.Hour, 20*time.Minute, "constant"), 2244 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 2245 200*time.Minute, 10), 
2246 }, 2247 }, 2248 { 2249 // Should suggest 4h40m as the interval 2250 // Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40} 2251 desc: "Invalid Attempts - exponential delay", 2252 ReschedulePolicy: &ReschedulePolicy{ 2253 Attempts: 10, 2254 Interval: 30 * time.Minute, 2255 Delay: 5 * time.Minute, 2256 MaxDelay: 40 * time.Minute, 2257 DelayFunction: "exponential", 2258 }, 2259 errors: []error{ 2260 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 2261 "delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute, 2262 "exponential", 40*time.Minute), 2263 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 2264 280*time.Minute, 10), 2265 }, 2266 }, 2267 { 2268 // Should suggest 8h as the interval 2269 // Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80} 2270 desc: "Invalid Attempts - fibonacci delay", 2271 ReschedulePolicy: &ReschedulePolicy{ 2272 Attempts: 10, 2273 Interval: 1 * time.Hour, 2274 Delay: 20 * time.Minute, 2275 MaxDelay: 80 * time.Minute, 2276 DelayFunction: "fibonacci", 2277 }, 2278 errors: []error{ 2279 fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+ 2280 "delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute, 2281 "fibonacci", 80*time.Minute), 2282 fmt.Errorf("Set the interval to at least %v to accommodate %v attempts", 2283 480*time.Minute, 10), 2284 }, 2285 }, 2286 { 2287 desc: "Ambiguous Unlimited config, has both attempts and unlimited set", 2288 ReschedulePolicy: &ReschedulePolicy{ 2289 Attempts: 1, 2290 Unlimited: true, 2291 DelayFunction: "exponential", 2292 Delay: 5 * time.Minute, 2293 MaxDelay: 1 * time.Hour, 2294 }, 2295 errors: []error{ 2296 fmt.Errorf("Interval must be a non zero value if Attempts > 0"), 2297 fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true), 2298 }, 2299 }, 2300 { 2301 desc: "Invalid Unlimited 
config", 2302 ReschedulePolicy: &ReschedulePolicy{ 2303 Attempts: 1, 2304 Interval: 1 * time.Second, 2305 Unlimited: true, 2306 DelayFunction: "exponential", 2307 }, 2308 errors: []error{ 2309 fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2310 fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second), 2311 }, 2312 }, 2313 { 2314 desc: "Valid Unlimited config", 2315 ReschedulePolicy: &ReschedulePolicy{ 2316 Unlimited: true, 2317 DelayFunction: "exponential", 2318 Delay: 5 * time.Second, 2319 MaxDelay: 1 * time.Hour, 2320 }, 2321 }, 2322 } 2323 2324 for _, tc := range testCases { 2325 t.Run(tc.desc, func(t *testing.T) { 2326 require := require.New(t) 2327 gotErr := tc.ReschedulePolicy.Validate() 2328 if tc.errors != nil { 2329 // Validate all errors 2330 for _, err := range tc.errors { 2331 require.Contains(gotErr.Error(), err.Error()) 2332 } 2333 } else { 2334 require.Nil(gotErr) 2335 } 2336 }) 2337 } 2338 } 2339 2340 func TestAllocation_Index(t *testing.T) { 2341 a1 := Allocation{ 2342 Name: "example.cache[1]", 2343 TaskGroup: "cache", 2344 JobID: "example", 2345 Job: &Job{ 2346 ID: "example", 2347 TaskGroups: []*TaskGroup{{Name: "cache"}}}, 2348 } 2349 e1 := uint(1) 2350 a2 := a1.Copy() 2351 a2.Name = "example.cache[713127]" 2352 e2 := uint(713127) 2353 2354 if a1.Index() != e1 || a2.Index() != e2 { 2355 t.Fatalf("Got %d and %d", a1.Index(), a2.Index()) 2356 } 2357 } 2358 2359 func TestTaskArtifact_Validate_Source(t *testing.T) { 2360 valid := &TaskArtifact{GetterSource: "google.com"} 2361 if err := valid.Validate(); err != nil { 2362 t.Fatalf("unexpected error: %v", err) 2363 } 2364 } 2365 2366 func TestTaskArtifact_Validate_Dest(t *testing.T) { 2367 valid := &TaskArtifact{GetterSource: "google.com"} 2368 if err := valid.Validate(); err != nil { 2369 t.Fatalf("unexpected error: %v", err) 2370 } 2371 2372 valid.RelativeDest = "local/" 2373 if err := valid.Validate(); err != nil 
{ 2374 t.Fatalf("unexpected error: %v", err) 2375 } 2376 2377 valid.RelativeDest = "local/.." 2378 if err := valid.Validate(); err != nil { 2379 t.Fatalf("unexpected error: %v", err) 2380 } 2381 2382 valid.RelativeDest = "local/../../.." 2383 if err := valid.Validate(); err == nil { 2384 t.Fatalf("expected error: %v", err) 2385 } 2386 } 2387 2388 func TestAllocation_ShouldMigrate(t *testing.T) { 2389 alloc := Allocation{ 2390 PreviousAllocation: "123", 2391 TaskGroup: "foo", 2392 Job: &Job{ 2393 TaskGroups: []*TaskGroup{ 2394 { 2395 Name: "foo", 2396 EphemeralDisk: &EphemeralDisk{ 2397 Migrate: true, 2398 Sticky: true, 2399 }, 2400 }, 2401 }, 2402 }, 2403 } 2404 2405 if !alloc.ShouldMigrate() { 2406 t.Fatalf("bad: %v", alloc) 2407 } 2408 2409 alloc1 := Allocation{ 2410 PreviousAllocation: "123", 2411 TaskGroup: "foo", 2412 Job: &Job{ 2413 TaskGroups: []*TaskGroup{ 2414 { 2415 Name: "foo", 2416 EphemeralDisk: &EphemeralDisk{}, 2417 }, 2418 }, 2419 }, 2420 } 2421 2422 if alloc1.ShouldMigrate() { 2423 t.Fatalf("bad: %v", alloc) 2424 } 2425 2426 alloc2 := Allocation{ 2427 PreviousAllocation: "123", 2428 TaskGroup: "foo", 2429 Job: &Job{ 2430 TaskGroups: []*TaskGroup{ 2431 { 2432 Name: "foo", 2433 EphemeralDisk: &EphemeralDisk{ 2434 Sticky: false, 2435 Migrate: true, 2436 }, 2437 }, 2438 }, 2439 }, 2440 } 2441 2442 if alloc2.ShouldMigrate() { 2443 t.Fatalf("bad: %v", alloc) 2444 } 2445 2446 alloc3 := Allocation{ 2447 PreviousAllocation: "123", 2448 TaskGroup: "foo", 2449 Job: &Job{ 2450 TaskGroups: []*TaskGroup{ 2451 { 2452 Name: "foo", 2453 }, 2454 }, 2455 }, 2456 } 2457 2458 if alloc3.ShouldMigrate() { 2459 t.Fatalf("bad: %v", alloc) 2460 } 2461 2462 // No previous 2463 alloc4 := Allocation{ 2464 TaskGroup: "foo", 2465 Job: &Job{ 2466 TaskGroups: []*TaskGroup{ 2467 { 2468 Name: "foo", 2469 EphemeralDisk: &EphemeralDisk{ 2470 Migrate: true, 2471 Sticky: true, 2472 }, 2473 }, 2474 }, 2475 }, 2476 } 2477 2478 if alloc4.ShouldMigrate() { 2479 t.Fatalf("bad: %v", alloc4) 
2480 } 2481 } 2482 2483 func TestTaskArtifact_Validate_Checksum(t *testing.T) { 2484 cases := []struct { 2485 Input *TaskArtifact 2486 Err bool 2487 }{ 2488 { 2489 &TaskArtifact{ 2490 GetterSource: "foo.com", 2491 GetterOptions: map[string]string{ 2492 "checksum": "no-type", 2493 }, 2494 }, 2495 true, 2496 }, 2497 { 2498 &TaskArtifact{ 2499 GetterSource: "foo.com", 2500 GetterOptions: map[string]string{ 2501 "checksum": "md5:toosmall", 2502 }, 2503 }, 2504 true, 2505 }, 2506 { 2507 &TaskArtifact{ 2508 GetterSource: "foo.com", 2509 GetterOptions: map[string]string{ 2510 "checksum": "invalid:type", 2511 }, 2512 }, 2513 true, 2514 }, 2515 } 2516 2517 for i, tc := range cases { 2518 err := tc.Input.Validate() 2519 if (err != nil) != tc.Err { 2520 t.Fatalf("case %d: %v", i, err) 2521 continue 2522 } 2523 } 2524 } 2525 2526 func TestAllocation_Terminated(t *testing.T) { 2527 type desiredState struct { 2528 ClientStatus string 2529 DesiredStatus string 2530 Terminated bool 2531 } 2532 2533 harness := []desiredState{ 2534 { 2535 ClientStatus: AllocClientStatusPending, 2536 DesiredStatus: AllocDesiredStatusStop, 2537 Terminated: false, 2538 }, 2539 { 2540 ClientStatus: AllocClientStatusRunning, 2541 DesiredStatus: AllocDesiredStatusStop, 2542 Terminated: false, 2543 }, 2544 { 2545 ClientStatus: AllocClientStatusFailed, 2546 DesiredStatus: AllocDesiredStatusStop, 2547 Terminated: true, 2548 }, 2549 { 2550 ClientStatus: AllocClientStatusFailed, 2551 DesiredStatus: AllocDesiredStatusRun, 2552 Terminated: true, 2553 }, 2554 } 2555 2556 for _, state := range harness { 2557 alloc := Allocation{} 2558 alloc.DesiredStatus = state.DesiredStatus 2559 alloc.ClientStatus = state.ClientStatus 2560 if alloc.Terminated() != state.Terminated { 2561 t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated()) 2562 } 2563 } 2564 } 2565 2566 func TestAllocation_ShouldReschedule(t *testing.T) { 2567 type testCase struct { 2568 Desc string 2569 FailTime time.Time 2570 ClientStatus 
string 2571 DesiredStatus string 2572 ReschedulePolicy *ReschedulePolicy 2573 RescheduleTrackers []*RescheduleEvent 2574 ShouldReschedule bool 2575 } 2576 2577 fail := time.Now() 2578 2579 harness := []testCase{ 2580 { 2581 Desc: "Reschedule when desired state is stop", 2582 ClientStatus: AllocClientStatusPending, 2583 DesiredStatus: AllocDesiredStatusStop, 2584 FailTime: fail, 2585 ReschedulePolicy: nil, 2586 ShouldReschedule: false, 2587 }, 2588 { 2589 Desc: "Disabled rescheduling", 2590 ClientStatus: AllocClientStatusFailed, 2591 DesiredStatus: AllocDesiredStatusRun, 2592 FailTime: fail, 2593 ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute}, 2594 ShouldReschedule: false, 2595 }, 2596 { 2597 Desc: "Reschedule when client status is complete", 2598 ClientStatus: AllocClientStatusComplete, 2599 DesiredStatus: AllocDesiredStatusRun, 2600 FailTime: fail, 2601 ReschedulePolicy: nil, 2602 ShouldReschedule: false, 2603 }, 2604 { 2605 Desc: "Reschedule with nil reschedule policy", 2606 ClientStatus: AllocClientStatusFailed, 2607 DesiredStatus: AllocDesiredStatusRun, 2608 FailTime: fail, 2609 ReschedulePolicy: nil, 2610 ShouldReschedule: false, 2611 }, 2612 { 2613 Desc: "Reschedule with unlimited and attempts >0", 2614 ClientStatus: AllocClientStatusFailed, 2615 DesiredStatus: AllocDesiredStatusRun, 2616 FailTime: fail, 2617 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true}, 2618 ShouldReschedule: true, 2619 }, 2620 { 2621 Desc: "Reschedule when client status is complete", 2622 ClientStatus: AllocClientStatusComplete, 2623 DesiredStatus: AllocDesiredStatusRun, 2624 FailTime: fail, 2625 ReschedulePolicy: nil, 2626 ShouldReschedule: false, 2627 }, 2628 { 2629 Desc: "Reschedule with policy when client status complete", 2630 ClientStatus: AllocClientStatusComplete, 2631 DesiredStatus: AllocDesiredStatusRun, 2632 FailTime: fail, 2633 ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute}, 2634 ShouldReschedule: 
false,
		},
		{
			Desc:             "Reschedule with no previous attempts",
			ClientStatus:     AllocClientStatusFailed,
			DesiredStatus:    AllocDesiredStatusRun,
			FailTime:         fail,
			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
			ShouldReschedule: true,
		},
		{
			Desc:             "Reschedule with leftover attempts",
			ClientStatus:     AllocClientStatusFailed,
			DesiredStatus:    AllocDesiredStatusRun,
			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
			FailTime:         fail,
			RescheduleTrackers: []*RescheduleEvent{
				{
					RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(),
				},
			},
			ShouldReschedule: true,
		},
		{
			// The lone prior attempt is outside the policy interval, so it
			// should not count against the attempt budget.
			Desc:             "Reschedule with too old previous attempts",
			ClientStatus:     AllocClientStatusFailed,
			DesiredStatus:    AllocDesiredStatusRun,
			FailTime:         fail,
			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute},
			RescheduleTrackers: []*RescheduleEvent{
				{
					RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(),
				},
			},
			ShouldReschedule: true,
		},
		{
			Desc:             "Reschedule with no leftover attempts",
			ClientStatus:     AllocClientStatusFailed,
			DesiredStatus:    AllocDesiredStatusRun,
			FailTime:         fail,
			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
			RescheduleTrackers: []*RescheduleEvent{
				{
					RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
				},
				{
					RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(),
				},
			},
			ShouldReschedule: false,
		},
	}

	for _, state := range harness {
		alloc := Allocation{}
		alloc.DesiredStatus = state.DesiredStatus
		alloc.ClientStatus = state.ClientStatus
		alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers}

		t.Run(state.Desc, func(t *testing.T) {
			if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule {
				t.Fatalf("expected %v but got %v", state.ShouldReschedule, got)
			}
		})
	}
}

// TestAllocation_LastEventTime verifies that LastEventTime returns the most
// recent task FinishedAt across all task states, falling back to the
// allocation's modify time when no task has finished.
func TestAllocation_LastEventTime(t *testing.T) {
	type testCase struct {
		desc                  string
		taskState             map[string]*TaskState
		expectedLastEventTime time.Time
	}

	t1 := time.Now().UTC()

	testCases := []testCase{
		{
			desc: "nil task state",
			expectedLastEventTime: t1,
		},
		{
			desc:      "empty task state",
			taskState: make(map[string]*TaskState),
			expectedLastEventTime: t1,
		},
		{
			desc: "Finished At not set",
			taskState: map[string]*TaskState{"foo": {State: "start",
				StartedAt: t1.Add(-2 * time.Hour)}},
			expectedLastEventTime: t1,
		},
		{
			desc: "One finished ",
			taskState: map[string]*TaskState{"foo": {State: "start",
				StartedAt:  t1.Add(-2 * time.Hour),
				FinishedAt: t1.Add(-1 * time.Hour)}},
			expectedLastEventTime: t1.Add(-1 * time.Hour),
		},
		{
			// The later of the two FinishedAt values wins.
			desc: "Multiple task groups",
			taskState: map[string]*TaskState{"foo": {State: "start",
				StartedAt:  t1.Add(-2 * time.Hour),
				FinishedAt: t1.Add(-1 * time.Hour)},
				"bar": {State: "start",
					StartedAt:  t1.Add(-2 * time.Hour),
					FinishedAt: t1.Add(-40 * time.Minute)}},
			expectedLastEventTime: t1.Add(-40 * time.Minute),
		},
		{
			desc: "No finishedAt set, one task event, should use modify time",
			taskState: map[string]*TaskState{"foo": {
				State:     "run",
				StartedAt: t1.Add(-2 * time.Hour),
				Events: []*TaskEvent{
					{Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()},
				}},
			},
			expectedLastEventTime: t1,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()}
			alloc.TaskStates = tc.taskState
			require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime())
		})
	}
}

// TestAllocation_NextDelay exercises NextRescheduleTime across the constant,
// exponential, and fibonacci delay functions, including ceiling (MaxDelay)
// enforcement and the delay-reset behavior when an allocation ran longer than
// the most recent delay before failing.
func TestAllocation_NextDelay(t *testing.T) {
	type testCase struct {
		desc                       string
		reschedulePolicy           *ReschedulePolicy
		alloc                      *Allocation
		expectedRescheduleTime     time.Time
		expectedRescheduleEligible bool
	}
	now := time.Now()
	testCases := []testCase{
		{
			desc: "Allocation hasn't failed yet",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "constant",
				Delay:         5 * time.Second,
			},
			alloc: &Allocation{},
			expectedRescheduleTime:     time.Time{},
			expectedRescheduleEligible: false,
		},
		{
			// With no task state the allocation's ModifyTime is the failure
			// reference point.
			desc: "Allocation lacks task state",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "constant",
				Delay:         5 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()},
			expectedRescheduleTime:     now.UTC().Add(5 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			desc: "linear delay, unlimited restarts, no reschedule tracker",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "constant",
				Delay:         5 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "dead",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-2 * time.Second)}},
			},
			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			desc: "linear delay with reschedule tracker",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "constant",
				Delay:         5 * time.Second,
				Interval:      10 * time.Minute,
				Attempts:      2,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-2 * time.Second)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{{
						RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
						Delay:          5 * time.Second,
					}},
				}},
			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			// Two prior attempts inside the interval exhaust Attempts: 2;
			// the computed time is still reported but eligibility is false.
			desc: "linear delay with reschedule tracker, attempts exhausted",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "constant",
				Delay:         5 * time.Second,
				Interval:      10 * time.Minute,
				Attempts:      2,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-2 * time.Second)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{
						{
							RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
					},
				}},
			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
			expectedRescheduleEligible: false,
		},
		{
			desc: "exponential delay - no reschedule tracker",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "exponential",
				Delay:         5 * time.Second,
				MaxDelay:      90 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-2 * time.Second)}},
			},
			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			// Last delay was 20s, so the next doubles to 40s.
			desc: "exponential delay with reschedule tracker",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "exponential",
				Delay:         5 * time.Second,
				MaxDelay:      90 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-2 * time.Second)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{
						{
							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          10 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          20 * time.Second,
						},
					},
				}},
			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			// Doubling 80s would exceed MaxDelay, so the delay clamps at 90s.
			desc: "exponential delay with delay ceiling reached",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "exponential",
				Delay:         5 * time.Second,
				MaxDelay:      90 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-15 * time.Second)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{
						{
							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          10 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          20 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          40 * time.Second,
						},
						{
							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
							Delay:          80 * time.Second,
						},
					},
				}},
			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(90 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			// Test case where most recent reschedule ran longer than delay ceiling
			desc: "exponential delay, delay ceiling reset condition met",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "exponential",
				Delay:         5 * time.Second,
				MaxDelay:      90 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-15 * time.Minute)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{
						{
							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          10 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          20 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          40 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          80 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          90 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          90 * time.Second,
						},
					},
				}},
			expectedRescheduleTime:     now.Add(-15 * time.Minute).Add(5 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			desc: "fibonacci delay - no reschedule tracker",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "fibonacci",
				Delay:         5 * time.Second,
				MaxDelay:      90 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-2 * time.Second)}}},
			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			// 5s + 5s -> 10s, the next fibonacci step.
			desc: "fibonacci delay with reschedule tracker",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "fibonacci",
				Delay:         5 * time.Second,
				MaxDelay:      90 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-2 * time.Second)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{
						{
							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
					},
				}},
			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(10 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			desc: "fibonacci delay with more events",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "fibonacci",
				Delay:         5 * time.Second,
				MaxDelay:      90 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-2 * time.Second)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{
						{
							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          10 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          15 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          25 * time.Second,
						},
					},
				}},
			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			// 25s + 40s = 65s would exceed MaxDelay, so the delay clamps at 50s.
			desc: "fibonacci delay with delay ceiling reached",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "fibonacci",
				Delay:         5 * time.Second,
				MaxDelay:      50 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-15 * time.Second)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{
						{
							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          10 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          15 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          25 * time.Second,
						},
						{
							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
							Delay:          40 * time.Second,
						},
					},
				}},
			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(50 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			// The allocation ran well past the last delay, so the delay
			// resets to the base value.
			desc: "fibonacci delay with delay reset condition met",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "fibonacci",
				Delay:         5 * time.Second,
				MaxDelay:      50 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-5 * time.Minute)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{
						{
							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          10 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          15 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          25 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          40 * time.Second,
						},
					},
				}},
			expectedRescheduleTime:     now.Add(-5 * time.Minute).Add(5 * time.Second),
			expectedRescheduleEligible: true,
		},
		{
			desc: "fibonacci delay with the most recent event that reset delay value",
			reschedulePolicy: &ReschedulePolicy{
				DelayFunction: "fibonacci",
				Delay:         5 * time.Second,
				MaxDelay:      50 * time.Second,
				Unlimited:     true,
			},
			alloc: &Allocation{
				ClientStatus: AllocClientStatusFailed,
				TaskStates: map[string]*TaskState{"foo": {State: "start",
					StartedAt:  now.Add(-1 * time.Hour),
					FinishedAt: now.Add(-5 * time.Second)}},
				RescheduleTracker: &RescheduleTracker{
					Events: []*RescheduleEvent{
						{
							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          10 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          15 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          25 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          40 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
							Delay:          50 * time.Second,
						},
						{
							RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(),
							Delay:          5 * time.Second,
						},
					},
				}},
			expectedRescheduleTime:     now.Add(-5 * time.Second).Add(5 * time.Second),
			expectedRescheduleEligible: true,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			require := require.New(t)
			j := testJob()
			j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy
			tc.alloc.Job = j
			tc.alloc.TaskGroup = j.TaskGroups[0].Name
			reschedTime, allowed := tc.alloc.NextRescheduleTime()
			require.Equal(tc.expectedRescheduleEligible, allowed)
			require.Equal(tc.expectedRescheduleTime, reschedTime)
		})
	}
}

// TestRescheduleTracker_Copy checks that Copy deep-copies the tracker's
// events and that a nil tracker copies to nil.
func TestRescheduleTracker_Copy(t *testing.T) {
	type testCase struct {
		original *RescheduleTracker
		expected *RescheduleTracker
	}

	cases := []testCase{
		{nil, nil},
		{&RescheduleTracker{Events: []*RescheduleEvent{
			{RescheduleTime: 2,
				PrevAllocID: "12",
				PrevNodeID:  "12",
				Delay:       30 * time.Second},
		}}, &RescheduleTracker{Events: []*RescheduleEvent{
			{RescheduleTime: 2,
				PrevAllocID: "12",
				PrevNodeID:  "12",
				Delay:       30 * time.Second},
		}}},
	}

	for _, tc := range cases {
		if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) {
			t.Fatalf("expected %v but got %v", *tc.expected, *got)
		}
	}
}

// TestVault_Validate verifies the Vault stanza validation: an empty policy
// list, a missing change signal, and use of the reserved "root" policy all
// produce errors.
func TestVault_Validate(t *testing.T) {
	v := &Vault{
		Env:        true,
		ChangeMode: VaultChangeModeNoop,
	}

	if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") {
		t.Fatalf("Expected policy list empty error")
	}

	v.Policies = []string{"foo", "root"}
	v.ChangeMode = VaultChangeModeSignal

	err := v.Validate()
	if err == nil {
		t.Fatalf("Expected validation errors")
	}

	if !strings.Contains(err.Error(), "Signal must") {
		t.Fatalf("Expected signal empty error")
	}
	if !strings.Contains(err.Error(), "root") {
		t.Fatalf("Expected root error")
	}
}

// TestParameterizedJobConfig_Validate verifies that an unknown payload
// requirement and overlapping required/optional meta keys are rejected.
func TestParameterizedJobConfig_Validate(t *testing.T) {
	d := &ParameterizedJobConfig{
		Payload: "foo",
	}

	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") {
		t.Fatalf("Expected unknown payload requirement: %v", err)
	}

	d.Payload = DispatchPayloadOptional
	d.MetaOptional = []string{"foo", "bar"}
	d.MetaRequired = []string{"bar", "baz"}

	// "bar" appears in both lists, which must be disjoint.
	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") {
		t.Fatalf("Expected meta not being disjoint error: %v", err)
	}
}

// TestParameterizedJobConfig_Validate_NonBatch verifies that a parameterized
// job stanza is rejected on non-batch job types.
func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) {
	job := testJob()
	job.ParameterizedJob = &ParameterizedJobConfig{
		Payload: DispatchPayloadOptional,
	}
	job.Type = JobTypeSystem

	if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") {
		t.Fatalf("Expected bad scheduler type: %v", err)
	}
}

// TestParameterizedJobConfig_Canonicalize verifies the default payload
// requirement is filled in by Canonicalize.
func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
	d := &ParameterizedJobConfig{}
	d.Canonicalize()
	if d.Payload != DispatchPayloadOptional {
		t.Fatalf("Canonicalize failed")
	}
}

// TestDispatchPayloadConfig_Validate verifies that destination file paths
// are accepted as long as they stay within the task directory.
func TestDispatchPayloadConfig_Validate(t *testing.T) {
	d := &DispatchPayloadConfig{
		File: "foo",
	}

	// task/local/haha
	if err := d.Validate(); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// task/haha
	d.File = "../haha"
	if err := d.Validate(); err != nil {
		t.Fatalf("bad: %v", err)
	}

	// ../haha — escapes the task directory and must be rejected
	d.File = "../../../haha"
	if err := d.Validate(); err == nil {
		t.Fatalf("bad: %v", err)
	}
}

// TestIsRecoverable verifies that only errors explicitly wrapped as
// recoverable are reported as such.
func TestIsRecoverable(t *testing.T) {
	if IsRecoverable(nil) {
		t.Errorf("nil should not be recoverable")
	}
	if IsRecoverable(NewRecoverableError(nil, true)) {
		t.Errorf("NewRecoverableError(nil, true) should not be recoverable")
	}
	if IsRecoverable(fmt.Errorf("i promise im recoverable")) {
		t.Errorf("Custom errors should not be recoverable")
	}
	if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) {
		t.Errorf("Explicitly unrecoverable errors should not be recoverable")
	}
	if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) {
		t.Errorf("Explicitly recoverable errors *should* be recoverable")
	}
}

// TestACLTokenValidate walks an ACLToken through each validation failure
// (missing type, missing policies, policies on a management token, name too
// long) and finally a valid configuration.
func TestACLTokenValidate(t *testing.T) {
	tk := &ACLToken{}

	// Missing a type
	err := tk.Validate()
	assert.NotNil(t, err)
	if !strings.Contains(err.Error(), "client or management") {
		t.Fatalf("bad: %v", err)
	}

	// Missing policies
	tk.Type = ACLClientToken
	err = tk.Validate()
	assert.NotNil(t, err)
	if !strings.Contains(err.Error(), "missing policies") {
		t.Fatalf("bad: %v", err)
	}

	// Invalid policies
	tk.Type = ACLManagementToken
	tk.Policies = []string{"foo"}
	err = tk.Validate()
	assert.NotNil(t, err)
	if !strings.Contains(err.Error(), "associated with policies") {
		t.Fatalf("bad: %v", err)
	}

	// Name too long policies
	tk.Name = ""
	for i := 0; i < 8; i++ {
		tk.Name += uuid.Generate()
	}
	tk.Policies = nil
	err = tk.Validate()
	assert.NotNil(t, err)
	if !strings.Contains(err.Error(), "too long") {
		t.Fatalf("bad: %v", err)
	}

	// Make it valid
	tk.Name = "foo"
	err = tk.Validate()
	assert.Nil(t, err)
}

// TestACLTokenPolicySubset verifies PolicySubset: client tokens are a subset
// only of their own policies, while management tokens subsume everything.
func TestACLTokenPolicySubset(t *testing.T) {
	tk := &ACLToken{
		Type:     ACLClientToken,
		Policies: []string{"foo", "bar", "baz"},
	}

	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
	assert.Equal(t, true, tk.PolicySubset([]string{}))
	assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"}))
	assert.Equal(t, false, tk.PolicySubset([]string{"new"}))

	tk = &ACLToken{
		Type: ACLManagementToken,
	}

	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
	assert.Equal(t, true, tk.PolicySubset([]string{}))
	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"}))
	assert.Equal(t, true, tk.PolicySubset([]string{"new"}))
}

// TestACLTokenSetHash verifies SetHash stores the hash on the token and that
// the hash changes when the token content changes.
func TestACLTokenSetHash(t *testing.T) {
	tk := &ACLToken{
		Name:     "foo",
		Type:     ACLClientToken,
		Policies: []string{"foo", "bar"},
		Global:   false,
	}
	out1 := tk.SetHash()
	assert.NotNil(t, out1)
	assert.NotNil(t, tk.Hash)
	assert.Equal(t, out1, tk.Hash)

	tk.Policies = []string{"foo"}
	out2 := tk.SetHash()
	assert.NotNil(t, out2)
	assert.NotNil(t, tk.Hash)
	assert.Equal(t, out2, tk.Hash)
	assert.NotEqual(t, out1, out2)
}

// TestACLPolicySetHash verifies SetHash stores the hash on the policy and
// that the hash changes when the rules change.
func TestACLPolicySetHash(t *testing.T) {
	ap := &ACLPolicy{
		Name:        "foo",
		Description: "great policy",
		Rules:       "node { policy = \"read\" }",
	}
	out1 := ap.SetHash()
	assert.NotNil(t, out1)
	assert.NotNil(t, ap.Hash)
	assert.Equal(t, out1, ap.Hash)

	ap.Rules = "node { policy = \"write\" }"
	out2 := ap.SetHash()
	assert.NotNil(t, out2)
	assert.NotNil(t, ap.Hash)
	assert.Equal(t, out2, ap.Hash)
	assert.NotEqual(t, out1, out2)
}

// TestTaskEventPopulate verifies PopulateEventDisplayMessage for every event
// type, including nil events, pre-populated messages, and unknown types.
func TestTaskEventPopulate(t *testing.T) {
	prepopulatedEvent := NewTaskEvent(TaskSetup)
	prepopulatedEvent.DisplayMessage = "Hola"
	testcases := []struct {
		event       *TaskEvent
		expectedMsg string
	}{
		{nil, ""},
		{prepopulatedEvent, "Hola"},
		{NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"},
		{NewTaskEvent(TaskStarted), "Task started by client"},
		{NewTaskEvent(TaskReceived), "Task received by client"},
		{NewTaskEvent(TaskFailedValidation), "Validation of task failed"},
		{NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"},
		{NewTaskEvent(TaskSetupFailure), "Task setup failed"},
		{NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"},
		{NewTaskEvent(TaskDriverFailure), "Failed to start task"},
		{NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"},
		{NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"},
		{NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"},
		{NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"},
		{NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"},
		{NewTaskEvent(TaskKilling), "Sent interrupt"},
		{NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"},
		{NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"},
		{NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"},
		{NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""},
		{NewTaskEvent(TaskKilled), "Task successfully killed"},
		{NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"},
		{NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"},
		{NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"},
		{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
		{NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"},
		{NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"},
		{NewTaskEvent(TaskSignaling), "Task being sent a signal"},
		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"},
		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"},
		{NewTaskEvent(TaskRestartSignal), "Task signaled to restart"},
		{NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"},
		{NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"},
		{NewTaskEvent("Unknown Type, No message"), ""},
		{NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"},
	}

	for _, tc := range testcases {
		// PopulateEventDisplayMessage is exercised on a nil receiver by the
		// first case; the nil guard below skips the assertion for it.
		tc.event.PopulateEventDisplayMessage()
		if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg {
			t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage)
		}
	}
}

// TestNetworkResourcesEquals compares pairs of NetworkResource values and
// asserts Equals over differences in IP, MBits, and reserved/dynamic ports.
func TestNetworkResourcesEquals(t *testing.T) {
	require := require.New(t)
	var networkResourcesTest = []struct {
		input    []*NetworkResource
		expected bool
		errorMsg string
	}{
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
			},
			true,
			"Equal network resources should return true",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.0",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
			},
			false,
			"Different IP addresses should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         40,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
			},
			false,
			"Different MBits values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}, {"web", 80}},
				},
			},
			false,
			"Different ReservedPorts lengths should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{},
				},
			},
			false,
			"Empty and non empty ReservedPorts values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"notweb", 80}},
				},
			},
			false,
			"Different valued ReservedPorts values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80}},
				},
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80}, {"web", 80}},
				},
			},
			false,
			"Different DynamicPorts lengths should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80}},
				},
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{},
				},
			},
			false,
			"Empty and non empty DynamicPorts values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80}},
				},
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"notweb", 80}},
				},
			},
			false,
			"Different valued DynamicPorts values should return false",
		},
	}
	for _, testCase := range networkResourcesTest {
		first := testCase.input[0]
		second := testCase.input[1]
		require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg)
	}
}

// TestNode_Canonicalize verifies scheduling eligibility defaults: eligible
// for a fresh node, ineligible when the node is draining.
func TestNode_Canonicalize(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Make sure the eligiblity is set properly
	node := &Node{}
	node.Canonicalize()
	require.Equal(NodeSchedulingEligible, node.SchedulingEligibility)

	node = &Node{
		Drain: true,
	}
	node.Canonicalize()
	require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility)
}

// TestNode_Copy verifies that Copy duplicates all of a node's nested fields
// (attributes, resources, links, meta, events, drain strategy, drivers).
func TestNode_Copy(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	node := &Node{
		ID:         uuid.Generate(),
		SecretID:   uuid.Generate(),
		Datacenter: "dc1",
		Name:       "foobar",
		Attributes: map[string]string{
			"kernel.name":        "linux",
			"arch":               "x86",
			"nomad.version":      "0.5.0",
			"driver.exec":        "1",
			"driver.mock_driver": "1",
		},
		Resources: &Resources{
			CPU:      4000,
			MemoryMB: 8192,
			DiskMB:   100 * 1024,
			IOPS:     150,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "192.168.0.100/32",
					MBits:  1000,
				},
			},
		},
		Reserved: &Resources{
			CPU:      100,
			MemoryMB: 256,
			DiskMB:   4 * 1024,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "192.168.0.100",
					ReservedPorts: []Port{{Label: "ssh", Value: 22}},
					MBits:         1,
				},
			},
		},
		Links: map[string]string{
			"consul": "foobar.dc1",
		},
		Meta: map[string]string{
			"pci-dss":  "true",
			"database": "mysql",
			"version":  "5.6",
		},
		NodeClass:             "linux-medium-pci",
		Status:                NodeStatusReady,
		SchedulingEligibility: NodeSchedulingEligible,
		Drivers: map[string]*DriverInfo{
			"mock_driver": {
				Attributes:        map[string]string{"running": "1"},
				Detected:          true,
				Healthy:           true,
				HealthDescription: "Currently active",
				UpdateTime:        time.Now(),
			},
		},
	}
	// NOTE(review): ComputeClass's return value is intentionally ignored
	// here; the test only needs the computed class set before copying.
	node.ComputeClass()

	node2 := node.Copy()

	require.Equal(node.Attributes, node2.Attributes)
	require.Equal(node.Resources, node2.Resources)
	require.Equal(node.Reserved, node2.Reserved)
	require.Equal(node.Links, node2.Links)
	require.Equal(node.Meta, node2.Meta)
	require.Equal(node.Events, node2.Events)
	require.Equal(node.DrainStrategy, node2.DrainStrategy)
	require.Equal(node.Drivers, node2.Drivers)
}