github.com/zoomfoo/nomad@v0.8.5-0.20180907175415-f28fd3a1a056/nomad/structs/structs_test.go

     1  package structs
     2  
     3  import (
     4  	"fmt"
     5  	"os"
     6  	"reflect"
     7  	"strings"
     8  	"testing"
     9  	"time"
    10  
    11  	"github.com/hashicorp/consul/api"
    12  	"github.com/hashicorp/go-multierror"
    13  	"github.com/hashicorp/nomad/helper/uuid"
    14  	"github.com/stretchr/testify/assert"
    15  	"github.com/stretchr/testify/require"
    16  )
    17  
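        // TestJob_Validate checks that Job.Validate reports missing required fields, rejects invalid job types and misplaced periodic stanzas, and flags duplicate or unnamed task groups.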
    18  func TestJob_Validate(t *testing.T) {
    19  	j := &Job{}
    20  	err := j.Validate()
    21  	mErr := err.(*multierror.Error)
    22  	if !strings.Contains(mErr.Errors[0].Error(), "job region") {
    23  		t.Fatalf("err: %s", err)
    24  	}
    25  	if !strings.Contains(mErr.Errors[1].Error(), "job ID") {
    26  		t.Fatalf("err: %s", err)
    27  	}
    28  	if !strings.Contains(mErr.Errors[2].Error(), "job name") {
    29  		t.Fatalf("err: %s", err)
    30  	}
    31  	if !strings.Contains(mErr.Errors[3].Error(), "namespace") {
    32  		t.Fatalf("err: %s", err)
    33  	}
    34  	if !strings.Contains(mErr.Errors[4].Error(), "job type") {
    35  		t.Fatalf("err: %s", err)
    36  	}
    37  	if !strings.Contains(mErr.Errors[5].Error(), "priority") {
    38  		t.Fatalf("err: %s", err)
    39  	}
    40  	if !strings.Contains(mErr.Errors[6].Error(), "datacenters") {
    41  		t.Fatalf("err: %s", err)
    42  	}
    43  	if !strings.Contains(mErr.Errors[7].Error(), "task groups") {
    44  		t.Fatalf("err: %s", err)
    45  	}
    46  
    47  	j = &Job{
    48  		Type: "invalid-job-type",
    49  	}
    50  	err = j.Validate()
    51  	if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) {
    52  		t.Errorf("expected %s but found: %v", expected, err)
    53  	}
    54  
    55  	j = &Job{
    56  		Type: JobTypeService,
    57  		Periodic: &PeriodicConfig{
    58  			Enabled: true,
    59  		},
    60  	}
    61  	err = j.Validate()
    62  	mErr = err.(*multierror.Error)
    63  	if !strings.Contains(mErr.Error(), "Periodic") {
    64  		t.Fatalf("err: %s", err)
    65  	}
    66  
    67  	j = &Job{
    68  		Region:      "global",
    69  		ID:          uuid.Generate(),
    70  		Namespace:   "test",
    71  		Name:        "my-job",
    72  		Type:        JobTypeService,
    73  		Priority:    50,
    74  		Datacenters: []string{"dc1"},
    75  		TaskGroups: []*TaskGroup{
    76  			{
    77  				Name: "web",
    78  				RestartPolicy: &RestartPolicy{
    79  					Interval: 5 * time.Minute,
    80  					Delay:    10 * time.Second,
    81  					Attempts: 10,
    82  				},
    83  			},
    84  			{
    85  				Name: "web",
    86  				RestartPolicy: &RestartPolicy{
    87  					Interval: 5 * time.Minute,
    88  					Delay:    10 * time.Second,
    89  					Attempts: 10,
    90  				},
    91  			},
    92  			{
    93  				RestartPolicy: &RestartPolicy{
    94  					Interval: 5 * time.Minute,
    95  					Delay:    10 * time.Second,
    96  					Attempts: 10,
    97  				},
    98  			},
    99  		},
   100  	}
   101  	err = j.Validate()
   102  	mErr = err.(*multierror.Error)
   103  	if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") {
   104  		t.Fatalf("err: %s", err)
   105  	}
   106  	if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") {
   107  		t.Fatalf("err: %s", err)
   108  	}
   109  	if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") {
   110  		t.Fatalf("err: %s", err)
   111  	}
   112  }
   113  
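        // TestJob_Warnings checks that Job.Warnings flags an update stanza whose max parallel count exceeds the task group count.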
   114  func TestJob_Warnings(t *testing.T) {
   115  	cases := []struct {
   116  		Name     string
   117  		Job      *Job
   118  		Expected []string
   119  	}{
   120  		{
   121  			Name:     "Higher counts for update stanza",
   122  			Expected: []string{"max parallel count is greater"},
   123  			Job: &Job{
   124  				Type: JobTypeService,
   125  				TaskGroups: []*TaskGroup{
   126  					{
   127  						Name:  "foo",
   128  						Count: 2,
   129  						Update: &UpdateStrategy{
   130  							MaxParallel: 10,
   131  						},
   132  					},
   133  				},
   134  			},
   135  		},
   136  	}
   137  
   138  	for _, c := range cases {
   139  		t.Run(c.Name, func(t *testing.T) {
   140  			warnings := c.Job.Warnings()
   141  			if warnings == nil {
   142  				if len(c.Expected) == 0 {
   143  					return
   144  				} else {
   145  					t.Fatal("Got no warnings when they were expected")
   146  				}
   147  			}
   148  
   149  			a := warnings.Error()
   150  			for _, e := range c.Expected {
   151  				if !strings.Contains(a, e) {
   152  					t.Fatalf("Got warnings %q; didn't contain %q", a, e)
   153  				}
   154  			}
   155  		})
   156  	}
   157  }
   158  
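        // TestJob_SpecChanged checks that SpecChanged ignores mutable fields such as Status and ModifyIndex but detects a real spec change.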
   159  func TestJob_SpecChanged(t *testing.T) {
   160  	// Get a base test job
   161  	base := testJob()
   162  
   163  	// Only modify the indexes/mutable state of the job
   164  	mutatedBase := base.Copy()
   165  	mutatedBase.Status = "foo"
   166  	mutatedBase.ModifyIndex = base.ModifyIndex + 100
   167  
   168  	// change contains a spec change that should be detected
   169  	change := base.Copy()
   170  	change.Priority = 99
   171  
   172  	cases := []struct {
   173  		Name     string
   174  		Original *Job
   175  		New      *Job
   176  		Changed  bool
   177  	}{
   178  		{
   179  			Name:     "Same job except mutable indexes",
   180  			Changed:  false,
   181  			Original: base,
   182  			New:      mutatedBase,
   183  		},
   184  		{
   185  			Name:     "Different",
   186  			Changed:  true,
   187  			Original: base,
   188  			New:      change,
   189  		},
   190  	}
   191  
   192  	for _, c := range cases {
   193  		t.Run(c.Name, func(t *testing.T) {
   194  			if actual := c.Original.SpecChanged(c.New); actual != c.Changed {
   195  				t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed)
   196  			}
   197  		})
   198  	}
   199  }
   200  
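        // testJob returns a fully populated service job used as a fixture by many of the tests in this file.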
   201  func testJob() *Job {
   202  	return &Job{
   203  		Region:      "global",
   204  		ID:          uuid.Generate(),
   205  		Namespace:   "test",
   206  		Name:        "my-job",
   207  		Type:        JobTypeService,
   208  		Priority:    50,
   209  		AllAtOnce:   false,
   210  		Datacenters: []string{"dc1"},
   211  		Constraints: []*Constraint{
   212  			{
   213  				LTarget: "$attr.kernel.name",
   214  				RTarget: "linux",
   215  				Operand: "=",
   216  			},
   217  		},
   218  		Periodic: &PeriodicConfig{
   219  			Enabled: false,
   220  		},
   221  		TaskGroups: []*TaskGroup{
   222  			{
   223  				Name:          "web",
   224  				Count:         10,
   225  				EphemeralDisk: DefaultEphemeralDisk(),
   226  				RestartPolicy: &RestartPolicy{
   227  					Mode:     RestartPolicyModeFail,
   228  					Attempts: 3,
   229  					Interval: 10 * time.Minute,
   230  					Delay:    1 * time.Minute,
   231  				},
   232  				ReschedulePolicy: &ReschedulePolicy{
   233  					Interval:      5 * time.Minute,
   234  					Attempts:      10,
   235  					Delay:         5 * time.Second,
   236  					DelayFunction: "constant",
   237  				},
   238  				Tasks: []*Task{
   239  					{
   240  						Name:   "web",
   241  						Driver: "exec",
   242  						Config: map[string]interface{}{
   243  							"command": "/bin/date",
   244  						},
   245  						Env: map[string]string{
   246  							"FOO": "bar",
   247  						},
   248  						Artifacts: []*TaskArtifact{
   249  							{
   250  								GetterSource: "http://foo.com",
   251  							},
   252  						},
   253  						Services: []*Service{
   254  							{
   255  								Name:      "${TASK}-frontend",
   256  								PortLabel: "http",
   257  							},
   258  						},
   259  						Resources: &Resources{
   260  							CPU:      500,
   261  							MemoryMB: 256,
   262  							Networks: []*NetworkResource{
   263  								{
   264  									MBits:        50,
   265  									DynamicPorts: []Port{{Label: "http"}},
   266  								},
   267  							},
   268  						},
   269  						LogConfig: &LogConfig{
   270  							MaxFiles:      10,
   271  							MaxFileSizeMB: 1,
   272  						},
   273  					},
   274  				},
   275  				Meta: map[string]string{
   276  					"elb_check_type":     "http",
   277  					"elb_check_interval": "30s",
   278  					"elb_check_min":      "3",
   279  				},
   280  			},
   281  		},
   282  		Meta: map[string]string{
   283  			"owner": "armon",
   284  		},
   285  	}
   286  }
   287  
   288  func TestJob_Copy(t *testing.T) {
   289  	j := testJob()
   290  	c := j.Copy()
   291  	if !reflect.DeepEqual(j, c) {
   292  		t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
   293  	}
   294  }
   295  
   296  func TestJob_IsPeriodic(t *testing.T) {
   297  	j := &Job{
   298  		Type: JobTypeService,
   299  		Periodic: &PeriodicConfig{
   300  			Enabled: true,
   301  		},
   302  	}
   303  	if !j.IsPeriodic() {
   304  		t.Fatalf("IsPeriodic() returned false on periodic job")
   305  	}
   306  
   307  	j = &Job{
   308  		Type: JobTypeService,
   309  	}
   310  	if j.IsPeriodic() {
   311  		t.Fatalf("IsPeriodic() returned true on non-periodic job")
   312  	}
   313  }
   314  
   315  func TestJob_IsPeriodicActive(t *testing.T) {
   316  	cases := []struct {
   317  		job    *Job
   318  		active bool
   319  	}{
   320  		{
   321  			job: &Job{
   322  				Type: JobTypeService,
   323  				Periodic: &PeriodicConfig{
   324  					Enabled: true,
   325  				},
   326  			},
   327  			active: true,
   328  		},
   329  		{
   330  			job: &Job{
   331  				Type: JobTypeService,
   332  				Periodic: &PeriodicConfig{
   333  					Enabled: false,
   334  				},
   335  			},
   336  			active: false,
   337  		},
   338  		{
   339  			job: &Job{
   340  				Type: JobTypeService,
   341  				Periodic: &PeriodicConfig{
   342  					Enabled: true,
   343  				},
   344  				Stop: true,
   345  			},
   346  			active: false,
   347  		},
   348  		{
   349  			job: &Job{
   350  				Type: JobTypeService,
   351  				Periodic: &PeriodicConfig{
   352  					Enabled: false,
   353  				},
   354  				ParameterizedJob: &ParameterizedJobConfig{},
   355  			},
   356  			active: false,
   357  		},
   358  	}
   359  
   360  	for i, c := range cases {
   361  		if act := c.job.IsPeriodicActive(); act != c.active {
   362  			t.Fatalf("case %d failed: got %v; want %v", i, act, c.active)
   363  		}
   364  	}
   365  }
   366  
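        // TestJob_SystemJob_Validate checks system job validation: group counts above one are rejected, and affinity and spread stanzas are not allowed.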
   367  func TestJob_SystemJob_Validate(t *testing.T) {
   368  	j := testJob()
   369  	j.Type = JobTypeSystem
   370  	j.TaskGroups[0].ReschedulePolicy = nil
   371  	j.Canonicalize()
   372  
   373  	err := j.Validate()
   374  	if err == nil || !strings.Contains(err.Error(), "exceed") {
   375  		t.Fatalf("expected an error due to count")
   376  	}
   377  
   378  	j.TaskGroups[0].Count = 0
   379  	if err := j.Validate(); err != nil {
   380  		t.Fatalf("unexpected err: %v", err)
   381  	}
   382  
   383  	j.TaskGroups[0].Count = 1
   384  	if err := j.Validate(); err != nil {
   385  		t.Fatalf("unexpected err: %v", err)
   386  	}
   387  
   388  	// Add affinities at the job, task group, and task level; validation should fail
   389  
   390  	j.Affinities = []*Affinity{{
   391  		Operand: "=",
   392  		LTarget: "${node.datacenter}",
   393  		RTarget: "dc1",
   394  	}}
   395  	j.TaskGroups[0].Affinities = []*Affinity{{
   396  		Operand: "=",
   397  		LTarget: "${meta.rack}",
   398  		RTarget: "r1",
   399  	}}
   400  	j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{
   401  		Operand: "=",
   402  		LTarget: "${meta.rack}",
   403  		RTarget: "r1",
   404  	}}
   405  	err = j.Validate()
   406  	require.NotNil(t, err)
   407  	require.Contains(t, err.Error(), "System jobs may not have an affinity stanza")
   408  
   409  	// Add spread at the job and task group level; validation should fail
   410  	j.Spreads = []*Spread{{
   411  		Attribute: "${node.datacenter}",
   412  		Weight:    100,
   413  	}}
   414  	j.TaskGroups[0].Spreads = []*Spread{{
   415  		Attribute: "${node.datacenter}",
   416  		Weight:    100,
   417  	}}
   418  
   419  	err = j.Validate()
   420  	require.NotNil(t, err)
   421  	require.Contains(t, err.Error(), "System jobs may not have a spread stanza")
   422  
   423  }
   424  
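        // TestJob_VaultPolicies checks that VaultPolicies maps task group and task names to the Vault blocks of tasks that use Vault.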
   425  func TestJob_VaultPolicies(t *testing.T) {
   426  	j0 := &Job{}
   427  	e0 := make(map[string]map[string]*Vault, 0)
   428  
   429  	vj1 := &Vault{
   430  		Policies: []string{
   431  			"p1",
   432  			"p2",
   433  		},
   434  	}
   435  	vj2 := &Vault{
   436  		Policies: []string{
   437  			"p3",
   438  			"p4",
   439  		},
   440  	}
   441  	vj3 := &Vault{
   442  		Policies: []string{
   443  			"p5",
   444  		},
   445  	}
   446  	j1 := &Job{
   447  		TaskGroups: []*TaskGroup{
   448  			{
   449  				Name: "foo",
   450  				Tasks: []*Task{
   451  					{
   452  						Name: "t1",
   453  					},
   454  					{
   455  						Name:  "t2",
   456  						Vault: vj1,
   457  					},
   458  				},
   459  			},
   460  			{
   461  				Name: "bar",
   462  				Tasks: []*Task{
   463  					{
   464  						Name:  "t3",
   465  						Vault: vj2,
   466  					},
   467  					{
   468  						Name:  "t4",
   469  						Vault: vj3,
   470  					},
   471  				},
   472  			},
   473  		},
   474  	}
   475  
   476  	e1 := map[string]map[string]*Vault{
   477  		"foo": {
   478  			"t2": vj1,
   479  		},
   480  		"bar": {
   481  			"t3": vj2,
   482  			"t4": vj3,
   483  		},
   484  	}
   485  
   486  	cases := []struct {
   487  		Job      *Job
   488  		Expected map[string]map[string]*Vault
   489  	}{
   490  		{
   491  			Job:      j0,
   492  			Expected: e0,
   493  		},
   494  		{
   495  			Job:      j1,
   496  			Expected: e1,
   497  		},
   498  	}
   499  
   500  	for i, c := range cases {
   501  		got := c.Job.VaultPolicies()
   502  		if !reflect.DeepEqual(got, c.Expected) {
   503  			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
   504  		}
   505  	}
   506  }
   507  
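        // TestJob_RequiredSignals checks that RequiredSignals collects signals from Vault and template change_signal settings and from task kill signals.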
   508  func TestJob_RequiredSignals(t *testing.T) {
   509  	j0 := &Job{}
   510  	e0 := make(map[string]map[string][]string, 0)
   511  
   512  	vj1 := &Vault{
   513  		Policies:   []string{"p1"},
   514  		ChangeMode: VaultChangeModeNoop,
   515  	}
   516  	vj2 := &Vault{
   517  		Policies:     []string{"p1"},
   518  		ChangeMode:   VaultChangeModeSignal,
   519  		ChangeSignal: "SIGUSR1",
   520  	}
   521  	tj1 := &Template{
   522  		SourcePath: "foo",
   523  		DestPath:   "bar",
   524  		ChangeMode: TemplateChangeModeNoop,
   525  	}
   526  	tj2 := &Template{
   527  		SourcePath:   "foo",
   528  		DestPath:     "bar",
   529  		ChangeMode:   TemplateChangeModeSignal,
   530  		ChangeSignal: "SIGUSR2",
   531  	}
   532  	j1 := &Job{
   533  		TaskGroups: []*TaskGroup{
   534  			{
   535  				Name: "foo",
   536  				Tasks: []*Task{
   537  					{
   538  						Name: "t1",
   539  					},
   540  					{
   541  						Name:      "t2",
   542  						Vault:     vj2,
   543  						Templates: []*Template{tj2},
   544  					},
   545  				},
   546  			},
   547  			{
   548  				Name: "bar",
   549  				Tasks: []*Task{
   550  					{
   551  						Name:      "t3",
   552  						Vault:     vj1,
   553  						Templates: []*Template{tj1},
   554  					},
   555  					{
   556  						Name:  "t4",
   557  						Vault: vj2,
   558  					},
   559  				},
   560  			},
   561  		},
   562  	}
   563  
   564  	e1 := map[string]map[string][]string{
   565  		"foo": {
   566  			"t2": {"SIGUSR1", "SIGUSR2"},
   567  		},
   568  		"bar": {
   569  			"t4": {"SIGUSR1"},
   570  		},
   571  	}
   572  
   573  	j2 := &Job{
   574  		TaskGroups: []*TaskGroup{
   575  			{
   576  				Name: "foo",
   577  				Tasks: []*Task{
   578  					{
   579  						Name:       "t1",
   580  						KillSignal: "SIGQUIT",
   581  					},
   582  				},
   583  			},
   584  		},
   585  	}
   586  
   587  	e2 := map[string]map[string][]string{
   588  		"foo": {
   589  			"t1": {"SIGQUIT"},
   590  		},
   591  	}
   592  
   593  	cases := []struct {
   594  		Job      *Job
   595  		Expected map[string]map[string][]string
   596  	}{
   597  		{
   598  			Job:      j0,
   599  			Expected: e0,
   600  		},
   601  		{
   602  			Job:      j1,
   603  			Expected: e1,
   604  		},
   605  		{
   606  			Job:      j2,
   607  			Expected: e2,
   608  		},
   609  	}
   610  
   611  	for i, c := range cases {
   612  		got := c.Job.RequiredSignals()
   613  		if !reflect.DeepEqual(got, c.Expected) {
   614  			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
   615  		}
   616  	}
   617  }
   618  
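        // TestTaskGroup_Validate covers group-level validation: missing names, negative counts, duplicate static ports, duplicate leader tasks, and per-job-type restrictions on update blocks and reschedule policies.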
   619  func TestTaskGroup_Validate(t *testing.T) {
   620  	j := testJob()
   621  	tg := &TaskGroup{
   622  		Count: -1,
   623  		RestartPolicy: &RestartPolicy{
   624  			Interval: 5 * time.Minute,
   625  			Delay:    10 * time.Second,
   626  			Attempts: 10,
   627  			Mode:     RestartPolicyModeDelay,
   628  		},
   629  		ReschedulePolicy: &ReschedulePolicy{
   630  			Interval: 5 * time.Minute,
   631  			Attempts: 5,
   632  			Delay:    5 * time.Second,
   633  		},
   634  	}
   635  	err := tg.Validate(j)
   636  	mErr := err.(*multierror.Error)
   637  	if !strings.Contains(mErr.Errors[0].Error(), "group name") {
   638  		t.Fatalf("err: %s", err)
   639  	}
   640  	if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") {
   641  		t.Fatalf("err: %s", err)
   642  	}
   643  	if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") {
   644  		t.Fatalf("err: %s", err)
   645  	}
   646  
   647  	tg = &TaskGroup{
   648  		Tasks: []*Task{
   649  			{
   650  				Name: "task-a",
   651  				Resources: &Resources{
   652  					Networks: []*NetworkResource{
   653  						{
   654  							ReservedPorts: []Port{{Label: "foo", Value: 123}},
   655  						},
   656  					},
   657  				},
   658  			},
   659  			{
   660  				Name: "task-b",
   661  				Resources: &Resources{
   662  					Networks: []*NetworkResource{
   663  						{
   664  							ReservedPorts: []Port{{Label: "foo", Value: 123}},
   665  						},
   666  					},
   667  				},
   668  			},
   669  		},
   670  	}
   671  	err = tg.Validate(&Job{})
   672  	expected := `Static port 123 already reserved by task-a:foo`
   673  	if !strings.Contains(err.Error(), expected) {
   674  		t.Errorf("expected %s but found: %v", expected, err)
   675  	}
   676  
   677  	tg = &TaskGroup{
   678  		Tasks: []*Task{
   679  			{
   680  				Name: "task-a",
   681  				Resources: &Resources{
   682  					Networks: []*NetworkResource{
   683  						{
   684  							ReservedPorts: []Port{
   685  								{Label: "foo", Value: 123},
   686  								{Label: "bar", Value: 123},
   687  							},
   688  						},
   689  					},
   690  				},
   691  			},
   692  		},
   693  	}
   694  	err = tg.Validate(&Job{})
   695  	expected = `Static port 123 already reserved by task-a:foo`
   696  	if !strings.Contains(err.Error(), expected) {
   697  		t.Errorf("expected %s but found: %v", expected, err)
   698  	}
   699  
   700  	tg = &TaskGroup{
   701  		Name:  "web",
   702  		Count: 1,
   703  		Tasks: []*Task{
   704  			{Name: "web", Leader: true},
   705  			{Name: "web", Leader: true},
   706  			{},
   707  		},
   708  		RestartPolicy: &RestartPolicy{
   709  			Interval: 5 * time.Minute,
   710  			Delay:    10 * time.Second,
   711  			Attempts: 10,
   712  			Mode:     RestartPolicyModeDelay,
   713  		},
   714  		ReschedulePolicy: &ReschedulePolicy{
   715  			Interval:      5 * time.Minute,
   716  			Attempts:      10,
   717  			Delay:         5 * time.Second,
   718  			DelayFunction: "constant",
   719  		},
   720  	}
   721  
   722  	err = tg.Validate(j)
   723  	mErr = err.(*multierror.Error)
   724  	if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") {
   725  		t.Fatalf("err: %s", err)
   726  	}
   727  	if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") {
   728  		t.Fatalf("err: %s", err)
   729  	}
   730  	if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") {
   731  		t.Fatalf("err: %s", err)
   732  	}
   733  	if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") {
   734  		t.Fatalf("err: %s", err)
   735  	}
   736  	if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") {
   737  		t.Fatalf("err: %s", err)
   738  	}
   739  
   740  	tg = &TaskGroup{
   741  		Name:  "web",
   742  		Count: 1,
   743  		Tasks: []*Task{
   744  			{Name: "web", Leader: true},
   745  		},
   746  		Update: DefaultUpdateStrategy.Copy(),
   747  	}
   748  	j.Type = JobTypeBatch
   749  	err = tg.Validate(j)
   750  	if !strings.Contains(err.Error(), "does not allow update block") {
   751  		t.Fatalf("err: %s", err)
   752  	}
   753  
   754  	tg = &TaskGroup{
   755  		Count: -1,
   756  		RestartPolicy: &RestartPolicy{
   757  			Interval: 5 * time.Minute,
   758  			Delay:    10 * time.Second,
   759  			Attempts: 10,
   760  			Mode:     RestartPolicyModeDelay,
   761  		},
   762  		ReschedulePolicy: &ReschedulePolicy{
   763  			Interval: 5 * time.Minute,
   764  			Attempts: 5,
   765  			Delay:    5 * time.Second,
   766  		},
   767  	}
   768  	j.Type = JobTypeSystem
   769  	err = tg.Validate(j)
   770  	if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
   771  		t.Fatalf("err: %s", err)
   772  	}
   773  }
   774  
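        // TestTask_Validate checks required task fields, rejects slashes in task names, and rejects distinct_hosts and distinct_property constraints at the task level.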
   775  func TestTask_Validate(t *testing.T) {
   776  	task := &Task{}
   777  	ephemeralDisk := DefaultEphemeralDisk()
   778  	err := task.Validate(ephemeralDisk, JobTypeBatch)
   779  	mErr := err.(*multierror.Error)
   780  	if !strings.Contains(mErr.Errors[0].Error(), "task name") {
   781  		t.Fatalf("err: %s", err)
   782  	}
   783  	if !strings.Contains(mErr.Errors[1].Error(), "task driver") {
   784  		t.Fatalf("err: %s", err)
   785  	}
   786  	if !strings.Contains(mErr.Errors[2].Error(), "task resources") {
   787  		t.Fatalf("err: %s", err)
   788  	}
   789  
   790  	task = &Task{Name: "web/foo"}
   791  	err = task.Validate(ephemeralDisk, JobTypeBatch)
   792  	mErr = err.(*multierror.Error)
   793  	if !strings.Contains(mErr.Errors[0].Error(), "slashes") {
   794  		t.Fatalf("err: %s", err)
   795  	}
   796  
   797  	task = &Task{
   798  		Name:   "web",
   799  		Driver: "docker",
   800  		Resources: &Resources{
   801  			CPU:      100,
   802  			MemoryMB: 100,
   803  			IOPS:     10,
   804  		},
   805  		LogConfig: DefaultLogConfig(),
   806  	}
   807  	ephemeralDisk.SizeMB = 200
   808  	err = task.Validate(ephemeralDisk, JobTypeBatch)
   809  	if err != nil {
   810  		t.Fatalf("err: %s", err)
   811  	}
   812  
   813  	task.Constraints = append(task.Constraints,
   814  		&Constraint{
   815  			Operand: ConstraintDistinctHosts,
   816  		},
   817  		&Constraint{
   818  			Operand: ConstraintDistinctProperty,
   819  			LTarget: "${meta.rack}",
   820  		})
   821  
   822  	err = task.Validate(ephemeralDisk, JobTypeBatch)
   823  	mErr = err.(*multierror.Error)
   824  	if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") {
   825  		t.Fatalf("err: %s", err)
   826  	}
   827  	if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") {
   828  		t.Fatalf("err: %s", err)
   829  	}
   830  }
   831  
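        // TestTask_Validate_Services checks that duplicate services and checks are rejected, that check intervals are required and not below the minimum, and that services with distinct port labels validate.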
   832  func TestTask_Validate_Services(t *testing.T) {
   833  	s1 := &Service{
   834  		Name:      "service-name",
   835  		PortLabel: "bar",
   836  		Checks: []*ServiceCheck{
   837  			{
   838  				Name:     "check-name",
   839  				Type:     ServiceCheckTCP,
   840  				Interval: 0 * time.Second,
   841  			},
   842  			{
   843  				Name:    "check-name",
   844  				Type:    ServiceCheckTCP,
   845  				Timeout: 2 * time.Second,
   846  			},
   847  			{
   848  				Name:     "check-name",
   849  				Type:     ServiceCheckTCP,
   850  				Interval: 1 * time.Second,
   851  			},
   852  		},
   853  	}
   854  
   855  	s2 := &Service{
   856  		Name:      "service-name",
   857  		PortLabel: "bar",
   858  	}
   859  
   860  	s3 := &Service{
   861  		Name:      "service-A",
   862  		PortLabel: "a",
   863  	}
   864  	s4 := &Service{
   865  		Name:      "service-A",
   866  		PortLabel: "b",
   867  	}
   868  
   869  	ephemeralDisk := DefaultEphemeralDisk()
   870  	ephemeralDisk.SizeMB = 200
   871  	task := &Task{
   872  		Name:   "web",
   873  		Driver: "docker",
   874  		Resources: &Resources{
   875  			CPU:      100,
   876  			MemoryMB: 100,
   877  			IOPS:     10,
   878  		},
   879  		Services: []*Service{s1, s2},
   880  	}
   881  
   882  	task1 := &Task{
   883  		Name:      "web",
   884  		Driver:    "docker",
   885  		Resources: DefaultResources(),
   886  		Services:  []*Service{s3, s4},
   887  		LogConfig: DefaultLogConfig(),
   888  	}
   889  	task1.Resources.Networks = []*NetworkResource{
   890  		{
   891  			MBits: 10,
   892  			DynamicPorts: []Port{
   893  				{
   894  					Label: "a",
   895  					Value: 1000,
   896  				},
   897  				{
   898  					Label: "b",
   899  					Value: 2000,
   900  				},
   901  			},
   902  		},
   903  	}
   904  
   905  	err := task.Validate(ephemeralDisk, JobTypeService)
   906  	if err == nil {
   907  		t.Fatal("expected an error")
   908  	}
   909  
   910  	if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
   911  		t.Fatalf("err: %v", err)
   912  	}
   913  
   914  	if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
   915  		t.Fatalf("err: %v", err)
   916  	}
   917  
   918  	if !strings.Contains(err.Error(), "missing required value interval") {
   919  		t.Fatalf("err: %v", err)
   920  	}
   921  
   922  	if !strings.Contains(err.Error(), "cannot be less than") {
   923  		t.Fatalf("err: %v", err)
   924  	}
   925  
   926  	if err = task1.Validate(ephemeralDisk, JobTypeService); err != nil {
   927  		t.Fatalf("err: %v", err)
   928  	}
   929  }
   930  
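        // TestTask_Validate_Service_AddressMode_Ok lists port label and address mode combinations that should pass service validation.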
   931  func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
   932  	ephemeralDisk := DefaultEphemeralDisk()
   933  	getTask := func(s *Service) *Task {
   934  		task := &Task{
   935  			Name:      "web",
   936  			Driver:    "docker",
   937  			Resources: DefaultResources(),
   938  			Services:  []*Service{s},
   939  			LogConfig: DefaultLogConfig(),
   940  		}
   941  		task.Resources.Networks = []*NetworkResource{
   942  			{
   943  				MBits: 10,
   944  				DynamicPorts: []Port{
   945  					{
   946  						Label: "http",
   947  						Value: 80,
   948  					},
   949  				},
   950  			},
   951  		}
   952  		return task
   953  	}
   954  
   955  	cases := []*Service{
   956  		{
   957  			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
   958  			Name:        "DriverModeWithLabel",
   959  			PortLabel:   "http",
   960  			AddressMode: AddressModeDriver,
   961  		},
   962  		{
   963  			Name:        "DriverModeWithPort",
   964  			PortLabel:   "80",
   965  			AddressMode: AddressModeDriver,
   966  		},
   967  		{
   968  			Name:        "HostModeWithLabel",
   969  			PortLabel:   "http",
   970  			AddressMode: AddressModeHost,
   971  		},
   972  		{
   973  			Name:        "HostModeWithoutLabel",
   974  			AddressMode: AddressModeHost,
   975  		},
   976  		{
   977  			Name:        "DriverModeWithoutLabel",
   978  			AddressMode: AddressModeDriver,
   979  		},
   980  	}
   981  
   982  	for _, service := range cases {
   983  		task := getTask(service)
   984  		t.Run(service.Name, func(t *testing.T) {
   985  			if err := task.Validate(ephemeralDisk, JobTypeService); err != nil {
   986  				t.Fatalf("unexpected err: %v", err)
   987  			}
   988  		})
   989  	}
   990  }
   991  
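        // TestTask_Validate_Service_AddressMode_Bad lists port label and address mode combinations that should fail service validation.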
   992  func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
   993  	ephemeralDisk := DefaultEphemeralDisk()
   994  	getTask := func(s *Service) *Task {
   995  		task := &Task{
   996  			Name:      "web",
   997  			Driver:    "docker",
   998  			Resources: DefaultResources(),
   999  			Services:  []*Service{s},
  1000  			LogConfig: DefaultLogConfig(),
  1001  		}
  1002  		task.Resources.Networks = []*NetworkResource{
  1003  			{
  1004  				MBits: 10,
  1005  				DynamicPorts: []Port{
  1006  					{
  1007  						Label: "http",
  1008  						Value: 80,
  1009  					},
  1010  				},
  1011  			},
  1012  		}
  1013  		return task
  1014  	}
  1015  
  1016  	cases := []*Service{
  1017  		{
  1018  			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
  1019  			Name:        "DriverModeWithLabel",
  1020  			PortLabel:   "asdf",
  1021  			AddressMode: AddressModeDriver,
  1022  		},
  1023  		{
  1024  			Name:        "HostModeWithLabel",
  1025  			PortLabel:   "asdf",
  1026  			AddressMode: AddressModeHost,
  1027  		},
  1028  		{
  1029  			Name:        "HostModeWithPort",
  1030  			PortLabel:   "80",
  1031  			AddressMode: AddressModeHost,
  1032  		},
  1033  	}
  1034  
  1035  	for _, service := range cases {
  1036  		task := getTask(service)
  1037  		t.Run(service.Name, func(t *testing.T) {
  1038  			err := task.Validate(ephemeralDisk, JobTypeService)
  1039  			if err == nil {
  1040  				t.Fatalf("expected an error")
  1041  			}
  1042  			//t.Logf("err: %v", err)
  1043  		})
  1044  	}
  1045  }
  1046  
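        // TestTask_Validate_Service_Check exercises ServiceCheck validation: timeouts, initial check status values, and HTTP path requirements.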
  1047  func TestTask_Validate_Service_Check(t *testing.T) {
  1048  
  1049  	invalidCheck := ServiceCheck{
  1050  		Name:     "check-name",
  1051  		Command:  "/bin/true",
  1052  		Type:     ServiceCheckScript,
  1053  		Interval: 10 * time.Second,
  1054  	}
  1055  
  1056  	err := invalidCheck.validate()
  1057  	if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") {
  1058  		t.Fatalf("expected a timeout validation error but received: %q", err)
  1059  	}
  1060  
  1061  	check1 := ServiceCheck{
  1062  		Name:     "check-name",
  1063  		Type:     ServiceCheckTCP,
  1064  		Interval: 10 * time.Second,
  1065  		Timeout:  2 * time.Second,
  1066  	}
  1067  
  1068  	if err := check1.validate(); err != nil {
  1069  		t.Fatalf("err: %v", err)
  1070  	}
  1071  
  1072  	check1.InitialStatus = "foo"
  1073  	err = check1.validate()
  1074  	if err == nil {
  1075  		t.Fatal("Expected an error")
  1076  	}
  1077  
  1078  	if !strings.Contains(err.Error(), "invalid initial check state (foo)") {
  1079  		t.Fatalf("err: %v", err)
  1080  	}
  1081  
  1082  	check1.InitialStatus = api.HealthCritical
  1083  	err = check1.validate()
  1084  	if err != nil {
  1085  		t.Fatalf("err: %v", err)
  1086  	}
  1087  
  1088  	check1.InitialStatus = api.HealthPassing
  1089  	err = check1.validate()
  1090  	if err != nil {
  1091  		t.Fatalf("err: %v", err)
  1092  	}
  1093  
  1094  	check1.InitialStatus = ""
  1095  	err = check1.validate()
  1096  	if err != nil {
  1097  		t.Fatalf("err: %v", err)
  1098  	}
  1099  
  1100  	check2 := ServiceCheck{
  1101  		Name:     "check-name-2",
  1102  		Type:     ServiceCheckHTTP,
  1103  		Interval: 10 * time.Second,
  1104  		Timeout:  2 * time.Second,
  1105  		Path:     "/foo/bar",
  1106  	}
  1107  
  1108  	err = check2.validate()
  1109  	if err != nil {
  1110  		t.Fatalf("err: %v", err)
  1111  	}
  1112  
  1113  	check2.Path = ""
  1114  	err = check2.validate()
  1115  	if err == nil {
  1116  		t.Fatal("Expected an error")
  1117  	}
  1118  	if !strings.Contains(err.Error(), "valid http path") {
  1119  		t.Fatalf("err: %v", err)
  1120  	}
  1121  
  1122  	check2.Path = "http://www.example.com"
  1123  	err = check2.validate()
  1124  	if err == nil {
  1125  		t.Fatal("Expected an error")
  1126  	}
  1127  	if !strings.Contains(err.Error(), "relative http path") {
  1128  		t.Fatalf("err: %v", err)
  1129  	}
  1130  }
  1131  
  1132  // TestTask_Validate_Service_Check_AddressMode asserts that checks do not
  1133  // inherit address mode but do inherit ports.
  1134  func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
  1135  	getTask := func(s *Service) *Task {
  1136  		return &Task{
  1137  			Resources: &Resources{
  1138  				Networks: []*NetworkResource{
  1139  					{
  1140  						DynamicPorts: []Port{
  1141  							{
  1142  								Label: "http",
  1143  								Value: 9999,
  1144  							},
  1145  						},
  1146  					},
  1147  				},
  1148  			},
  1149  			Services: []*Service{s},
  1150  		}
  1151  	}
  1152  
  1153  	cases := []struct {
  1154  		Service     *Service
  1155  		ErrContains string
  1156  	}{
  1157  		{
  1158  			Service: &Service{
  1159  				Name:        "invalid-driver",
  1160  				PortLabel:   "80",
  1161  				AddressMode: "host",
  1162  			},
  1163  			ErrContains: `port label "80" referenced`,
  1164  		},
  1165  		{
  1166  			Service: &Service{
  1167  				Name:        "http-driver-fail-1",
  1168  				PortLabel:   "80",
  1169  				AddressMode: "driver",
  1170  				Checks: []*ServiceCheck{
  1171  					{
  1172  						Name:     "invalid-check-1",
  1173  						Type:     "tcp",
  1174  						Interval: time.Second,
  1175  						Timeout:  time.Second,
  1176  					},
  1177  				},
  1178  			},
  1179  			ErrContains: `check "invalid-check-1" cannot use a numeric port`,
  1180  		},
  1181  		{
  1182  			Service: &Service{
  1183  				Name:        "http-driver-fail-2",
  1184  				PortLabel:   "80",
  1185  				AddressMode: "driver",
  1186  				Checks: []*ServiceCheck{
  1187  					{
  1188  						Name:      "invalid-check-2",
  1189  						Type:      "tcp",
  1190  						PortLabel: "80",
  1191  						Interval:  time.Second,
  1192  						Timeout:   time.Second,
  1193  					},
  1194  				},
  1195  			},
  1196  			ErrContains: `check "invalid-check-2" cannot use a numeric port`,
  1197  		},
  1198  		{
  1199  			Service: &Service{
  1200  				Name:        "http-driver-fail-3",
  1201  				PortLabel:   "80",
  1202  				AddressMode: "driver",
  1203  				Checks: []*ServiceCheck{
  1204  					{
  1205  						Name:      "invalid-check-3",
  1206  						Type:      "tcp",
  1207  						PortLabel: "missing-port-label",
  1208  						Interval:  time.Second,
  1209  						Timeout:   time.Second,
  1210  					},
  1211  				},
  1212  			},
  1213  			ErrContains: `port label "missing-port-label" referenced`,
  1214  		},
  1215  		{
  1216  			Service: &Service{
  1217  				Name:        "http-driver-passes",
  1218  				PortLabel:   "80",
  1219  				AddressMode: "driver",
  1220  				Checks: []*ServiceCheck{
  1221  					{
  1222  						Name:     "valid-script-check",
  1223  						Type:     "script",
  1224  						Command:  "ok",
  1225  						Interval: time.Second,
  1226  						Timeout:  time.Second,
  1227  					},
  1228  					{
  1229  						Name:      "valid-host-check",
  1230  						Type:      "tcp",
  1231  						PortLabel: "http",
  1232  						Interval:  time.Second,
  1233  						Timeout:   time.Second,
  1234  					},
  1235  					{
  1236  						Name:        "valid-driver-check",
  1237  						Type:        "tcp",
  1238  						AddressMode: "driver",
  1239  						Interval:    time.Second,
  1240  						Timeout:     time.Second,
  1241  					},
  1242  				},
  1243  			},
  1244  		},
  1245  		{
  1246  			Service: &Service{
  1247  				Name: "empty-address-3673-passes-1",
  1248  				Checks: []*ServiceCheck{
  1249  					{
  1250  						Name:      "valid-port-label",
  1251  						Type:      "tcp",
  1252  						PortLabel: "http",
  1253  						Interval:  time.Second,
  1254  						Timeout:   time.Second,
  1255  					},
  1256  					{
  1257  						Name:     "empty-is-ok",
  1258  						Type:     "script",
  1259  						Command:  "ok",
  1260  						Interval: time.Second,
  1261  						Timeout:  time.Second,
  1262  					},
  1263  				},
  1264  			},
  1265  		},
  1266  		{
  1267  			Service: &Service{
  1268  				Name: "empty-address-3673-passes-2",
  1269  			},
  1270  		},
  1271  		{
  1272  			Service: &Service{
  1273  				Name: "empty-address-3673-fails",
  1274  				Checks: []*ServiceCheck{
  1275  					{
  1276  						Name:     "empty-is-not-ok",
  1277  						Type:     "tcp",
  1278  						Interval: time.Second,
  1279  						Timeout:  time.Second,
  1280  					},
  1281  				},
  1282  			},
  1283  			ErrContains: `invalid: check requires a port but neither check nor service`,
  1284  		},
  1285  	}
  1286  
  1287  	for _, tc := range cases {
  1288  		tc := tc
  1289  		task := getTask(tc.Service)
  1290  		t.Run(tc.Service.Name, func(t *testing.T) {
  1291  			err := validateServices(task)
  1292  			if err == nil && tc.ErrContains == "" {
  1293  				// Ok!
  1294  				return
  1295  			}
  1296  			if err == nil {
  1297  				t.Fatalf("no error returned. expected: %s", tc.ErrContains)
  1298  			}
  1299  			if !strings.Contains(err.Error(), tc.ErrContains) {
  1300  				t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
  1301  			}
  1302  		})
  1303  	}
  1304  }
  1305  
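        // TestTask_Validate_Service_Check_GRPC checks that a gRPC check requires a port label.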
  1306  func TestTask_Validate_Service_Check_GRPC(t *testing.T) {
  1307  	t.Parallel()
  1308  	// Bad (no port)
  1309  	invalidGRPC := &ServiceCheck{
  1310  		Type:     ServiceCheckGRPC,
  1311  		Interval: time.Second,
  1312  		Timeout:  time.Second,
  1313  	}
  1314  	service := &Service{
  1315  		Name:   "test",
  1316  		Checks: []*ServiceCheck{invalidGRPC},
  1317  	}
  1318  
  1319  	assert.Error(t, service.Validate())
  1320  
  1321  	// Good
  1322  	service.Checks[0] = &ServiceCheck{
  1323  		Type:      ServiceCheckGRPC,
  1324  		Interval:  time.Second,
  1325  		Timeout:   time.Second,
  1326  		PortLabel: "some-port-label",
  1327  	}
  1328  
  1329  	assert.NoError(t, service.Validate())
  1330  }
  1331  
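        // TestTask_Validate_Service_Check_CheckRestart checks that negative check_restart limits and grace periods are rejected while zero values are allowed.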
  1332  func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
  1333  	t.Parallel()
  1334  	invalidCheckRestart := &CheckRestart{
  1335  		Limit: -1,
  1336  		Grace: -1,
  1337  	}
  1338  
  1339  	err := invalidCheckRestart.Validate()
  1340  	assert.NotNil(t, err, "invalidCheckRestart.Validate()")
  1341  	assert.Len(t, err.(*multierror.Error).Errors, 2)
  1342  
  1343  	validCheckRestart := &CheckRestart{}
  1344  	assert.Nil(t, validCheckRestart.Validate())
  1345  
  1346  	validCheckRestart.Limit = 1
  1347  	validCheckRestart.Grace = 1
  1348  	assert.Nil(t, validCheckRestart.Validate())
  1349  }
  1350  
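        // TestTask_Validate_LogConfig checks that a task's log storage may not exceed the ephemeral disk size.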
  1351  func TestTask_Validate_LogConfig(t *testing.T) {
  1352  	task := &Task{
  1353  		LogConfig: DefaultLogConfig(),
  1354  	}
  1355  	ephemeralDisk := &EphemeralDisk{
  1356  		SizeMB: 1,
  1357  	}
  1358  
  1359  	err := task.Validate(ephemeralDisk, JobTypeService)
  1360  	mErr := err.(*multierror.Error)
  1361  	if !strings.Contains(mErr.Errors[3].Error(), "log storage") {
  1362  		t.Fatalf("err: %s", err)
  1363  	}
  1364  }
  1365  
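        // TestTask_Validate_Template checks template validation: empty templates fail, two templates may not share a destination, and environment templates may not use signal change mode.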
  1366  func TestTask_Validate_Template(t *testing.T) {
  1367  
  1368  	bad := &Template{}
  1369  	task := &Task{
  1370  		Templates: []*Template{bad},
  1371  	}
  1372  	ephemeralDisk := &EphemeralDisk{
  1373  		SizeMB: 1,
  1374  	}
  1375  
  1376  	err := task.Validate(ephemeralDisk, JobTypeService)
  1377  	if !strings.Contains(err.Error(), "Template 1 validation failed") {
  1378  		t.Fatalf("err: %s", err)
  1379  	}
  1380  
  1381  	// Have two templates that share the same destination
  1382  	good := &Template{
  1383  		SourcePath: "foo",
  1384  		DestPath:   "local/foo",
  1385  		ChangeMode: "noop",
  1386  	}
  1387  
  1388  	task.Templates = []*Template{good, good}
  1389  	err = task.Validate(ephemeralDisk, JobTypeService)
  1390  	if !strings.Contains(err.Error(), "same destination as") {
  1391  		t.Fatalf("err: %s", err)
  1392  	}
  1393  
  1394  	// Env templates can't use signals
  1395  	task.Templates = []*Template{
  1396  		{
  1397  			Envvars:    true,
  1398  			ChangeMode: "signal",
  1399  		},
  1400  	}
  1401  
  1402  	err = task.Validate(ephemeralDisk, JobTypeService)
  1403  	if err == nil {
  1404  		t.Fatalf("expected error from Template.Validate")
  1405  	}
  1406  	if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) {
  1407  		t.Errorf("expected to find %q but found %v", expected, err)
  1408  	}
  1409  }
  1410  
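        // TestTemplate_Validate table-tests Template.Validate: source and destination paths, change modes, splay, destination path escapes, and file permissions.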
  1411  func TestTemplate_Validate(t *testing.T) {
  1412  	cases := []struct {
  1413  		Tmpl         *Template
  1414  		Fail         bool
  1415  		ContainsErrs []string
  1416  	}{
  1417  		{
  1418  			Tmpl: &Template{},
  1419  			Fail: true,
  1420  			ContainsErrs: []string{
  1421  				"specify a source path",
  1422  				"specify a destination",
  1423  				TemplateChangeModeInvalidError.Error(),
  1424  			},
  1425  		},
  1426  		{
  1427  			Tmpl: &Template{
  1428  				Splay: -100,
  1429  			},
  1430  			Fail: true,
  1431  			ContainsErrs: []string{
  1432  				"positive splay",
  1433  			},
  1434  		},
  1435  		{
  1436  			Tmpl: &Template{
  1437  				ChangeMode: "foo",
  1438  			},
  1439  			Fail: true,
  1440  			ContainsErrs: []string{
  1441  				TemplateChangeModeInvalidError.Error(),
  1442  			},
  1443  		},
  1444  		{
  1445  			Tmpl: &Template{
  1446  				ChangeMode: "signal",
  1447  			},
  1448  			Fail: true,
  1449  			ContainsErrs: []string{
  1450  				"specify signal value",
  1451  			},
  1452  		},
  1453  		{
  1454  			Tmpl: &Template{
  1455  				SourcePath: "foo",
  1456  				DestPath:   "../../root",
  1457  				ChangeMode: "noop",
  1458  			},
  1459  			Fail: true,
  1460  			ContainsErrs: []string{
  1461  				"destination escapes",
  1462  			},
  1463  		},
  1464  		{
  1465  			Tmpl: &Template{
  1466  				SourcePath: "foo",
  1467  				DestPath:   "local/foo",
  1468  				ChangeMode: "noop",
  1469  			},
  1470  			Fail: false,
  1471  		},
  1472  		{
  1473  			Tmpl: &Template{
  1474  				SourcePath: "foo",
  1475  				DestPath:   "local/foo",
  1476  				ChangeMode: "noop",
  1477  				Perms:      "0444",
  1478  			},
  1479  			Fail: false,
  1480  		},
  1481  		{
  1482  			Tmpl: &Template{
  1483  				SourcePath: "foo",
  1484  				DestPath:   "local/foo",
  1485  				ChangeMode: "noop",
  1486  				Perms:      "zza",
  1487  			},
  1488  			Fail: true,
  1489  			ContainsErrs: []string{
  1490  				"as octal",
  1491  			},
  1492  		},
  1493  	}
  1494  
  1495  	for i, c := range cases {
  1496  		err := c.Tmpl.Validate()
  1497  		if err != nil {
  1498  			if !c.Fail {
  1499  				t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err)
  1500  			}
  1501  
  1502  			e := err.Error()
  1503  			for _, exp := range c.ContainsErrs {
  1504  				if !strings.Contains(e, exp) {
  1505  					t.Fatalf("Case %d: should have contained error %q: %q", i+1, exp, e)
  1506  				}
  1507  			}
  1508  		} else if c.Fail {
  1509  			t.Fatalf("Case %d: should have failed: %v", i+1, err)
  1510  		}
  1511  	}
  1512  }
  1513  
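        // TestConstraint_Validate exercises operand-specific validation for regexp, version, distinct_property, distinct_hosts, set_contains, and unknown operands.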
  1514  func TestConstraint_Validate(t *testing.T) {
  1515  	c := &Constraint{}
  1516  	err := c.Validate()
  1517  	mErr := err.(*multierror.Error)
  1518  	if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") {
  1519  		t.Fatalf("err: %s", err)
  1520  	}
  1521  
  1522  	c = &Constraint{
  1523  		LTarget: "$attr.kernel.name",
  1524  		RTarget: "linux",
  1525  		Operand: "=",
  1526  	}
  1527  	err = c.Validate()
  1528  	if err != nil {
  1529  		t.Fatalf("err: %v", err)
  1530  	}
  1531  
  1532  	// Perform additional regexp validation
  1533  	c.Operand = ConstraintRegex
  1534  	c.RTarget = "(foo"
  1535  	err = c.Validate()
  1536  	mErr = err.(*multierror.Error)
  1537  	if !strings.Contains(mErr.Errors[0].Error(), "missing closing") {
  1538  		t.Fatalf("err: %s", err)
  1539  	}
  1540  
  1541  	// Perform version validation
  1542  	c.Operand = ConstraintVersion
  1543  	c.RTarget = "~> foo"
  1544  	err = c.Validate()
  1545  	mErr = err.(*multierror.Error)
  1546  	if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") {
  1547  		t.Fatalf("err: %s", err)
  1548  	}
  1549  
  1550  	// Perform distinct_property validation
  1551  	c.Operand = ConstraintDistinctProperty
  1552  	c.RTarget = "0"
  1553  	err = c.Validate()
  1554  	mErr = err.(*multierror.Error)
  1555  	if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") {
  1556  		t.Fatalf("err: %s", err)
  1557  	}
  1558  
  1559  	c.RTarget = "-1"
  1560  	err = c.Validate()
  1561  	mErr = err.(*multierror.Error)
  1562  	if !strings.Contains(mErr.Errors[0].Error(), "to uint64") {
  1563  		t.Fatalf("err: %s", err)
  1564  	}
  1565  
  1566  	// Perform distinct_hosts validation
  1567  	c.Operand = ConstraintDistinctHosts
  1568  	c.LTarget = ""
  1569  	c.RTarget = ""
  1570  	if err := c.Validate(); err != nil {
  1571  		t.Fatalf("expected valid constraint: %v", err)
  1572  	}
  1573  
  1574  	// Perform set_contains validation
  1575  	c.Operand = ConstraintSetContains
  1576  	c.RTarget = ""
  1577  	err = c.Validate()
  1578  	mErr = err.(*multierror.Error)
  1579  	if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") {
  1580  		t.Fatalf("err: %s", err)
  1581  	}
  1582  
  1583  	// Perform LTarget validation
  1584  	c.Operand = ConstraintRegex
  1585  	c.RTarget = "foo"
  1586  	c.LTarget = ""
  1587  	err = c.Validate()
  1588  	mErr = err.(*multierror.Error)
  1589  	if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") {
  1590  		t.Fatalf("err: %s", err)
  1591  	}
  1592  
  1593  	// Perform constraint type validation
  1594  	c.Operand = "foo"
  1595  	err = c.Validate()
  1596  	mErr = err.(*multierror.Error)
  1597  	if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") {
  1598  		t.Fatalf("err: %s", err)
  1599  	}
  1600  }
  1601  
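        // TestAffinity_Validate table-tests affinity validation: operands, RTarget requirements, weight bounds, and version and regexp syntax.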
  1602  func TestAffinity_Validate(t *testing.T) {
  1603  
  1604  	type tc struct {
  1605  		affinity *Affinity
  1606  		err      error
  1607  		name     string
  1608  	}
  1609  
  1610  	testCases := []tc{
  1611  		{
  1612  			affinity: &Affinity{},
  1613  			err:      fmt.Errorf("Missing affinity operand"),
  1614  		},
  1615  		{
  1616  			affinity: &Affinity{
  1617  				Operand: "foo",
  1618  				LTarget: "${meta.node_class}",
  1619  				Weight:  10,
  1620  			},
  1621  			err: fmt.Errorf("Unknown affinity operator \"foo\""),
  1622  		},
  1623  		{
  1624  			affinity: &Affinity{
  1625  				Operand: "=",
  1626  				LTarget: "${meta.node_class}",
  1627  				Weight:  10,
  1628  			},
  1629  			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
  1630  		},
  1631  		{
  1632  			affinity: &Affinity{
  1633  				Operand: "=",
  1634  				LTarget: "${meta.node_class}",
  1635  				RTarget: "c4",
  1636  				Weight:  0,
  1637  			},
  1638  			err: fmt.Errorf("Affinity weight cannot be zero"),
  1639  		},
  1640  		{
  1641  			affinity: &Affinity{
  1642  				Operand: "=",
  1643  				LTarget: "${meta.node_class}",
  1644  				RTarget: "c4",
  1645  				Weight:  500,
  1646  			},
  1647  			err: fmt.Errorf("Affinity weight must be within the range [-100,100]"),
  1648  		},
  1649  		{
  1650  			affinity: &Affinity{
  1651  				Operand: "=",
  1652  				LTarget: "${node.class}",
  1653  				Weight:  10,
  1654  			},
  1655  			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
  1656  		},
  1657  		{
  1658  			affinity: &Affinity{
  1659  				Operand: "version",
  1660  				LTarget: "${meta.os}",
  1661  				RTarget: ">>2.0",
  1662  				Weight:  500,
  1663  			},
  1664  			err: fmt.Errorf("Version affinity is invalid"),
  1665  		},
  1666  		{
  1667  			affinity: &Affinity{
  1668  				Operand: "regexp",
  1669  				LTarget: "${meta.os}",
  1670  				RTarget: "\\K2.0",
  1671  				Weight:  100,
  1672  			},
  1673  			err: fmt.Errorf("Regular expression failed to compile"),
  1674  		},
  1675  	}
  1676  
  1677  	for _, tc := range testCases {
  1678  		t.Run(tc.name, func(t *testing.T) {
  1679  			err := tc.affinity.Validate()
  1680  			if tc.err != nil {
  1681  				require.NotNil(t, err)
  1682  				require.Contains(t, err.Error(), tc.err.Error())
  1683  			} else {
  1684  				require.Nil(t, err)
  1685  			}
  1686  		})
  1687  	}
  1688  }
  1689  
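        // TestUpdateStrategy_Validate checks that an update strategy with invalid health checks, counts, and deadlines reports each violation.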
  1690  func TestUpdateStrategy_Validate(t *testing.T) {
  1691  	u := &UpdateStrategy{
  1692  		MaxParallel:      0,
  1693  		HealthCheck:      "foo",
  1694  		MinHealthyTime:   -10,
  1695  		HealthyDeadline:  -15,
  1696  		ProgressDeadline: -25,
  1697  		AutoRevert:       false,
  1698  		Canary:           -1,
  1699  	}
  1700  
  1701  	err := u.Validate()
  1702  	mErr := err.(*multierror.Error)
  1703  	if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") {
  1704  		t.Fatalf("err: %s", err)
  1705  	}
  1706  	if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than one") {
  1707  		t.Fatalf("err: %s", err)
  1708  	}
  1709  	if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") {
  1710  		t.Fatalf("err: %s", err)
  1711  	}
  1712  	if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") {
  1713  		t.Fatalf("err: %s", err)
  1714  	}
  1715  	if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") {
  1716  		t.Fatalf("err: %s", err)
  1717  	}
  1718  	if !strings.Contains(mErr.Errors[5].Error(), "Progress deadline must be zero or greater") {
  1719  		t.Fatalf("err: %s", err)
  1720  	}
  1721  	if !strings.Contains(mErr.Errors[6].Error(), "Minimum healthy time must be less than healthy deadline") {
  1722  		t.Fatalf("err: %s", err)
  1723  	}
  1724  	if !strings.Contains(mErr.Errors[7].Error(), "Healthy deadline must be less than progress deadline") {
  1725  		t.Fatalf("err: %s", err)
  1726  	}
  1727  }
  1728  
  1729  func TestResource_NetIndex(t *testing.T) {
  1730  	r := &Resources{
  1731  		Networks: []*NetworkResource{
  1732  			{Device: "eth0"},
  1733  			{Device: "lo0"},
  1734  			{Device: ""},
  1735  		},
  1736  	}
  1737  	if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 {
  1738  		t.Fatalf("Bad: %d", idx)
  1739  	}
  1740  	if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 {
  1741  		t.Fatalf("Bad: %d", idx)
  1742  	}
  1743  	if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 {
  1744  		t.Fatalf("Bad: %d", idx)
  1745  	}
  1746  }
  1747  
  1748  func TestResource_Superset(t *testing.T) {
  1749  	r1 := &Resources{
  1750  		CPU:      2000,
  1751  		MemoryMB: 2048,
  1752  		DiskMB:   10000,
  1753  		IOPS:     100,
  1754  	}
  1755  	r2 := &Resources{
  1756  		CPU:      2000,
  1757  		MemoryMB: 1024,
  1758  		DiskMB:   5000,
  1759  		IOPS:     50,
  1760  	}
  1761  
  1762  	if s, _ := r1.Superset(r1); !s {
  1763  		t.Fatalf("bad")
  1764  	}
  1765  	if s, _ := r1.Superset(r2); !s {
  1766  		t.Fatalf("bad")
  1767  	}
  1768  	if s, _ := r2.Superset(r1); s {
  1769  		t.Fatalf("bad")
  1770  	}
  1771  	if s, _ := r2.Superset(r2); !s {
  1772  		t.Fatalf("bad")
  1773  	}
  1774  }
  1775  
  1776  func TestResource_Add(t *testing.T) {
  1777  	r1 := &Resources{
  1778  		CPU:      2000,
  1779  		MemoryMB: 2048,
  1780  		DiskMB:   10000,
  1781  		IOPS:     100,
  1782  		Networks: []*NetworkResource{
  1783  			{
  1784  				CIDR:          "10.0.0.0/8",
  1785  				MBits:         100,
  1786  				ReservedPorts: []Port{{"ssh", 22}},
  1787  			},
  1788  		},
  1789  	}
  1790  	r2 := &Resources{
  1791  		CPU:      2000,
  1792  		MemoryMB: 1024,
  1793  		DiskMB:   5000,
  1794  		IOPS:     50,
  1795  		Networks: []*NetworkResource{
  1796  			{
  1797  				IP:            "10.0.0.1",
  1798  				MBits:         50,
  1799  				ReservedPorts: []Port{{"web", 80}},
  1800  			},
  1801  		},
  1802  	}
  1803  
  1804  	err := r1.Add(r2)
  1805  	if err != nil {
  1806  		t.Fatalf("Err: %v", err)
  1807  	}
  1808  
  1809  	expect := &Resources{
  1810  		CPU:      3000,
  1811  		MemoryMB: 3072,
  1812  		DiskMB:   15000,
  1813  		IOPS:     150,
  1814  		Networks: []*NetworkResource{
  1815  			{
  1816  				CIDR:          "10.0.0.0/8",
  1817  				MBits:         150,
  1818  				ReservedPorts: []Port{{"ssh", 22}, {"web", 80}},
  1819  			},
  1820  		},
  1821  	}
  1822  
  1823  	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
  1824  		t.Fatalf("bad: %#v %#v", expect, r1)
  1825  	}
  1826  }
  1827  
  1828  func TestResource_Add_Network(t *testing.T) {
  1829  	r1 := &Resources{}
  1830  	r2 := &Resources{
  1831  		Networks: []*NetworkResource{
  1832  			{
  1833  				MBits:        50,
  1834  				DynamicPorts: []Port{{"http", 0}, {"https", 0}},
  1835  			},
  1836  		},
  1837  	}
  1838  	r3 := &Resources{
  1839  		Networks: []*NetworkResource{
  1840  			{
  1841  				MBits:        25,
  1842  				DynamicPorts: []Port{{"admin", 0}},
  1843  			},
  1844  		},
  1845  	}
  1846  
  1847  	err := r1.Add(r2)
  1848  	if err != nil {
  1849  		t.Fatalf("Err: %v", err)
  1850  	}
  1851  	err = r1.Add(r3)
  1852  	if err != nil {
  1853  		t.Fatalf("Err: %v", err)
  1854  	}
  1855  
  1856  	expect := &Resources{
  1857  		Networks: []*NetworkResource{
  1858  			{
  1859  				MBits:        75,
  1860  				DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
  1861  			},
  1862  		},
  1863  	}
  1864  
  1865  	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
  1866  		t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0])
  1867  	}
  1868  }
  1869  
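        // TestEncodeDecode round-trips a value through the Encode and Decode helpers and checks that the result is unchanged.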
  1870  func TestEncodeDecode(t *testing.T) {
  1871  	type FooRequest struct {
  1872  		Foo string
  1873  		Bar int
  1874  		Baz bool
  1875  	}
  1876  	arg := &FooRequest{
  1877  		Foo: "test",
  1878  		Bar: 42,
  1879  		Baz: true,
  1880  	}
  1881  	buf, err := Encode(1, arg)
  1882  	if err != nil {
  1883  		t.Fatalf("err: %v", err)
  1884  	}
  1885  
  1886  	var out FooRequest
  1887  	err = Decode(buf[1:], &out)
  1888  	if err != nil {
  1889  		t.Fatalf("err: %v", err)
  1890  	}
  1891  
  1892  	if !reflect.DeepEqual(arg, &out) {
  1893  		t.Fatalf("bad: %#v %#v", arg, out)
  1894  	}
  1895  }
  1896  
  1897  func BenchmarkEncodeDecode(b *testing.B) {
  1898  	job := testJob()
  1899  
  1900  	for i := 0; i < b.N; i++ {
  1901  		buf, err := Encode(1, job)
  1902  		if err != nil {
  1903  			b.Fatalf("err: %v", err)
  1904  		}
  1905  
  1906  		var out Job
  1907  		err = Decode(buf[1:], &out)
  1908  		if err != nil {
  1909  			b.Fatalf("err: %v", err)
  1910  		}
  1911  	}
  1912  }
  1913  
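        // TestInvalidServiceCheck covers Service.Validate and ValidateName: invalid check types, malformed service names, and checks that require a port.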
  1914  func TestInvalidServiceCheck(t *testing.T) {
  1915  	s := Service{
  1916  		Name:      "service-name",
  1917  		PortLabel: "bar",
  1918  		Checks: []*ServiceCheck{
  1919  			{
  1920  				Name: "check-name",
  1921  				Type: "lol",
  1922  			},
  1923  		},
  1924  	}
  1925  	if err := s.Validate(); err == nil {
  1926  		t.Fatalf("Service should be invalid (invalid type)")
  1927  	}
  1928  
  1929  	s = Service{
  1930  		Name:      "service.name",
  1931  		PortLabel: "bar",
  1932  	}
  1933  	if err := s.ValidateName(s.Name); err == nil {
  1934  		t.Fatalf("Service should be invalid (contains a dot): %v", err)
  1935  	}
  1936  
  1937  	s = Service{
  1938  		Name:      "-my-service",
  1939  		PortLabel: "bar",
  1940  	}
  1941  	if err := s.Validate(); err == nil {
  1942  		t.Fatalf("Service should be invalid (begins with a hyphen): %v", err)
  1943  	}
  1944  
  1945  	s = Service{
  1946  		Name:      "my-service-${NOMAD_META_FOO}",
  1947  		PortLabel: "bar",
  1948  	}
  1949  	if err := s.Validate(); err != nil {
  1950  		t.Fatalf("Service should be valid: %v", err)
  1951  	}
  1952  
  1953  	s = Service{
  1954  		Name:      "my_service-${NOMAD_META_FOO}",
  1955  		PortLabel: "bar",
  1956  	}
  1957  	if err := s.Validate(); err == nil {
  1958  		t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err)
  1959  	}
  1960  
  1961  	s = Service{
  1962  		Name:      "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456",
  1963  		PortLabel: "bar",
  1964  	}
  1965  	if err := s.ValidateName(s.Name); err == nil {
  1966  		t.Fatalf("Service should be invalid (too long): %v", err)
  1967  	}
  1968  
  1969  	s = Service{
  1970  		Name: "service-name",
  1971  		Checks: []*ServiceCheck{
  1972  			{
  1973  				Name:     "check-tcp",
  1974  				Type:     ServiceCheckTCP,
  1975  				Interval: 5 * time.Second,
  1976  				Timeout:  2 * time.Second,
  1977  			},
  1978  			{
  1979  				Name:     "check-http",
  1980  				Type:     ServiceCheckHTTP,
  1981  				Path:     "/foo",
  1982  				Interval: 5 * time.Second,
  1983  				Timeout:  2 * time.Second,
  1984  			},
  1985  		},
  1986  	}
  1987  	if err := s.Validate(); err == nil {
  1988  		t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err)
  1989  	}
  1990  
  1991  	s = Service{
  1992  		Name: "service-name",
  1993  		Checks: []*ServiceCheck{
  1994  			{
  1995  				Name:     "check-script",
  1996  				Type:     ServiceCheckScript,
  1997  				Command:  "/bin/date",
  1998  				Interval: 5 * time.Second,
  1999  				Timeout:  2 * time.Second,
  2000  			},
  2001  		},
  2002  	}
  2003  	if err := s.Validate(); err != nil {
  2004  		t.Fatalf("unexpected error: %v", err)
  2005  	}
  2006  }
  2007  
  2008  func TestDistinctCheckID(t *testing.T) {
  2009  	c1 := ServiceCheck{
  2010  		Name:     "web-health",
  2011  		Type:     "http",
  2012  		Path:     "/health",
  2013  		Interval: 2 * time.Second,
  2014  		Timeout:  3 * time.Second,
  2015  	}
  2016  	c2 := ServiceCheck{
  2017  		Name:     "web-health",
  2018  		Type:     "http",
  2019  		Path:     "/health1",
  2020  		Interval: 2 * time.Second,
  2021  		Timeout:  3 * time.Second,
  2022  	}
  2023  
  2024  	c3 := ServiceCheck{
  2025  		Name:     "web-health",
  2026  		Type:     "http",
  2027  		Path:     "/health",
  2028  		Interval: 4 * time.Second,
  2029  		Timeout:  3 * time.Second,
  2030  	}
  2031  	serviceID := "123"
  2032  	c1Hash := c1.Hash(serviceID)
  2033  	c2Hash := c2.Hash(serviceID)
  2034  	c3Hash := c3.Hash(serviceID)
  2035  
  2036  	if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash {
  2037  		t.Fatalf("Checks need to be unique c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash)
  2038  	}
  2039  
  2040  }
  2041  
  2042  func TestService_Canonicalize(t *testing.T) {
  2043  	job := "example"
  2044  	taskGroup := "cache"
  2045  	task := "redis"
  2046  
  2047  	s := Service{
  2048  		Name: "${TASK}-db",
  2049  	}
  2050  
  2051  	s.Canonicalize(job, taskGroup, task)
  2052  	if s.Name != "redis-db" {
  2053  		t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name)
  2054  	}
  2055  
  2056  	s.Name = "db"
  2057  	s.Canonicalize(job, taskGroup, task)
  2058  	if s.Name != "db" {
  2059  		t.Fatalf("Expected name: %v, Actual: %v", "db", s.Name)
  2060  	}
  2061  
  2062  	s.Name = "${JOB}-${TASKGROUP}-${TASK}-db"
  2063  	s.Canonicalize(job, taskGroup, task)
  2064  	if s.Name != "example-cache-redis-db" {
  2065  		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
  2066  	}
  2067  
  2068  	s.Name = "${BASE}-db"
  2069  	s.Canonicalize(job, taskGroup, task)
  2070  	if s.Name != "example-cache-redis-db" {
  2071  		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
  2072  	}
  2073  
  2074  }
  2075  
  2076  func TestJob_ExpandServiceNames(t *testing.T) {
  2077  	j := &Job{
  2078  		Name: "my-job",
  2079  		TaskGroups: []*TaskGroup{
  2080  			{
  2081  				Name: "web",
  2082  				Tasks: []*Task{
  2083  					{
  2084  						Name: "frontend",
  2085  						Services: []*Service{
  2086  							{
  2087  								Name: "${BASE}-default",
  2088  							},
  2089  							{
  2090  								Name: "jmx",
  2091  							},
  2092  						},
  2093  					},
  2094  				},
  2095  			},
  2096  			{
  2097  				Name: "admin",
  2098  				Tasks: []*Task{
  2099  					{
  2100  						Name: "admin-web",
  2101  					},
  2102  				},
  2103  			},
  2104  		},
  2105  	}
  2106  
  2107  	j.Canonicalize()
  2108  
  2109  	service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name
  2110  	if service1Name != "my-job-web-frontend-default" {
  2111  		t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name)
  2112  	}
  2113  
  2114  	service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name
  2115  	if service2Name != "jmx" {
  2116  		t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name)
  2117  	}
  2118  
  2119  }
  2120  
  2121  func TestPeriodicConfig_EnabledInvalid(t *testing.T) {
  2122  	// Create a config that is enabled but with no interval specified.
  2123  	p := &PeriodicConfig{Enabled: true}
  2124  	if err := p.Validate(); err == nil {
  2125  		t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid")
  2126  	}
  2127  
  2128  	// Create a config that is enabled, with a spec but no type specified.
  2129  	p = &PeriodicConfig{Enabled: true, Spec: "foo"}
  2130  	if err := p.Validate(); err == nil {
  2131  		t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid")
  2132  	}
  2133  
  2134  	// Create a config that is enabled, with a spec type but no spec specified.
  2135  	p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron}
  2136  	if err := p.Validate(); err == nil {
  2137  		t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid")
  2138  	}
  2139  
  2140  	// Create a config that is enabled, with a bad time zone.
  2141  	p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"}
  2142  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") {
  2143  		t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
  2144  	}
  2145  }
  2146  
  2147  func TestPeriodicConfig_InvalidCron(t *testing.T) {
  2148  	specs := []string{"foo", "* *", "@foo"}
  2149  	for _, spec := range specs {
  2150  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  2151  		p.Canonicalize()
  2152  		if err := p.Validate(); err == nil {
  2153  			t.Fatalf("Invalid cron spec %q should not validate", spec)
  2154  		}
  2155  	}
  2156  }
  2157  
  2158  func TestPeriodicConfig_ValidCron(t *testing.T) {
  2159  	specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"}
  2160  	for _, spec := range specs {
  2161  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  2162  		p.Canonicalize()
  2163  		if err := p.Validate(); err != nil {
  2164  			t.Fatalf("Valid cron spec %q failed validation: %v", spec, err)
  2165  		}
  2166  	}
  2167  }
  2168  
  2169  func TestPeriodicConfig_NextCron(t *testing.T) {
  2170  	require := require.New(t)
  2171  
  2172  	type testExpectation struct {
  2173  		Time     time.Time
  2174  		HasError bool
  2175  		ErrorMsg string
  2176  	}
  2177  
  2178  	from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC)
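        	// The specs below cover: a cron that can never fire again (year 1980), one that fires every
        	// five minutes, and one whose backwards hour range (15-0) should fail to parse.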
  2179  	specs := []string{"0 0 29 2 * 1980",
  2180  		"*/5 * * * *",
  2181  		"1 15-0 * * 1-5"}
  2182  	expected := []*testExpectation{
  2183  		{
  2184  			Time:     time.Time{},
  2185  			HasError: false,
  2186  		},
  2187  		{
  2188  			Time:     time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC),
  2189  			HasError: false,
  2190  		},
  2191  		{
  2192  			Time:     time.Time{},
  2193  			HasError: true,
  2194  			ErrorMsg: "failed parsing cron expression",
  2195  		},
  2196  	}
  2197  
  2198  	for i, spec := range specs {
  2199  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  2200  		p.Canonicalize()
  2201  		n, err := p.Next(from)
  2202  		nextExpected := expected[i]
  2203  
  2204  		require.Equal(nextExpected.Time, n)
  2205  		require.Equal(nextExpected.HasError, err != nil)
  2206  		if err != nil {
  2207  			require.True(strings.Contains(err.Error(), nextExpected.ErrorMsg))
  2208  		}
  2209  	}
  2210  }
  2211  
  2212  func TestPeriodicConfig_ValidTimeZone(t *testing.T) {
  2213  	zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"}
  2214  	for _, zone := range zones {
  2215  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone}
  2216  		p.Canonicalize()
  2217  		if err := p.Validate(); err != nil {
  2218  			t.Fatalf("Valid time zone %q errored: %v", zone, err)
  2219  		}
  2220  	}
  2221  }
  2222  
  2223  func TestPeriodicConfig_DST(t *testing.T) {
  2224  	require := require.New(t)
  2225  
  2226  	// On Sun, Mar 12, 2:00 am 2017: +1 hour UTC
  2227  	p := &PeriodicConfig{
  2228  		Enabled:  true,
  2229  		SpecType: PeriodicSpecCron,
  2230  		Spec:     "0 2 11-12 3 * 2017",
  2231  		TimeZone: "America/Los_Angeles",
  2232  	}
  2233  	p.Canonicalize()
  2234  
  2235  	t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
  2236  	t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)
  2237  
  2238  	// E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
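        	// (America/Los_Angeles is UTC-8 before the spring-forward on March 12, 2017 and UTC-7 after.)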
  2239  	e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
  2240  	e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC)
  2241  
  2242  	n1, err := p.Next(t1)
  2243  	require.Nil(err)
  2244  
  2245  	n2, err := p.Next(t2)
  2246  	require.Nil(err)
  2247  
  2248  	require.Equal(e1, n1.UTC())
  2249  	require.Equal(e2, n2.UTC())
  2250  }
  2251  
  2252  func TestRestartPolicy_Validate(t *testing.T) {
  2253  	// Policy with acceptable restart options passes
  2254  	p := &RestartPolicy{
  2255  		Mode:     RestartPolicyModeFail,
  2256  		Attempts: 0,
  2257  		Interval: 5 * time.Second,
  2258  	}
  2259  	if err := p.Validate(); err != nil {
  2260  		t.Fatalf("err: %v", err)
  2261  	}
  2262  
  2263  	// Policy with ambiguous restart options fails
  2264  	p = &RestartPolicy{
  2265  		Mode:     RestartPolicyModeDelay,
  2266  		Attempts: 0,
  2267  		Interval: 5 * time.Second,
  2268  	}
  2269  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") {
  2270  		t.Fatalf("expect ambiguity error, got: %v", err)
  2271  	}
  2272  
  2273  	// Bad policy mode fails
  2274  	p = &RestartPolicy{
  2275  		Mode:     "nope",
  2276  		Attempts: 1,
  2277  		Interval: 5 * time.Second,
  2278  	}
  2279  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") {
  2280  		t.Fatalf("expect mode error, got: %v", err)
  2281  	}
  2282  
  2283  	// Fails when attempts*delay does not fit inside interval
  2284  	p = &RestartPolicy{
  2285  		Mode:     RestartPolicyModeDelay,
  2286  		Attempts: 3,
  2287  		Delay:    5 * time.Second,
  2288  		Interval: 5 * time.Second,
  2289  	}
  2290  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") {
  2291  		t.Fatalf("expect restart interval error, got: %v", err)
  2292  	}
  2293  
  2294  	// Fails when interval is too small
  2295  	p = &RestartPolicy{
  2296  		Mode:     RestartPolicyModeDelay,
  2297  		Attempts: 3,
  2298  		Delay:    5 * time.Second,
  2299  		Interval: 2 * time.Second,
  2300  	}
  2301  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") {
  2302  		t.Fatalf("expect interval too small error, got: %v", err)
  2303  	}
  2304  }
  2305  
  2306  func TestReschedulePolicy_Validate(t *testing.T) {
  2307  	type testCase struct {
  2308  		desc             string
  2309  		ReschedulePolicy *ReschedulePolicy
  2310  		errors           []error
  2311  	}
  2312  
  2313  	testCases := []testCase{
  2314  		{
  2315  			desc: "Nil",
  2316  		},
  2317  		{
  2318  			desc: "Disabled",
  2319  			ReschedulePolicy: &ReschedulePolicy{
  2320  				Attempts: 0,
  2321  				Interval: 0 * time.Second},
  2322  		},
  2323  		{
  2324  			desc: "Disabled",
  2325  			ReschedulePolicy: &ReschedulePolicy{
  2326  				Attempts: -1,
  2327  				Interval: 5 * time.Minute},
  2328  		},
  2329  		{
  2330  			desc: "Valid Linear Delay",
  2331  			ReschedulePolicy: &ReschedulePolicy{
  2332  				Attempts:      1,
  2333  				Interval:      5 * time.Minute,
  2334  				Delay:         10 * time.Second,
  2335  				DelayFunction: "constant"},
  2336  		},
  2337  		{
  2338  			desc: "Valid Exponential Delay",
  2339  			ReschedulePolicy: &ReschedulePolicy{
  2340  				Attempts:      5,
  2341  				Interval:      1 * time.Hour,
  2342  				Delay:         30 * time.Second,
  2343  				MaxDelay:      5 * time.Minute,
  2344  				DelayFunction: "exponential"},
  2345  		},
  2346  		{
  2347  			desc: "Valid Fibonacci Delay",
  2348  			ReschedulePolicy: &ReschedulePolicy{
  2349  				Attempts:      5,
  2350  				Interval:      15 * time.Minute,
  2351  				Delay:         10 * time.Second,
  2352  				MaxDelay:      5 * time.Minute,
  2353  				DelayFunction: "fibonacci"},
  2354  		},
  2355  		{
  2356  			desc: "Invalid delay function",
  2357  			ReschedulePolicy: &ReschedulePolicy{
  2358  				Attempts:      1,
  2359  				Interval:      1 * time.Second,
  2360  				DelayFunction: "blah"},
  2361  			errors: []error{
  2362  				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
  2363  				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  2364  				fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions),
  2365  			},
  2366  		},
  2367  		{
  2368  			desc: "Invalid delay ceiling",
  2369  			ReschedulePolicy: &ReschedulePolicy{
  2370  				Attempts:      1,
  2371  				Interval:      8 * time.Second,
  2372  				DelayFunction: "exponential",
  2373  				Delay:         15 * time.Second,
  2374  				MaxDelay:      5 * time.Second},
  2375  			errors: []error{
  2376  				fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)",
  2377  					15*time.Second, 5*time.Second),
  2378  			},
  2379  		},
  2380  		{
  2381  			desc: "Invalid delay and interval",
  2382  			ReschedulePolicy: &ReschedulePolicy{
  2383  				Attempts:      1,
  2384  				Interval:      1 * time.Second,
  2385  				DelayFunction: "constant"},
  2386  			errors: []error{
  2387  				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
  2388  				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  2389  			},
  2390  		}, {
  2391  			// Should suggest 3h20m as the interval
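  			// With a constant 20m delay only 3 attempts fit in 1h; 10 attempts need 10 x 20m = 200m.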
  2392  			desc: "Invalid Attempts - linear delay",
  2393  			ReschedulePolicy: &ReschedulePolicy{
  2394  				Attempts:      10,
  2395  				Interval:      1 * time.Hour,
  2396  				Delay:         20 * time.Minute,
  2397  				DelayFunction: "constant",
  2398  			},
  2399  			errors: []error{
  2400  				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+
  2401  					" delay function %q", 3, time.Hour, 20*time.Minute, "constant"),
  2402  				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
  2403  					200*time.Minute, 10),
  2404  			},
  2405  		},
  2406  		{
  2407  			// Should suggest 4h40m as the interval
  2408  			// Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
  2409  			desc: "Invalid Attempts - exponential delay",
  2410  			ReschedulePolicy: &ReschedulePolicy{
  2411  				Attempts:      10,
  2412  				Interval:      30 * time.Minute,
  2413  				Delay:         5 * time.Minute,
  2414  				MaxDelay:      40 * time.Minute,
  2415  				DelayFunction: "exponential",
  2416  			},
  2417  			errors: []error{
  2418  				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
  2419  					"delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute,
  2420  					"exponential", 40*time.Minute),
  2421  				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
  2422  					280*time.Minute, 10),
  2423  			},
  2424  		},
  2425  		{
  2426  			// Should suggest 8h as the interval
  2427  			// Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
  2428  			desc: "Invalid Attempts - fibonacci delay",
  2429  			ReschedulePolicy: &ReschedulePolicy{
  2430  				Attempts:      10,
  2431  				Interval:      1 * time.Hour,
  2432  				Delay:         20 * time.Minute,
  2433  				MaxDelay:      80 * time.Minute,
  2434  				DelayFunction: "fibonacci",
  2435  			},
  2436  			errors: []error{
  2437  				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
  2438  					"delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute,
  2439  					"fibonacci", 80*time.Minute),
  2440  				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
  2441  					480*time.Minute, 10),
  2442  			},
  2443  		},
  2444  		{
  2445  			desc: "Ambiguous Unlimited config, has both attempts and unlimited set",
  2446  			ReschedulePolicy: &ReschedulePolicy{
  2447  				Attempts:      1,
  2448  				Unlimited:     true,
  2449  				DelayFunction: "exponential",
  2450  				Delay:         5 * time.Minute,
  2451  				MaxDelay:      1 * time.Hour,
  2452  			},
  2453  			errors: []error{
  2454  				fmt.Errorf("Interval must be a non zero value if Attempts > 0"),
  2455  				fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true),
  2456  			},
  2457  		},
  2458  		{
  2459  			desc: "Invalid Unlimited config",
  2460  			ReschedulePolicy: &ReschedulePolicy{
  2461  				Attempts:      1,
  2462  				Interval:      1 * time.Second,
  2463  				Unlimited:     true,
  2464  				DelayFunction: "exponential",
  2465  			},
  2466  			errors: []error{
  2467  				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  2468  				fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  2469  			},
  2470  		},
  2471  		{
  2472  			desc: "Valid Unlimited config",
  2473  			ReschedulePolicy: &ReschedulePolicy{
  2474  				Unlimited:     true,
  2475  				DelayFunction: "exponential",
  2476  				Delay:         5 * time.Second,
  2477  				MaxDelay:      1 * time.Hour,
  2478  			},
  2479  		},
  2480  	}
  2481  
  2482  	for _, tc := range testCases {
  2483  		t.Run(tc.desc, func(t *testing.T) {
  2484  			require := require.New(t)
  2485  			gotErr := tc.ReschedulePolicy.Validate()
  2486  			if tc.errors != nil {
  2487  				// Validate all errors
  2488  				for _, err := range tc.errors {
  2489  					require.Contains(gotErr.Error(), err.Error())
  2490  				}
  2491  			} else {
  2492  				require.Nil(gotErr)
  2493  			}
  2494  		})
  2495  	}
  2496  }
  2497  
  2498  func TestAllocation_Index(t *testing.T) {
  2499  	a1 := Allocation{
  2500  		Name:      "example.cache[1]",
  2501  		TaskGroup: "cache",
  2502  		JobID:     "example",
  2503  		Job: &Job{
  2504  			ID:         "example",
  2505  			TaskGroups: []*TaskGroup{{Name: "cache"}}},
  2506  	}
  2507  	e1 := uint(1)
  2508  	a2 := a1.Copy()
  2509  	a2.Name = "example.cache[713127]"
  2510  	e2 := uint(713127)
  2511  
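        	// Index() parses the bracketed ordinal out of the allocation name.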
  2512  	if a1.Index() != e1 || a2.Index() != e2 {
  2513  		t.Fatalf("Got %d and %d", a1.Index(), a2.Index())
  2514  	}
  2515  }
  2516  
  2517  func TestTaskArtifact_Validate_Source(t *testing.T) {
  2518  	valid := &TaskArtifact{GetterSource: "google.com"}
  2519  	if err := valid.Validate(); err != nil {
  2520  		t.Fatalf("unexpected error: %v", err)
  2521  	}
  2522  }
  2523  
  2524  func TestTaskArtifact_Validate_Dest(t *testing.T) {
  2525  	valid := &TaskArtifact{GetterSource: "google.com"}
  2526  	if err := valid.Validate(); err != nil {
  2527  		t.Fatalf("unexpected error: %v", err)
  2528  	}
  2529  
  2530  	valid.RelativeDest = "local/"
  2531  	if err := valid.Validate(); err != nil {
  2532  		t.Fatalf("unexpected error: %v", err)
  2533  	}
  2534  
  2535  	valid.RelativeDest = "local/.."
  2536  	if err := valid.Validate(); err != nil {
  2537  		t.Fatalf("unexpected error: %v", err)
  2538  	}
  2539  
  2540  	valid.RelativeDest = "local/../../.."
  2541  	if err := valid.Validate(); err == nil {
  2542  		t.Fatalf("expected error for relative destination %q", valid.RelativeDest)
  2543  	}
  2544  }
  2545  
  2546  func TestAllocation_ShouldMigrate(t *testing.T) {
  2547  	alloc := Allocation{
  2548  		PreviousAllocation: "123",
  2549  		TaskGroup:          "foo",
  2550  		Job: &Job{
  2551  			TaskGroups: []*TaskGroup{
  2552  				{
  2553  					Name: "foo",
  2554  					EphemeralDisk: &EphemeralDisk{
  2555  						Migrate: true,
  2556  						Sticky:  true,
  2557  					},
  2558  				},
  2559  			},
  2560  		},
  2561  	}
  2562  
  2563  	if !alloc.ShouldMigrate() {
  2564  		t.Fatalf("bad: %v", alloc)
  2565  	}
  2566  
  2567  	alloc1 := Allocation{
  2568  		PreviousAllocation: "123",
  2569  		TaskGroup:          "foo",
  2570  		Job: &Job{
  2571  			TaskGroups: []*TaskGroup{
  2572  				{
  2573  					Name:          "foo",
  2574  					EphemeralDisk: &EphemeralDisk{},
  2575  				},
  2576  			},
  2577  		},
  2578  	}
  2579  
  2580  	if alloc1.ShouldMigrate() {
  2581  		t.Fatalf("bad: %v", alloc1)
  2582  	}
  2583  
  2584  	alloc2 := Allocation{
  2585  		PreviousAllocation: "123",
  2586  		TaskGroup:          "foo",
  2587  		Job: &Job{
  2588  			TaskGroups: []*TaskGroup{
  2589  				{
  2590  					Name: "foo",
  2591  					EphemeralDisk: &EphemeralDisk{
  2592  						Sticky:  false,
  2593  						Migrate: true,
  2594  					},
  2595  				},
  2596  			},
  2597  		},
  2598  	}
  2599  
  2600  	if alloc2.ShouldMigrate() {
  2601  		t.Fatalf("bad: %v", alloc2)
  2602  	}
  2603  
  2604  	alloc3 := Allocation{
  2605  		PreviousAllocation: "123",
  2606  		TaskGroup:          "foo",
  2607  		Job: &Job{
  2608  			TaskGroups: []*TaskGroup{
  2609  				{
  2610  					Name: "foo",
  2611  				},
  2612  			},
  2613  		},
  2614  	}
  2615  
  2616  	if alloc3.ShouldMigrate() {
  2617  		t.Fatalf("bad: %v", alloc3)
  2618  	}
  2619  
  2620  	// No previous
  2621  	alloc4 := Allocation{
  2622  		TaskGroup: "foo",
  2623  		Job: &Job{
  2624  			TaskGroups: []*TaskGroup{
  2625  				{
  2626  					Name: "foo",
  2627  					EphemeralDisk: &EphemeralDisk{
  2628  						Migrate: true,
  2629  						Sticky:  true,
  2630  					},
  2631  				},
  2632  			},
  2633  		},
  2634  	}
  2635  
  2636  	if alloc4.ShouldMigrate() {
  2637  		t.Fatalf("bad: %v", alloc4)
  2638  	}
  2639  }
  2640  
  2641  func TestTaskArtifact_Validate_Checksum(t *testing.T) {
  2642  	cases := []struct {
  2643  		Input *TaskArtifact
  2644  		Err   bool
  2645  	}{
  2646  		{
  2647  			&TaskArtifact{
  2648  				GetterSource: "foo.com",
  2649  				GetterOptions: map[string]string{
  2650  					"checksum": "no-type",
  2651  				},
  2652  			},
  2653  			true,
  2654  		},
  2655  		{
  2656  			&TaskArtifact{
  2657  				GetterSource: "foo.com",
  2658  				GetterOptions: map[string]string{
  2659  					"checksum": "md5:toosmall",
  2660  				},
  2661  			},
  2662  			true,
  2663  		},
  2664  		{
  2665  			&TaskArtifact{
  2666  				GetterSource: "foo.com",
  2667  				GetterOptions: map[string]string{
  2668  					"checksum": "invalid:type",
  2669  				},
  2670  			},
  2671  			true,
  2672  		},
  2673  	}
  2674  
  2675  	for i, tc := range cases {
  2676  		err := tc.Input.Validate()
  2677  		if (err != nil) != tc.Err {
  2678  			t.Errorf("case %d: %v", i, err)
  2679  			continue
  2680  		}
  2681  	}
  2682  }
  2683  
  2684  func TestAllocation_Terminated(t *testing.T) {
  2685  	type desiredState struct {
  2686  		ClientStatus  string
  2687  		DesiredStatus string
  2688  		Terminated    bool
  2689  	}
  2690  
  2691  	harness := []desiredState{
  2692  		{
  2693  			ClientStatus:  AllocClientStatusPending,
  2694  			DesiredStatus: AllocDesiredStatusStop,
  2695  			Terminated:    false,
  2696  		},
  2697  		{
  2698  			ClientStatus:  AllocClientStatusRunning,
  2699  			DesiredStatus: AllocDesiredStatusStop,
  2700  			Terminated:    false,
  2701  		},
  2702  		{
  2703  			ClientStatus:  AllocClientStatusFailed,
  2704  			DesiredStatus: AllocDesiredStatusStop,
  2705  			Terminated:    true,
  2706  		},
  2707  		{
  2708  			ClientStatus:  AllocClientStatusFailed,
  2709  			DesiredStatus: AllocDesiredStatusRun,
  2710  			Terminated:    true,
  2711  		},
  2712  	}
  2713  
  2714  	for _, state := range harness {
  2715  		alloc := Allocation{}
  2716  		alloc.DesiredStatus = state.DesiredStatus
  2717  		alloc.ClientStatus = state.ClientStatus
  2718  		if alloc.Terminated() != state.Terminated {
  2719  			t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated())
  2720  		}
  2721  	}
  2722  }
  2723  
  2724  func TestAllocation_ShouldReschedule(t *testing.T) {
  2725  	type testCase struct {
  2726  		Desc               string
  2727  		FailTime           time.Time
  2728  		ClientStatus       string
  2729  		DesiredStatus      string
  2730  		ReschedulePolicy   *ReschedulePolicy
  2731  		RescheduleTrackers []*RescheduleEvent
  2732  		ShouldReschedule   bool
  2733  	}
  2734  
  2735  	fail := time.Now()
  2736  
  2737  	harness := []testCase{
  2738  		{
  2739  			Desc:             "Reschedule when desired state is stop",
  2740  			ClientStatus:     AllocClientStatusPending,
  2741  			DesiredStatus:    AllocDesiredStatusStop,
  2742  			FailTime:         fail,
  2743  			ReschedulePolicy: nil,
  2744  			ShouldReschedule: false,
  2745  		},
  2746  		{
  2747  			Desc:             "Disabled rescheduling",
  2748  			ClientStatus:     AllocClientStatusFailed,
  2749  			DesiredStatus:    AllocDesiredStatusRun,
  2750  			FailTime:         fail,
  2751  			ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute},
  2752  			ShouldReschedule: false,
  2753  		},
  2754  		{
  2755  			Desc:             "Reschedule when client status is complete",
  2756  			ClientStatus:     AllocClientStatusComplete,
  2757  			DesiredStatus:    AllocDesiredStatusRun,
  2758  			FailTime:         fail,
  2759  			ReschedulePolicy: nil,
  2760  			ShouldReschedule: false,
  2761  		},
  2762  		{
  2763  			Desc:             "Reschedule with nil reschedule policy",
  2764  			ClientStatus:     AllocClientStatusFailed,
  2765  			DesiredStatus:    AllocDesiredStatusRun,
  2766  			FailTime:         fail,
  2767  			ReschedulePolicy: nil,
  2768  			ShouldReschedule: false,
  2769  		},
  2770  		{
  2771  			Desc:             "Reschedule with unlimited and attempts >0",
  2772  			ClientStatus:     AllocClientStatusFailed,
  2773  			DesiredStatus:    AllocDesiredStatusRun,
  2774  			FailTime:         fail,
  2775  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true},
  2776  			ShouldReschedule: true,
  2777  		},
  2778  		{
  2779  			Desc:             "Reschedule when client status is complete",
  2780  			ClientStatus:     AllocClientStatusComplete,
  2781  			DesiredStatus:    AllocDesiredStatusRun,
  2782  			FailTime:         fail,
  2783  			ReschedulePolicy: nil,
  2784  			ShouldReschedule: false,
  2785  		},
  2786  		{
  2787  			Desc:             "Reschedule with policy when client status complete",
  2788  			ClientStatus:     AllocClientStatusComplete,
  2789  			DesiredStatus:    AllocDesiredStatusRun,
  2790  			FailTime:         fail,
  2791  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
  2792  			ShouldReschedule: false,
  2793  		},
  2794  		{
  2795  			Desc:             "Reschedule with no previous attempts",
  2796  			ClientStatus:     AllocClientStatusFailed,
  2797  			DesiredStatus:    AllocDesiredStatusRun,
  2798  			FailTime:         fail,
  2799  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
  2800  			ShouldReschedule: true,
  2801  		},
  2802  		{
  2803  			Desc:             "Reschedule with leftover attempts",
  2804  			ClientStatus:     AllocClientStatusFailed,
  2805  			DesiredStatus:    AllocDesiredStatusRun,
  2806  			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
  2807  			FailTime:         fail,
  2808  			RescheduleTrackers: []*RescheduleEvent{
  2809  				{
  2810  					RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(),
  2811  				},
  2812  			},
  2813  			ShouldReschedule: true,
  2814  		},
  2815  		{
  2816  			Desc:             "Reschedule with too old previous attempts",
  2817  			ClientStatus:     AllocClientStatusFailed,
  2818  			DesiredStatus:    AllocDesiredStatusRun,
  2819  			FailTime:         fail,
  2820  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute},
  2821  			RescheduleTrackers: []*RescheduleEvent{
  2822  				{
  2823  					RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(),
  2824  				},
  2825  			},
  2826  			ShouldReschedule: true,
  2827  		},
  2828  		{
  2829  			Desc:             "Reschedule with no leftover attempts",
  2830  			ClientStatus:     AllocClientStatusFailed,
  2831  			DesiredStatus:    AllocDesiredStatusRun,
  2832  			FailTime:         fail,
  2833  			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
  2834  			RescheduleTrackers: []*RescheduleEvent{
  2835  				{
  2836  					RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
  2837  				},
  2838  				{
  2839  					RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(),
  2840  				},
  2841  			},
  2842  			ShouldReschedule: false,
  2843  		},
  2844  	}
  2845  
  2846  	for _, state := range harness {
  2847  		alloc := Allocation{}
  2848  		alloc.DesiredStatus = state.DesiredStatus
  2849  		alloc.ClientStatus = state.ClientStatus
  2850  		alloc.RescheduleTracker = &RescheduleTracker{Events: state.RescheduleTrackers}
  2851  
  2852  		t.Run(state.Desc, func(t *testing.T) {
  2853  			if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule {
  2854  				t.Fatalf("expected %v but got %v", state.ShouldReschedule, got)
  2855  			}
  2856  		})
  2857  
  2858  	}
  2859  }
  2860  
  2861  func TestAllocation_LastEventTime(t *testing.T) {
  2862  	type testCase struct {
  2863  		desc                  string
  2864  		taskState             map[string]*TaskState
  2865  		expectedLastEventTime time.Time
  2866  	}
  2867  
  2868  	t1 := time.Now().UTC()
  2869  
  2870  	testCases := []testCase{
  2871  		{
  2872  			desc:                  "nil task state",
  2873  			expectedLastEventTime: t1,
  2874  		},
  2875  		{
  2876  			desc:                  "empty task state",
  2877  			taskState:             make(map[string]*TaskState),
  2878  			expectedLastEventTime: t1,
  2879  		},
  2880  		{
  2881  			desc: "Finished At not set",
  2882  			taskState: map[string]*TaskState{"foo": {State: "start",
  2883  				StartedAt: t1.Add(-2 * time.Hour)}},
  2884  			expectedLastEventTime: t1,
  2885  		},
  2886  		{
  2887  			desc: "One finished",
  2888  			taskState: map[string]*TaskState{"foo": {State: "start",
  2889  				StartedAt:  t1.Add(-2 * time.Hour),
  2890  				FinishedAt: t1.Add(-1 * time.Hour)}},
  2891  			expectedLastEventTime: t1.Add(-1 * time.Hour),
  2892  		},
  2893  		{
  2894  			desc: "Multiple tasks",
  2895  			taskState: map[string]*TaskState{"foo": {State: "start",
  2896  				StartedAt:  t1.Add(-2 * time.Hour),
  2897  				FinishedAt: t1.Add(-1 * time.Hour)},
  2898  				"bar": {State: "start",
  2899  					StartedAt:  t1.Add(-2 * time.Hour),
  2900  					FinishedAt: t1.Add(-40 * time.Minute)}},
  2901  			expectedLastEventTime: t1.Add(-40 * time.Minute),
  2902  		},
  2903  		{
  2904  			desc: "No finishedAt set, one task event, should use modify time",
  2905  			taskState: map[string]*TaskState{"foo": {
  2906  				State:     "run",
  2907  				StartedAt: t1.Add(-2 * time.Hour),
  2908  				Events: []*TaskEvent{
  2909  					{Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()},
  2910  				}},
  2911  			},
  2912  			expectedLastEventTime: t1,
  2913  		},
  2914  	}
  2915  	for _, tc := range testCases {
  2916  		t.Run(tc.desc, func(t *testing.T) {
  2917  			alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()}
  2918  			alloc.TaskStates = tc.taskState
  2919  			require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime())
  2920  		})
  2921  	}
  2922  }
  2923  
  2924  func TestAllocation_NextDelay(t *testing.T) {
  2925  	type testCase struct {
  2926  		desc                       string
  2927  		reschedulePolicy           *ReschedulePolicy
  2928  		alloc                      *Allocation
  2929  		expectedRescheduleTime     time.Time
  2930  		expectedRescheduleEligible bool
  2931  	}
  2932  	now := time.Now()
  2933  	testCases := []testCase{
  2934  		{
  2935  			desc: "Allocation hasn't failed yet",
  2936  			reschedulePolicy: &ReschedulePolicy{
  2937  				DelayFunction: "constant",
  2938  				Delay:         5 * time.Second,
  2939  			},
  2940  			alloc:                      &Allocation{},
  2941  			expectedRescheduleTime:     time.Time{},
  2942  			expectedRescheduleEligible: false,
  2943  		},
  2944  		{
  2945  			desc: "Allocation lacks task state",
  2946  			reschedulePolicy: &ReschedulePolicy{
  2947  				DelayFunction: "constant",
  2948  				Delay:         5 * time.Second,
  2949  				Unlimited:     true,
  2950  			},
  2951  			alloc:                      &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()},
  2952  			expectedRescheduleTime:     now.UTC().Add(5 * time.Second),
  2953  			expectedRescheduleEligible: true,
  2954  		},
  2955  		{
  2956  			desc: "linear delay, unlimited restarts, no reschedule tracker",
  2957  			reschedulePolicy: &ReschedulePolicy{
  2958  				DelayFunction: "constant",
  2959  				Delay:         5 * time.Second,
  2960  				Unlimited:     true,
  2961  			},
  2962  			alloc: &Allocation{
  2963  				ClientStatus: AllocClientStatusFailed,
  2964  				TaskStates: map[string]*TaskState{"foo": {State: "dead",
  2965  					StartedAt:  now.Add(-1 * time.Hour),
  2966  					FinishedAt: now.Add(-2 * time.Second)}},
  2967  			},
  2968  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  2969  			expectedRescheduleEligible: true,
  2970  		},
  2971  		{
  2972  			desc: "linear delay with reschedule tracker",
  2973  			reschedulePolicy: &ReschedulePolicy{
  2974  				DelayFunction: "constant",
  2975  				Delay:         5 * time.Second,
  2976  				Interval:      10 * time.Minute,
  2977  				Attempts:      2,
  2978  			},
  2979  			alloc: &Allocation{
  2980  				ClientStatus: AllocClientStatusFailed,
  2981  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  2982  					StartedAt:  now.Add(-1 * time.Hour),
  2983  					FinishedAt: now.Add(-2 * time.Second)}},
  2984  				RescheduleTracker: &RescheduleTracker{
  2985  					Events: []*RescheduleEvent{{
  2986  						RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
  2987  						Delay:          5 * time.Second,
  2988  					}},
  2989  				}},
  2990  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  2991  			expectedRescheduleEligible: true,
  2992  		},
  2993  		{
  2994  			desc: "linear delay with reschedule tracker, attempts exhausted",
  2995  			reschedulePolicy: &ReschedulePolicy{
  2996  				DelayFunction: "constant",
  2997  				Delay:         5 * time.Second,
  2998  				Interval:      10 * time.Minute,
  2999  				Attempts:      2,
  3000  			},
  3001  			alloc: &Allocation{
  3002  				ClientStatus: AllocClientStatusFailed,
  3003  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3004  					StartedAt:  now.Add(-1 * time.Hour),
  3005  					FinishedAt: now.Add(-2 * time.Second)}},
  3006  				RescheduleTracker: &RescheduleTracker{
  3007  					Events: []*RescheduleEvent{
  3008  						{
  3009  							RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(),
  3010  							Delay:          5 * time.Second,
  3011  						},
  3012  						{
  3013  							RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
  3014  							Delay:          5 * time.Second,
  3015  						},
  3016  					},
  3017  				}},
  3018  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  3019  			expectedRescheduleEligible: false,
  3020  		},
  3021  		{
  3022  			desc: "exponential delay - no reschedule tracker",
  3023  			reschedulePolicy: &ReschedulePolicy{
  3024  				DelayFunction: "exponential",
  3025  				Delay:         5 * time.Second,
  3026  				MaxDelay:      90 * time.Second,
  3027  				Unlimited:     true,
  3028  			},
  3029  			alloc: &Allocation{
  3030  				ClientStatus: AllocClientStatusFailed,
  3031  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3032  					StartedAt:  now.Add(-1 * time.Hour),
  3033  					FinishedAt: now.Add(-2 * time.Second)}},
  3034  			},
  3035  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  3036  			expectedRescheduleEligible: true,
  3037  		},
  3038  		{
  3039  			desc: "exponential delay with reschedule tracker",
  3040  			reschedulePolicy: &ReschedulePolicy{
  3041  				DelayFunction: "exponential",
  3042  				Delay:         5 * time.Second,
  3043  				MaxDelay:      90 * time.Second,
  3044  				Unlimited:     true,
  3045  			},
  3046  			alloc: &Allocation{
  3047  				ClientStatus: AllocClientStatusFailed,
  3048  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3049  					StartedAt:  now.Add(-1 * time.Hour),
  3050  					FinishedAt: now.Add(-2 * time.Second)}},
  3051  				RescheduleTracker: &RescheduleTracker{
  3052  					Events: []*RescheduleEvent{
  3053  						{
  3054  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3055  							Delay:          5 * time.Second,
  3056  						},
  3057  						{
  3058  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3059  							Delay:          10 * time.Second,
  3060  						},
  3061  						{
  3062  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3063  							Delay:          20 * time.Second,
  3064  						},
  3065  					},
  3066  				}},
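        			// The last delay was 20s, so the next exponential delay doubles to 40s.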
  3067  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
  3068  			expectedRescheduleEligible: true,
  3069  		},
  3070  		{
  3071  			desc: "exponential delay with delay ceiling reached",
  3072  			reschedulePolicy: &ReschedulePolicy{
  3073  				DelayFunction: "exponential",
  3074  				Delay:         5 * time.Second,
  3075  				MaxDelay:      90 * time.Second,
  3076  				Unlimited:     true,
  3077  			},
  3078  			alloc: &Allocation{
  3079  				ClientStatus: AllocClientStatusFailed,
  3080  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3081  					StartedAt:  now.Add(-1 * time.Hour),
  3082  					FinishedAt: now.Add(-15 * time.Second)}},
  3083  				RescheduleTracker: &RescheduleTracker{
  3084  					Events: []*RescheduleEvent{
  3085  						{
  3086  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3087  							Delay:          5 * time.Second,
  3088  						},
  3089  						{
  3090  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3091  							Delay:          10 * time.Second,
  3092  						},
  3093  						{
  3094  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3095  							Delay:          20 * time.Second,
  3096  						},
  3097  						{
  3098  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3099  							Delay:          40 * time.Second,
  3100  						},
  3101  						{
  3102  							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
  3103  							Delay:          80 * time.Second,
  3104  						},
  3105  					},
  3106  				}},
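        			// Doubling the last 80s delay would exceed MaxDelay, so the next delay is capped at 90s.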
  3107  			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(90 * time.Second),
  3108  			expectedRescheduleEligible: true,
  3109  		},
  3110  		{
  3111  			// Test case where most recent reschedule ran longer than delay ceiling
  3112  			desc: "exponential delay, delay ceiling reset condition met",
  3113  			reschedulePolicy: &ReschedulePolicy{
  3114  				DelayFunction: "exponential",
  3115  				Delay:         5 * time.Second,
  3116  				MaxDelay:      90 * time.Second,
  3117  				Unlimited:     true,
  3118  			},
  3119  			alloc: &Allocation{
  3120  				ClientStatus: AllocClientStatusFailed,
  3121  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3122  					StartedAt:  now.Add(-1 * time.Hour),
  3123  					FinishedAt: now.Add(-15 * time.Minute)}},
  3124  				RescheduleTracker: &RescheduleTracker{
  3125  					Events: []*RescheduleEvent{
  3126  						{
  3127  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3128  							Delay:          5 * time.Second,
  3129  						},
  3130  						{
  3131  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3132  							Delay:          10 * time.Second,
  3133  						},
  3134  						{
  3135  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3136  							Delay:          20 * time.Second,
  3137  						},
  3138  						{
  3139  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3140  							Delay:          40 * time.Second,
  3141  						},
  3142  						{
  3143  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3144  							Delay:          80 * time.Second,
  3145  						},
  3146  						{
  3147  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3148  							Delay:          90 * time.Second,
  3149  						},
  3150  						{
  3151  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3152  							Delay:          90 * time.Second,
  3153  						},
  3154  					},
  3155  				}},
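        			// The allocation outlived the delay ceiling, so the delay resets to the initial 5s.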
  3156  			expectedRescheduleTime:     now.Add(-15 * time.Minute).Add(5 * time.Second),
  3157  			expectedRescheduleEligible: true,
  3158  		},
  3159  		{
  3160  			desc: "fibonacci delay - no reschedule tracker",
  3161  			reschedulePolicy: &ReschedulePolicy{
  3162  				DelayFunction: "fibonacci",
  3163  				Delay:         5 * time.Second,
  3164  				MaxDelay:      90 * time.Second,
  3165  				Unlimited:     true,
  3166  			},
  3167  			alloc: &Allocation{
  3168  				ClientStatus: AllocClientStatusFailed,
  3169  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3170  					StartedAt:  now.Add(-1 * time.Hour),
  3171  					FinishedAt: now.Add(-2 * time.Second)}}},
  3172  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  3173  			expectedRescheduleEligible: true,
  3174  		},
  3175  		{
  3176  			desc: "fibonacci delay with reschedule tracker",
  3177  			reschedulePolicy: &ReschedulePolicy{
  3178  				DelayFunction: "fibonacci",
  3179  				Delay:         5 * time.Second,
  3180  				MaxDelay:      90 * time.Second,
  3181  				Unlimited:     true,
  3182  			},
  3183  			alloc: &Allocation{
  3184  				ClientStatus: AllocClientStatusFailed,
  3185  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3186  					StartedAt:  now.Add(-1 * time.Hour),
  3187  					FinishedAt: now.Add(-2 * time.Second)}},
  3188  				RescheduleTracker: &RescheduleTracker{
  3189  					Events: []*RescheduleEvent{
  3190  						{
  3191  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3192  							Delay:          5 * time.Second,
  3193  						},
  3194  						{
  3195  							RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(),
  3196  							Delay:          5 * time.Second,
  3197  						},
  3198  					},
  3199  				}},
  3200  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(10 * time.Second),
  3201  			expectedRescheduleEligible: true,
  3202  		},
  3203  		{
  3204  			desc: "fibonacci delay with more events",
  3205  			reschedulePolicy: &ReschedulePolicy{
  3206  				DelayFunction: "fibonacci",
  3207  				Delay:         5 * time.Second,
  3208  				MaxDelay:      90 * time.Second,
  3209  				Unlimited:     true,
  3210  			},
  3211  			alloc: &Allocation{
  3212  				ClientStatus: AllocClientStatusFailed,
  3213  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3214  					StartedAt:  now.Add(-1 * time.Hour),
  3215  					FinishedAt: now.Add(-2 * time.Second)}},
  3216  				RescheduleTracker: &RescheduleTracker{
  3217  					Events: []*RescheduleEvent{
  3218  						{
  3219  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3220  							Delay:          5 * time.Second,
  3221  						},
  3222  						{
  3223  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3224  							Delay:          5 * time.Second,
  3225  						},
  3226  						{
  3227  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3228  							Delay:          10 * time.Second,
  3229  						},
  3230  						{
  3231  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3232  							Delay:          15 * time.Second,
  3233  						},
  3234  						{
  3235  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3236  							Delay:          25 * time.Second,
  3237  						},
  3238  					},
  3239  				}},
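        			// Fibonacci progression: the next delay is 15s + 25s = 40s.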
  3240  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
  3241  			expectedRescheduleEligible: true,
  3242  		},
  3243  		{
  3244  			desc: "fibonacci delay with delay ceiling reached",
  3245  			reschedulePolicy: &ReschedulePolicy{
  3246  				DelayFunction: "fibonacci",
  3247  				Delay:         5 * time.Second,
  3248  				MaxDelay:      50 * time.Second,
  3249  				Unlimited:     true,
  3250  			},
  3251  			alloc: &Allocation{
  3252  				ClientStatus: AllocClientStatusFailed,
  3253  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3254  					StartedAt:  now.Add(-1 * time.Hour),
  3255  					FinishedAt: now.Add(-15 * time.Second)}},
  3256  				RescheduleTracker: &RescheduleTracker{
  3257  					Events: []*RescheduleEvent{
  3258  						{
  3259  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3260  							Delay:          5 * time.Second,
  3261  						},
  3262  						{
  3263  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3264  							Delay:          5 * time.Second,
  3265  						},
  3266  						{
  3267  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3268  							Delay:          10 * time.Second,
  3269  						},
  3270  						{
  3271  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3272  							Delay:          15 * time.Second,
  3273  						},
  3274  						{
  3275  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3276  							Delay:          25 * time.Second,
  3277  						},
  3278  						{
  3279  							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
  3280  							Delay:          40 * time.Second,
  3281  						},
  3282  					},
  3283  				}},
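        			// The next Fibonacci delay (25s + 40s = 65s) exceeds MaxDelay, so it is capped at 50s.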
  3284  			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(50 * time.Second),
  3285  			expectedRescheduleEligible: true,
  3286  		},
  3287  		{
  3288  			desc: "fibonacci delay with delay reset condition met",
  3289  			reschedulePolicy: &ReschedulePolicy{
  3290  				DelayFunction: "fibonacci",
  3291  				Delay:         5 * time.Second,
  3292  				MaxDelay:      50 * time.Second,
  3293  				Unlimited:     true,
  3294  			},
  3295  			alloc: &Allocation{
  3296  				ClientStatus: AllocClientStatusFailed,
  3297  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3298  					StartedAt:  now.Add(-1 * time.Hour),
  3299  					FinishedAt: now.Add(-5 * time.Minute)}},
  3300  				RescheduleTracker: &RescheduleTracker{
  3301  					Events: []*RescheduleEvent{
  3302  						{
  3303  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3304  							Delay:          5 * time.Second,
  3305  						},
  3306  						{
  3307  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3308  							Delay:          5 * time.Second,
  3309  						},
  3310  						{
  3311  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3312  							Delay:          10 * time.Second,
  3313  						},
  3314  						{
  3315  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3316  							Delay:          15 * time.Second,
  3317  						},
  3318  						{
  3319  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3320  							Delay:          25 * time.Second,
  3321  						},
  3322  						{
  3323  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3324  							Delay:          40 * time.Second,
  3325  						},
  3326  					},
  3327  				}},
  3328  			expectedRescheduleTime:     now.Add(-5 * time.Minute).Add(5 * time.Second),
  3329  			expectedRescheduleEligible: true,
  3330  		},
  3331  		{
  3332  			desc: "fibonacci delay with the most recent event that reset delay value",
  3333  			reschedulePolicy: &ReschedulePolicy{
  3334  				DelayFunction: "fibonacci",
  3335  				Delay:         5 * time.Second,
  3336  				MaxDelay:      50 * time.Second,
  3337  				Unlimited:     true,
  3338  			},
  3339  			alloc: &Allocation{
  3340  				ClientStatus: AllocClientStatusFailed,
  3341  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3342  					StartedAt:  now.Add(-1 * time.Hour),
  3343  					FinishedAt: now.Add(-5 * time.Second)}},
  3344  				RescheduleTracker: &RescheduleTracker{
  3345  					Events: []*RescheduleEvent{
  3346  						{
  3347  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3348  							Delay:          5 * time.Second,
  3349  						},
  3350  						{
  3351  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3352  							Delay:          5 * time.Second,
  3353  						},
  3354  						{
  3355  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3356  							Delay:          10 * time.Second,
  3357  						},
  3358  						{
  3359  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3360  							Delay:          15 * time.Second,
  3361  						},
  3362  						{
  3363  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3364  							Delay:          25 * time.Second,
  3365  						},
  3366  						{
  3367  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3368  							Delay:          40 * time.Second,
  3369  						},
  3370  						{
  3371  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3372  							Delay:          50 * time.Second,
  3373  						},
  3374  						{
  3375  							RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(),
  3376  							Delay:          5 * time.Second,
  3377  						},
  3378  					},
  3379  				}},
  3380  			expectedRescheduleTime:     now.Add(-5 * time.Second).Add(5 * time.Second),
  3381  			expectedRescheduleEligible: true,
  3382  		},
  3383  	}
  3384  	for _, tc := range testCases {
  3385  		t.Run(tc.desc, func(t *testing.T) {
  3386  			require := require.New(t)
  3387  			j := testJob()
  3388  			j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy
  3389  			tc.alloc.Job = j
  3390  			tc.alloc.TaskGroup = j.TaskGroups[0].Name
  3391  			reschedTime, allowed := tc.alloc.NextRescheduleTime()
  3392  			require.Equal(tc.expectedRescheduleEligible, allowed)
  3393  			require.Equal(tc.expectedRescheduleTime, reschedTime)
  3394  		})
  3395  	}
  3396  
  3397  }
  3398  
  3399  func TestRescheduleTracker_Copy(t *testing.T) {
  3400  	type testCase struct {
  3401  		original *RescheduleTracker
  3402  		expected *RescheduleTracker
  3403  	}
  3404  
  3405  	cases := []testCase{
  3406  		{nil, nil},
  3407  		{&RescheduleTracker{Events: []*RescheduleEvent{
  3408  			{RescheduleTime: 2,
  3409  				PrevAllocID: "12",
  3410  				PrevNodeID:  "12",
  3411  				Delay:       30 * time.Second},
  3412  		}}, &RescheduleTracker{Events: []*RescheduleEvent{
  3413  			{RescheduleTime: 2,
  3414  				PrevAllocID: "12",
  3415  				PrevNodeID:  "12",
  3416  				Delay:       30 * time.Second},
  3417  		}}},
  3418  	}
  3419  
  3420  	for _, tc := range cases {
  3421  		if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) {
  3422  			t.Fatalf("expected %v but got %v", *tc.expected, *got)
  3423  		}
  3424  	}
  3425  }
  3426  
  3427  func TestVault_Validate(t *testing.T) {
  3428  	v := &Vault{
  3429  		Env:        true,
  3430  		ChangeMode: VaultChangeModeNoop,
  3431  	}
  3432  
  3433  	if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") {
  3434  		t.Fatalf("Expected policy list empty error")
  3435  	}
  3436  
  3437  	v.Policies = []string{"foo", "root"}
  3438  	v.ChangeMode = VaultChangeModeSignal
  3439  
  3440  	err := v.Validate()
  3441  	if err == nil {
  3442  		t.Fatalf("Expected validation errors")
  3443  	}
  3444  
  3445  	if !strings.Contains(err.Error(), "Signal must") {
  3446  		t.Fatalf("Expected signal empty error")
  3447  	}
  3448  	if !strings.Contains(err.Error(), "root") {
  3449  		t.Fatalf("Expected root error")
  3450  	}
  3451  }
  3452  
  3453  func TestParameterizedJobConfig_Validate(t *testing.T) {
  3454  	d := &ParameterizedJobConfig{
  3455  		Payload: "foo",
  3456  	}
  3457  
  3458  	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") {
  3459  		t.Fatalf("Expected unknown payload requirement: %v", err)
  3460  	}
  3461  
  3462  	d.Payload = DispatchPayloadOptional
  3463  	d.MetaOptional = []string{"foo", "bar"}
  3464  	d.MetaRequired = []string{"bar", "baz"}
  3465  
  3466  	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") {
  3467  		t.Fatalf("Expected meta not being disjoint error: %v", err)
  3468  	}
  3469  }
  3470  
  3471  func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) {
  3472  	job := testJob()
  3473  	job.ParameterizedJob = &ParameterizedJobConfig{
  3474  		Payload: DispatchPayloadOptional,
  3475  	}
  3476  	job.Type = JobTypeSystem
  3477  
  3478  	if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") {
  3479  		t.Fatalf("Expected bad scheduler type: %v", err)
  3480  	}
  3481  }
  3482  
  3483  func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
  3484  	d := &ParameterizedJobConfig{}
  3485  	d.Canonicalize()
  3486  	if d.Payload != DispatchPayloadOptional {
  3487  		t.Fatalf("Canonicalize failed")
  3488  	}
  3489  }
  3490  
  3491  func TestDispatchPayloadConfig_Validate(t *testing.T) {
  3492  	d := &DispatchPayloadConfig{
  3493  		File: "foo",
  3494  	}
  3495  
  3496  	// task/local/foo
  3497  	if err := d.Validate(); err != nil {
  3498  		t.Fatalf("bad: %v", err)
  3499  	}
  3500  
  3501  	// task/haha
  3502  	d.File = "../haha"
  3503  	if err := d.Validate(); err != nil {
  3504  		t.Fatalf("bad: %v", err)
  3505  	}
  3506  
  3507  	// ../haha
  3508  	d.File = "../../../haha"
  3509  	if err := d.Validate(); err == nil {
  3510  		t.Fatalf("bad: %v", err)
  3511  	}
  3512  }
  3513  
  3514  func TestIsRecoverable(t *testing.T) {
  3515  	if IsRecoverable(nil) {
  3516  		t.Errorf("nil should not be recoverable")
  3517  	}
  3518  	if IsRecoverable(NewRecoverableError(nil, true)) {
  3519  		t.Errorf("NewRecoverableError(nil, true) should not be recoverable")
  3520  	}
  3521  	if IsRecoverable(fmt.Errorf("i promise im recoverable")) {
  3522  		t.Errorf("Custom errors should not be recoverable")
  3523  	}
  3524  	if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) {
  3525  		t.Errorf("Explicitly unrecoverable errors should not be recoverable")
  3526  	}
  3527  	if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) {
  3528  		t.Errorf("Explicitly recoverable errors *should* be recoverable")
  3529  	}
  3530  }
  3531  
  3532  func TestACLTokenValidate(t *testing.T) {
  3533  	tk := &ACLToken{}
  3534  
  3535  	// Missing a type
  3536  	err := tk.Validate()
  3537  	assert.NotNil(t, err)
  3538  	if !strings.Contains(err.Error(), "client or management") {
  3539  		t.Fatalf("bad: %v", err)
  3540  	}
  3541  
  3542  	// Missing policies
  3543  	tk.Type = ACLClientToken
  3544  	err = tk.Validate()
  3545  	assert.NotNil(t, err)
  3546  	if !strings.Contains(err.Error(), "missing policies") {
  3547  		t.Fatalf("bad: %v", err)
  3548  	}
  3549  
  3550  	// Invalid policies
  3551  	tk.Type = ACLManagementToken
  3552  	tk.Policies = []string{"foo"}
  3553  	err = tk.Validate()
  3554  	assert.NotNil(t, err)
  3555  	if !strings.Contains(err.Error(), "associated with policies") {
  3556  		t.Fatalf("bad: %v", err)
  3557  	}
  3558  
  3559  	// Name too long
  3560  	tk.Name = ""
  3561  	for i := 0; i < 8; i++ {
  3562  		tk.Name += uuid.Generate()
  3563  	}
  3564  	tk.Policies = nil
  3565  	err = tk.Validate()
  3566  	assert.NotNil(t, err)
  3567  	if !strings.Contains(err.Error(), "too long") {
  3568  		t.Fatalf("bad: %v", err)
  3569  	}
  3570  
  3571  	// Make it valid
  3572  	tk.Name = "foo"
  3573  	err = tk.Validate()
  3574  	assert.Nil(t, err)
  3575  }
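
// A compact, table-driven restatement of the cases above (a sketch, not part
// of the original file); it relies only on error substrings already asserted
// in TestACLTokenValidate.
func TestACLTokenValidate_Table_Sketch(t *testing.T) {
	cases := []struct {
		name   string
		token  *ACLToken
		errSub string // empty means no error expected
	}{
		{"missing type", &ACLToken{}, "client or management"},
		{"client token without policies", &ACLToken{Type: ACLClientToken}, "missing policies"},
		{"management token with policies", &ACLToken{Type: ACLManagementToken, Policies: []string{"foo"}}, "associated with policies"},
		{"valid management token", &ACLToken{Name: "foo", Type: ACLManagementToken}, ""},
	}

	for _, c := range cases {
		err := c.token.Validate()
		if c.errSub == "" {
			assert.Nil(t, err, c.name)
			continue
		}
		if assert.NotNil(t, err, c.name) {
			assert.Contains(t, err.Error(), c.errSub, c.name)
		}
	}
}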
  3576  
  3577  func TestACLTokenPolicySubset(t *testing.T) {
  3578  	tk := &ACLToken{
  3579  		Type:     ACLClientToken,
  3580  		Policies: []string{"foo", "bar", "baz"},
  3581  	}
  3582  
  3583  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
  3584  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
  3585  	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
  3586  	assert.Equal(t, true, tk.PolicySubset([]string{}))
  3587  	assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"}))
  3588  	assert.Equal(t, false, tk.PolicySubset([]string{"new"}))
  3589  
  3590  	tk = &ACLToken{
  3591  		Type: ACLManagementToken,
  3592  	}
  3593  
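	// A management token reports every policy list as a subset, as the
	// assertions below show.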
  3594  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
  3595  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
  3596  	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
  3597  	assert.Equal(t, true, tk.PolicySubset([]string{}))
  3598  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"}))
  3599  	assert.Equal(t, true, tk.PolicySubset([]string{"new"}))
  3600  }
  3601  
  3602  func TestACLTokenSetHash(t *testing.T) {
  3603  	tk := &ACLToken{
  3604  		Name:     "foo",
  3605  		Type:     ACLClientToken,
  3606  		Policies: []string{"foo", "bar"},
  3607  		Global:   false,
  3608  	}
  3609  	out1 := tk.SetHash()
  3610  	assert.NotNil(t, out1)
  3611  	assert.NotNil(t, tk.Hash)
  3612  	assert.Equal(t, out1, tk.Hash)
  3613  
  3614  	tk.Policies = []string{"foo"}
  3615  	out2 := tk.SetHash()
  3616  	assert.NotNil(t, out2)
  3617  	assert.NotNil(t, tk.Hash)
  3618  	assert.Equal(t, out2, tk.Hash)
  3619  	assert.NotEqual(t, out1, out2)
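
	// Hedged extra check (not in the original assertions): hashing the same
	// field values again is assumed to be deterministic.
	out3 := tk.SetHash()
	assert.Equal(t, out2, out3)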
  3620  }
  3621  
  3622  func TestACLPolicySetHash(t *testing.T) {
  3623  	ap := &ACLPolicy{
  3624  		Name:        "foo",
  3625  		Description: "great policy",
  3626  		Rules:       "node { policy = \"read\" }",
  3627  	}
  3628  	out1 := ap.SetHash()
  3629  	assert.NotNil(t, out1)
  3630  	assert.NotNil(t, ap.Hash)
  3631  	assert.Equal(t, out1, ap.Hash)
  3632  
  3633  	ap.Rules = "node { policy = \"write\" }"
  3634  	out2 := ap.SetHash()
  3635  	assert.NotNil(t, out2)
  3636  	assert.NotNil(t, ap.Hash)
  3637  	assert.Equal(t, out2, ap.Hash)
  3638  	assert.NotEqual(t, out1, out2)
  3639  }
  3640  
  3641  func TestTaskEventPopulate(t *testing.T) {
  3642  	prepopulatedEvent := NewTaskEvent(TaskSetup)
  3643  	prepopulatedEvent.DisplayMessage = "Hola"
  3644  	testcases := []struct {
  3645  		event       *TaskEvent
  3646  		expectedMsg string
  3647  	}{
  3648  		{nil, ""},
  3649  		{prepopulatedEvent, "Hola"},
  3650  		{NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"},
  3651  		{NewTaskEvent(TaskStarted), "Task started by client"},
  3652  		{NewTaskEvent(TaskReceived), "Task received by client"},
  3653  		{NewTaskEvent(TaskFailedValidation), "Validation of task failed"},
  3654  		{NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"},
  3655  		{NewTaskEvent(TaskSetupFailure), "Task setup failed"},
  3656  		{NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"},
  3657  		{NewTaskEvent(TaskDriverFailure), "Failed to start task"},
  3658  		{NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"},
  3659  		{NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"},
  3660  		{NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"},
  3661  		{NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"},
  3662  		{NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"},
  3663  		{NewTaskEvent(TaskKilling), "Sent interrupt"},
  3664  		{NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"},
  3665  		{NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"},
  3666  		{NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"},
  3667  		{NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""},
  3668  		{NewTaskEvent(TaskKilled), "Task successfully killed"},
  3669  		{NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"},
  3670  		{NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"},
  3671  		{NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"},
  3672  		{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
  3673  		{NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"},
  3674  		{NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"},
  3675  		{NewTaskEvent(TaskSignaling), "Task being sent a signal"},
  3676  		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"},
  3677  		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"},
  3678  		{NewTaskEvent(TaskRestartSignal), "Task signaled to restart"},
  3679  		{NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"},
  3680  		{NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"},
  3681  		{NewTaskEvent("Unknown Type, No message"), ""},
  3682  		{NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"},
  3683  	}
  3684  
  3685  	for _, tc := range testcases {
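		// The first case passes a nil *TaskEvent, so this call relies on
		// PopulateEventDisplayMessage being nil-safe.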
  3686  		tc.event.PopulateEventDisplayMessage()
  3687  		if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg {
  3688  			t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage)
  3689  		}
  3690  	}
  3691  }
  3692  
  3693  func TestNetworkResourcesEquals(t *testing.T) {
  3694  	require := require.New(t)
  3695  	var networkResourcesTest = []struct {
  3696  		input    []*NetworkResource
  3697  		expected bool
  3698  		errorMsg string
  3699  	}{
  3700  		{
  3701  			[]*NetworkResource{
  3702  				{
  3703  					IP:            "10.0.0.1",
  3704  					MBits:         50,
  3705  					ReservedPorts: []Port{{"web", 80}},
  3706  				},
  3707  				{
  3708  					IP:            "10.0.0.1",
  3709  					MBits:         50,
  3710  					ReservedPorts: []Port{{"web", 80}},
  3711  				},
  3712  			},
  3713  			true,
  3714  			"Equal network resources should return true",
  3715  		},
  3716  		{
  3717  			[]*NetworkResource{
  3718  				{
  3719  					IP:            "10.0.0.0",
  3720  					MBits:         50,
  3721  					ReservedPorts: []Port{{"web", 80}},
  3722  				},
  3723  				{
  3724  					IP:            "10.0.0.1",
  3725  					MBits:         50,
  3726  					ReservedPorts: []Port{{"web", 80}},
  3727  				},
  3728  			},
  3729  			false,
  3730  			"Different IP addresses should return false",
  3731  		},
  3732  		{
  3733  			[]*NetworkResource{
  3734  				{
  3735  					IP:            "10.0.0.1",
  3736  					MBits:         40,
  3737  					ReservedPorts: []Port{{"web", 80}},
  3738  				},
  3739  				{
  3740  					IP:            "10.0.0.1",
  3741  					MBits:         50,
  3742  					ReservedPorts: []Port{{"web", 80}},
  3743  				},
  3744  			},
  3745  			false,
  3746  			"Different MBits values should return false",
  3747  		},
  3748  		{
  3749  			[]*NetworkResource{
  3750  				{
  3751  					IP:            "10.0.0.1",
  3752  					MBits:         50,
  3753  					ReservedPorts: []Port{{"web", 80}},
  3754  				},
  3755  				{
  3756  					IP:            "10.0.0.1",
  3757  					MBits:         50,
  3758  					ReservedPorts: []Port{{"web", 80}, {"web", 80}},
  3759  				},
  3760  			},
  3761  			false,
  3762  			"Different ReservedPorts lengths should return false",
  3763  		},
  3764  		{
  3765  			[]*NetworkResource{
  3766  				{
  3767  					IP:            "10.0.0.1",
  3768  					MBits:         50,
  3769  					ReservedPorts: []Port{{"web", 80}},
  3770  				},
  3771  				{
  3772  					IP:            "10.0.0.1",
  3773  					MBits:         50,
  3774  					ReservedPorts: []Port{},
  3775  				},
  3776  			},
  3777  			false,
  3778  			"Empty and non-empty ReservedPorts values should return false",
  3779  		},
  3780  		{
  3781  			[]*NetworkResource{
  3782  				{
  3783  					IP:            "10.0.0.1",
  3784  					MBits:         50,
  3785  					ReservedPorts: []Port{{"web", 80}},
  3786  				},
  3787  				{
  3788  					IP:            "10.0.0.1",
  3789  					MBits:         50,
  3790  					ReservedPorts: []Port{{"notweb", 80}},
  3791  				},
  3792  			},
  3793  			false,
  3794  			"Different ReservedPorts values should return false",
  3795  		},
  3796  		{
  3797  			[]*NetworkResource{
  3798  				{
  3799  					IP:           "10.0.0.1",
  3800  					MBits:        50,
  3801  					DynamicPorts: []Port{{"web", 80}},
  3802  				},
  3803  				{
  3804  					IP:           "10.0.0.1",
  3805  					MBits:        50,
  3806  					DynamicPorts: []Port{{"web", 80}, {"web", 80}},
  3807  				},
  3808  			},
  3809  			false,
  3810  			"Different DynamicPorts lengths should return false",
  3811  		},
  3812  		{
  3813  			[]*NetworkResource{
  3814  				{
  3815  					IP:           "10.0.0.1",
  3816  					MBits:        50,
  3817  					DynamicPorts: []Port{{"web", 80}},
  3818  				},
  3819  				{
  3820  					IP:           "10.0.0.1",
  3821  					MBits:        50,
  3822  					DynamicPorts: []Port{},
  3823  				},
  3824  			},
  3825  			false,
  3826  			"Empty and non-empty DynamicPorts values should return false",
  3827  		},
  3828  		{
  3829  			[]*NetworkResource{
  3830  				{
  3831  					IP:           "10.0.0.1",
  3832  					MBits:        50,
  3833  					DynamicPorts: []Port{{"web", 80}},
  3834  				},
  3835  				{
  3836  					IP:           "10.0.0.1",
  3837  					MBits:        50,
  3838  					DynamicPorts: []Port{{"notweb", 80}},
  3839  				},
  3840  			},
  3841  			false,
  3842  			"Different DynamicPorts values should return false",
  3843  		},
  3844  	}
  3845  	for _, testCase := range networkResourcesTest {
  3846  		first := testCase.input[0]
  3847  		second := testCase.input[1]
  3848  		require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg)
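		// Hedged extra check (not in the original table): Equals is assumed to
		// be symmetric, so the reverse comparison should agree.
		require.Equal(testCase.expected, second.Equals(first), testCase.errorMsg)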
  3849  	}
  3850  }
  3851  
  3852  func TestNode_Canonicalize(t *testing.T) {
  3853  	t.Parallel()
  3854  	require := require.New(t)
  3855  
  3856  	// Make sure the eligibility is set properly
  3857  	node := &Node{}
  3858  	node.Canonicalize()
  3859  	require.Equal(NodeSchedulingEligible, node.SchedulingEligibility)
  3860  
  3861  	node = &Node{
  3862  		Drain: true,
  3863  	}
  3864  	node.Canonicalize()
  3865  	require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility)
  3866  }
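
// A hedged companion sketch (not in the original file): Canonicalize is
// assumed to only backfill eligibility when it is unset, so an explicitly
// ineligible, non-draining node should keep its value.
func TestNode_Canonicalize_ExplicitEligibility_Sketch(t *testing.T) {
	t.Parallel()

	node := &Node{SchedulingEligibility: NodeSchedulingIneligible}
	node.Canonicalize()
	require.Equal(t, NodeSchedulingIneligible, node.SchedulingEligibility)
}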
  3867  
  3868  func TestNode_Copy(t *testing.T) {
  3869  	t.Parallel()
  3870  	require := require.New(t)
  3871  
  3872  	node := &Node{
  3873  		ID:         uuid.Generate(),
  3874  		SecretID:   uuid.Generate(),
  3875  		Datacenter: "dc1",
  3876  		Name:       "foobar",
  3877  		Attributes: map[string]string{
  3878  			"kernel.name":        "linux",
  3879  			"arch":               "x86",
  3880  			"nomad.version":      "0.5.0",
  3881  			"driver.exec":        "1",
  3882  			"driver.mock_driver": "1",
  3883  		},
  3884  		Resources: &Resources{
  3885  			CPU:      4000,
  3886  			MemoryMB: 8192,
  3887  			DiskMB:   100 * 1024,
  3888  			IOPS:     150,
  3889  			Networks: []*NetworkResource{
  3890  				{
  3891  					Device: "eth0",
  3892  					CIDR:   "192.168.0.100/32",
  3893  					MBits:  1000,
  3894  				},
  3895  			},
  3896  		},
  3897  		Reserved: &Resources{
  3898  			CPU:      100,
  3899  			MemoryMB: 256,
  3900  			DiskMB:   4 * 1024,
  3901  			Networks: []*NetworkResource{
  3902  				{
  3903  					Device:        "eth0",
  3904  					IP:            "192.168.0.100",
  3905  					ReservedPorts: []Port{{Label: "ssh", Value: 22}},
  3906  					MBits:         1,
  3907  				},
  3908  			},
  3909  		},
  3910  		Links: map[string]string{
  3911  			"consul": "foobar.dc1",
  3912  		},
  3913  		Meta: map[string]string{
  3914  			"pci-dss":  "true",
  3915  			"database": "mysql",
  3916  			"version":  "5.6",
  3917  		},
  3918  		NodeClass:             "linux-medium-pci",
  3919  		Status:                NodeStatusReady,
  3920  		SchedulingEligibility: NodeSchedulingEligible,
  3921  		Drivers: map[string]*DriverInfo{
  3922  			"mock_driver": {
  3923  				Attributes:        map[string]string{"running": "1"},
  3924  				Detected:          true,
  3925  				Healthy:           true,
  3926  				HealthDescription: "Currently active",
  3927  				UpdateTime:        time.Now(),
  3928  			},
  3929  		},
  3930  	}
  3931  	require.NoError(node.ComputeClass())
  3932  
  3933  	node2 := node.Copy()
  3934  
  3935  	require.Equal(node.Attributes, node2.Attributes)
  3936  	require.Equal(node.Resources, node2.Resources)
  3937  	require.Equal(node.Reserved, node2.Reserved)
  3938  	require.Equal(node.Links, node2.Links)
  3939  	require.Equal(node.Meta, node2.Meta)
  3940  	require.Equal(node.Events, node2.Events)
  3941  	require.Equal(node.DrainStrategy, node2.DrainStrategy)
  3942  	require.Equal(node.Drivers, node2.Drivers)
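
	// Hedged extra check (not in the original assertions): Copy is assumed to
	// duplicate reference fields such as Meta, so mutating the copy must not
	// leak back into the source node.
	node2.Meta["database"] = "postgres"
	require.Equal("mysql", node.Meta["database"])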
  3943  }
  3944  
  3945  func TestSpread_Validate(t *testing.T) {
  3946  	type tc struct {
  3947  		spread *Spread
  3948  		err    error
  3949  		name   string
  3950  	}
  3951  
  3952  	testCases := []tc{
  3953  		{
  3954  			spread: &Spread{},
  3955  			err:    fmt.Errorf("Missing spread attribute"),
  3956  			name:   "empty spread",
  3957  		},
  3958  		{
  3959  			spread: &Spread{
  3960  				Attribute: "${node.datacenter}",
  3961  				Weight:    -1,
  3962  			},
  3963  			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
  3964  			name: "Invalid negative weight",
  3965  		},
  3966  		{
  3967  			spread: &Spread{
  3968  				Attribute: "${node.datacenter}",
  3969  				Weight:    200,
  3970  			},
  3971  			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
  3972  			name: "Weight greater than 100",
  3973  		},
  3974  		{
  3975  			spread: &Spread{
  3976  				Attribute: "${node.datacenter}",
  3977  				Weight:    50,
  3978  				SpreadTarget: []*SpreadTarget{
  3979  					{
  3980  						Value:   "dc1",
  3981  						Percent: 25,
  3982  					},
  3983  					{
  3984  						Value:   "dc2",
  3985  						Percent: 150,
  3986  					},
  3987  				},
  3988  			},
  3989  			err:  fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"),
  3990  			name: "Invalid target percentage",
  3991  		},
  3992  		{
  3993  			spread: &Spread{
  3994  				Attribute: "${node.datacenter}",
  3995  				Weight:    50,
  3996  				SpreadTarget: []*SpreadTarget{
  3997  					{
  3998  						Value:   "dc1",
  3999  						Percent: 75,
  4000  					},
  4001  					{
  4002  						Value:   "dc2",
  4003  						Percent: 75,
  4004  					},
  4005  				},
  4006  			},
  4007  			err:  fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150),
  4008  			name: "Target percentages sum over 100",
  4009  		},
  4010  		{
  4011  			spread: &Spread{
  4012  				Attribute: "${node.datacenter}",
  4013  				Weight:    50,
  4014  				SpreadTarget: []*SpreadTarget{
  4015  					{
  4016  						Value:   "dc1",
  4017  						Percent: 25,
  4018  					},
  4019  					{
  4020  						Value:   "dc1",
  4021  						Percent: 50,
  4022  					},
  4023  				},
  4024  			},
  4025  			err:  fmt.Errorf("Spread target value \"dc1\" already defined"),
  4026  			name: "Duplicate spread target",
  4027  		},
  4028  		{
  4029  			spread: &Spread{
  4030  				Attribute: "${node.datacenter}",
  4031  				Weight:    50,
  4032  				SpreadTarget: []*SpreadTarget{
  4033  					{
  4034  						Value:   "dc1",
  4035  						Percent: 25,
  4036  					},
  4037  					{
  4038  						Value:   "dc2",
  4039  						Percent: 50,
  4040  					},
  4041  				},
  4042  			},
  4043  			err:  nil,
  4044  			name: "Valid spread",
  4045  		},
  4046  	}
  4047  
  4048  	for _, tc := range testCases {
  4049  		t.Run(tc.name, func(t *testing.T) {
  4050  			err := tc.spread.Validate()
  4051  			if tc.err != nil {
  4052  				require.NotNil(t, err)
  4053  				require.Contains(t, err.Error(), tc.err.Error())
  4054  			} else {
  4055  				require.Nil(t, err)
  4056  			}
  4057  		})
  4058  	}
  4059  }