github.com/smithx10/nomad@v0.9.1-rc1/nomad/structs/structs_test.go (about)

     1  package structs
     2  
     3  import (
     4  	"fmt"
     5  	"os"
     6  	"reflect"
     7  	"strings"
     8  	"testing"
     9  	"time"
    10  
    11  	"github.com/hashicorp/consul/api"
    12  	multierror "github.com/hashicorp/go-multierror"
    13  	"github.com/hashicorp/nomad/helper/uuid"
    14  	"github.com/kr/pretty"
    15  	"github.com/stretchr/testify/assert"
    16  	"github.com/stretchr/testify/require"
    17  )
    18  
    19  func TestJob_Validate(t *testing.T) {
    20  	j := &Job{}
    21  	err := j.Validate()
    22  	mErr := err.(*multierror.Error)
    23  	if !strings.Contains(mErr.Errors[0].Error(), "job region") {
    24  		t.Fatalf("err: %s", err)
    25  	}
    26  	if !strings.Contains(mErr.Errors[1].Error(), "job ID") {
    27  		t.Fatalf("err: %s", err)
    28  	}
    29  	if !strings.Contains(mErr.Errors[2].Error(), "job name") {
    30  		t.Fatalf("err: %s", err)
    31  	}
    32  	if !strings.Contains(mErr.Errors[3].Error(), "namespace") {
    33  		t.Fatalf("err: %s", err)
    34  	}
    35  	if !strings.Contains(mErr.Errors[4].Error(), "job type") {
    36  		t.Fatalf("err: %s", err)
    37  	}
    38  	if !strings.Contains(mErr.Errors[5].Error(), "priority") {
    39  		t.Fatalf("err: %s", err)
    40  	}
    41  	if !strings.Contains(mErr.Errors[6].Error(), "datacenters") {
    42  		t.Fatalf("err: %s", err)
    43  	}
    44  	if !strings.Contains(mErr.Errors[7].Error(), "task groups") {
    45  		t.Fatalf("err: %s", err)
    46  	}
    47  
    48  	j = &Job{
    49  		Type: "invalid-job-type",
    50  	}
    51  	err = j.Validate()
    52  	if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) {
    53  		t.Errorf("expected %s but found: %v", expected, err)
    54  	}
    55  
    56  	j = &Job{
    57  		Type: JobTypeService,
    58  		Periodic: &PeriodicConfig{
    59  			Enabled: true,
    60  		},
    61  	}
    62  	err = j.Validate()
    63  	mErr = err.(*multierror.Error)
    64  	if !strings.Contains(mErr.Error(), "Periodic") {
    65  		t.Fatalf("err: %s", err)
    66  	}
    67  
    68  	j = &Job{
    69  		Region:      "global",
    70  		ID:          uuid.Generate(),
    71  		Namespace:   "test",
    72  		Name:        "my-job",
    73  		Type:        JobTypeService,
    74  		Priority:    50,
    75  		Datacenters: []string{"dc1"},
    76  		TaskGroups: []*TaskGroup{
    77  			{
    78  				Name: "web",
    79  				RestartPolicy: &RestartPolicy{
    80  					Interval: 5 * time.Minute,
    81  					Delay:    10 * time.Second,
    82  					Attempts: 10,
    83  				},
    84  			},
    85  			{
    86  				Name: "web",
    87  				RestartPolicy: &RestartPolicy{
    88  					Interval: 5 * time.Minute,
    89  					Delay:    10 * time.Second,
    90  					Attempts: 10,
    91  				},
    92  			},
    93  			{
    94  				RestartPolicy: &RestartPolicy{
    95  					Interval: 5 * time.Minute,
    96  					Delay:    10 * time.Second,
    97  					Attempts: 10,
    98  				},
    99  			},
   100  		},
   101  	}
   102  	err = j.Validate()
   103  	mErr = err.(*multierror.Error)
   104  	if !strings.Contains(mErr.Errors[0].Error(), "2 redefines 'web' from group 1") {
   105  		t.Fatalf("err: %s", err)
   106  	}
   107  	if !strings.Contains(mErr.Errors[1].Error(), "group 3 missing name") {
   108  		t.Fatalf("err: %s", err)
   109  	}
   110  	if !strings.Contains(mErr.Errors[2].Error(), "Task group web validation failed") {
   111  		t.Fatalf("err: %s", err)
   112  	}
   113  }
   114  
   115  func TestJob_Warnings(t *testing.T) {
   116  	cases := []struct {
   117  		Name     string
   118  		Job      *Job
   119  		Expected []string
   120  	}{
   121  		{
   122  			Name:     "Higher counts for update stanza",
   123  			Expected: []string{"max parallel count is greater"},
   124  			Job: &Job{
   125  				Type: JobTypeService,
   126  				TaskGroups: []*TaskGroup{
   127  					{
   128  						Name:  "foo",
   129  						Count: 2,
   130  						Update: &UpdateStrategy{
   131  							MaxParallel: 10,
   132  						},
   133  					},
   134  				},
   135  			},
   136  		},
   137  	}
   138  
   139  	for _, c := range cases {
   140  		t.Run(c.Name, func(t *testing.T) {
   141  			warnings := c.Job.Warnings()
   142  			if warnings == nil {
   143  				if len(c.Expected) == 0 {
   144  					return
   145  				} else {
   146  					t.Fatal("Got no warnings when they were expected")
   147  				}
   148  			}
   149  
   150  			a := warnings.Error()
   151  			for _, e := range c.Expected {
   152  				if !strings.Contains(a, e) {
   153  					t.Fatalf("Got warnings %q; didn't contain %q", a, e)
   154  				}
   155  			}
   156  		})
   157  	}
   158  }
   159  
   160  func TestJob_SpecChanged(t *testing.T) {
   161  	// Get a base test job
   162  	base := testJob()
   163  
   164  	// Only modify the indexes/mutable state of the job
   165  	mutatedBase := base.Copy()
   166  	mutatedBase.Status = "foo"
   167  	mutatedBase.ModifyIndex = base.ModifyIndex + 100
   168  
   169  	// changed contains a spec change that should be detected
   170  	change := base.Copy()
   171  	change.Priority = 99
   172  
   173  	cases := []struct {
   174  		Name     string
   175  		Original *Job
   176  		New      *Job
   177  		Changed  bool
   178  	}{
   179  		{
   180  			Name:     "Same job except mutable indexes",
   181  			Changed:  false,
   182  			Original: base,
   183  			New:      mutatedBase,
   184  		},
   185  		{
   186  			Name:     "Different",
   187  			Changed:  true,
   188  			Original: base,
   189  			New:      change,
   190  		},
   191  	}
   192  
   193  	for _, c := range cases {
   194  		t.Run(c.Name, func(t *testing.T) {
   195  			if actual := c.Original.SpecChanged(c.New); actual != c.Changed {
   196  				t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed)
   197  			}
   198  		})
   199  	}
   200  }
   201  
// testJob builds a fully-populated service job fixture shared by the
// tests in this file: a single "web" task group running ten copies of
// one "exec" task, with a kernel constraint, a disabled periodic
// stanza, restart/reschedule policies, an artifact, a Consul service,
// resource/network requirements, and meta at both job and group level.
// Each call generates a fresh job ID.
func testJob() *Job {
	return &Job{
		Region:      "global",
		ID:          uuid.Generate(),
		Namespace:   "test",
		Name:        "my-job",
		Type:        JobTypeService,
		Priority:    50,
		AllAtOnce:   false,
		Datacenters: []string{"dc1"},
		Constraints: []*Constraint{
			{
				LTarget: "$attr.kernel.name",
				RTarget: "linux",
				Operand: "=",
			},
		},
		// Periodic block is present but disabled, so the job is not
		// treated as periodic by default.
		Periodic: &PeriodicConfig{
			Enabled: false,
		},
		TaskGroups: []*TaskGroup{
			{
				Name:          "web",
				Count:         10,
				EphemeralDisk: DefaultEphemeralDisk(),
				RestartPolicy: &RestartPolicy{
					Mode:     RestartPolicyModeFail,
					Attempts: 3,
					Interval: 10 * time.Minute,
					Delay:    1 * time.Minute,
				},
				ReschedulePolicy: &ReschedulePolicy{
					Interval:      5 * time.Minute,
					Attempts:      10,
					Delay:         5 * time.Second,
					DelayFunction: "constant",
				},
				Tasks: []*Task{
					{
						Name:   "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{
							"FOO": "bar",
						},
						Artifacts: []*TaskArtifact{
							{
								GetterSource: "http://foo.com",
							},
						},
						Services: []*Service{
							{
								Name:      "${TASK}-frontend",
								PortLabel: "http",
							},
						},
						Resources: &Resources{
							CPU:      500,
							MemoryMB: 256,
							Networks: []*NetworkResource{
								{
									MBits:        50,
									DynamicPorts: []Port{{Label: "http"}},
								},
							},
						},
						LogConfig: &LogConfig{
							MaxFiles:      10,
							MaxFileSizeMB: 1,
						},
					},
				},
				Meta: map[string]string{
					"elb_check_type":     "http",
					"elb_check_interval": "30s",
					"elb_check_min":      "3",
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
	}
}
   288  
   289  func TestJob_Copy(t *testing.T) {
   290  	j := testJob()
   291  	c := j.Copy()
   292  	if !reflect.DeepEqual(j, c) {
   293  		t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
   294  	}
   295  }
   296  
   297  func TestJob_IsPeriodic(t *testing.T) {
   298  	j := &Job{
   299  		Type: JobTypeService,
   300  		Periodic: &PeriodicConfig{
   301  			Enabled: true,
   302  		},
   303  	}
   304  	if !j.IsPeriodic() {
   305  		t.Fatalf("IsPeriodic() returned false on periodic job")
   306  	}
   307  
   308  	j = &Job{
   309  		Type: JobTypeService,
   310  	}
   311  	if j.IsPeriodic() {
   312  		t.Fatalf("IsPeriodic() returned true on non-periodic job")
   313  	}
   314  }
   315  
   316  func TestJob_IsPeriodicActive(t *testing.T) {
   317  	cases := []struct {
   318  		job    *Job
   319  		active bool
   320  	}{
   321  		{
   322  			job: &Job{
   323  				Type: JobTypeService,
   324  				Periodic: &PeriodicConfig{
   325  					Enabled: true,
   326  				},
   327  			},
   328  			active: true,
   329  		},
   330  		{
   331  			job: &Job{
   332  				Type: JobTypeService,
   333  				Periodic: &PeriodicConfig{
   334  					Enabled: false,
   335  				},
   336  			},
   337  			active: false,
   338  		},
   339  		{
   340  			job: &Job{
   341  				Type: JobTypeService,
   342  				Periodic: &PeriodicConfig{
   343  					Enabled: true,
   344  				},
   345  				Stop: true,
   346  			},
   347  			active: false,
   348  		},
   349  		{
   350  			job: &Job{
   351  				Type: JobTypeService,
   352  				Periodic: &PeriodicConfig{
   353  					Enabled: false,
   354  				},
   355  				ParameterizedJob: &ParameterizedJobConfig{},
   356  			},
   357  			active: false,
   358  		},
   359  	}
   360  
   361  	for i, c := range cases {
   362  		if act := c.job.IsPeriodicActive(); act != c.active {
   363  			t.Fatalf("case %d failed: got %v; want %v", i, act, c.active)
   364  		}
   365  	}
   366  }
   367  
// TestJob_SystemJob_Validate checks validation rules specific to system
// jobs: task group counts above one are rejected, counts of zero or one
// are accepted, and affinity/spread stanzas are disallowed at every
// level (job, task group, and task).
func TestJob_SystemJob_Validate(t *testing.T) {
	j := testJob()
	j.Type = JobTypeSystem
	// System jobs carry no reschedule policy; clear the fixture's.
	j.TaskGroups[0].ReschedulePolicy = nil
	j.Canonicalize()

	// testJob sets Count to 10, which system jobs must reject.
	err := j.Validate()
	if err == nil || !strings.Contains(err.Error(), "exceed") {
		t.Fatalf("expect error due to count")
	}

	// Counts of 0 and 1 are both acceptable for system jobs.
	j.TaskGroups[0].Count = 0
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	j.TaskGroups[0].Count = 1
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Add affinities at job, task group and task level, that should fail validation

	j.Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${node.datacenter}",
		RTarget: "dc1",
	}}
	j.TaskGroups[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have an affinity stanza")

	// Add spread at job and task group level, that should fail validation
	j.Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}
	j.TaskGroups[0].Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}

	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have a spread stanza")

}
   425  
   426  func TestJob_VaultPolicies(t *testing.T) {
   427  	j0 := &Job{}
   428  	e0 := make(map[string]map[string]*Vault, 0)
   429  
   430  	vj1 := &Vault{
   431  		Policies: []string{
   432  			"p1",
   433  			"p2",
   434  		},
   435  	}
   436  	vj2 := &Vault{
   437  		Policies: []string{
   438  			"p3",
   439  			"p4",
   440  		},
   441  	}
   442  	vj3 := &Vault{
   443  		Policies: []string{
   444  			"p5",
   445  		},
   446  	}
   447  	j1 := &Job{
   448  		TaskGroups: []*TaskGroup{
   449  			{
   450  				Name: "foo",
   451  				Tasks: []*Task{
   452  					{
   453  						Name: "t1",
   454  					},
   455  					{
   456  						Name:  "t2",
   457  						Vault: vj1,
   458  					},
   459  				},
   460  			},
   461  			{
   462  				Name: "bar",
   463  				Tasks: []*Task{
   464  					{
   465  						Name:  "t3",
   466  						Vault: vj2,
   467  					},
   468  					{
   469  						Name:  "t4",
   470  						Vault: vj3,
   471  					},
   472  				},
   473  			},
   474  		},
   475  	}
   476  
   477  	e1 := map[string]map[string]*Vault{
   478  		"foo": {
   479  			"t2": vj1,
   480  		},
   481  		"bar": {
   482  			"t3": vj2,
   483  			"t4": vj3,
   484  		},
   485  	}
   486  
   487  	cases := []struct {
   488  		Job      *Job
   489  		Expected map[string]map[string]*Vault
   490  	}{
   491  		{
   492  			Job:      j0,
   493  			Expected: e0,
   494  		},
   495  		{
   496  			Job:      j1,
   497  			Expected: e1,
   498  		},
   499  	}
   500  
   501  	for i, c := range cases {
   502  		got := c.Job.VaultPolicies()
   503  		if !reflect.DeepEqual(got, c.Expected) {
   504  			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
   505  		}
   506  	}
   507  }
   508  
   509  func TestJob_RequiredSignals(t *testing.T) {
   510  	j0 := &Job{}
   511  	e0 := make(map[string]map[string][]string, 0)
   512  
   513  	vj1 := &Vault{
   514  		Policies:   []string{"p1"},
   515  		ChangeMode: VaultChangeModeNoop,
   516  	}
   517  	vj2 := &Vault{
   518  		Policies:     []string{"p1"},
   519  		ChangeMode:   VaultChangeModeSignal,
   520  		ChangeSignal: "SIGUSR1",
   521  	}
   522  	tj1 := &Template{
   523  		SourcePath: "foo",
   524  		DestPath:   "bar",
   525  		ChangeMode: TemplateChangeModeNoop,
   526  	}
   527  	tj2 := &Template{
   528  		SourcePath:   "foo",
   529  		DestPath:     "bar",
   530  		ChangeMode:   TemplateChangeModeSignal,
   531  		ChangeSignal: "SIGUSR2",
   532  	}
   533  	j1 := &Job{
   534  		TaskGroups: []*TaskGroup{
   535  			{
   536  				Name: "foo",
   537  				Tasks: []*Task{
   538  					{
   539  						Name: "t1",
   540  					},
   541  					{
   542  						Name:      "t2",
   543  						Vault:     vj2,
   544  						Templates: []*Template{tj2},
   545  					},
   546  				},
   547  			},
   548  			{
   549  				Name: "bar",
   550  				Tasks: []*Task{
   551  					{
   552  						Name:      "t3",
   553  						Vault:     vj1,
   554  						Templates: []*Template{tj1},
   555  					},
   556  					{
   557  						Name:  "t4",
   558  						Vault: vj2,
   559  					},
   560  				},
   561  			},
   562  		},
   563  	}
   564  
   565  	e1 := map[string]map[string][]string{
   566  		"foo": {
   567  			"t2": {"SIGUSR1", "SIGUSR2"},
   568  		},
   569  		"bar": {
   570  			"t4": {"SIGUSR1"},
   571  		},
   572  	}
   573  
   574  	j2 := &Job{
   575  		TaskGroups: []*TaskGroup{
   576  			{
   577  				Name: "foo",
   578  				Tasks: []*Task{
   579  					{
   580  						Name:       "t1",
   581  						KillSignal: "SIGQUIT",
   582  					},
   583  				},
   584  			},
   585  		},
   586  	}
   587  
   588  	e2 := map[string]map[string][]string{
   589  		"foo": {
   590  			"t1": {"SIGQUIT"},
   591  		},
   592  	}
   593  
   594  	cases := []struct {
   595  		Job      *Job
   596  		Expected map[string]map[string][]string
   597  	}{
   598  		{
   599  			Job:      j0,
   600  			Expected: e0,
   601  		},
   602  		{
   603  			Job:      j1,
   604  			Expected: e1,
   605  		},
   606  		{
   607  			Job:      j2,
   608  			Expected: e2,
   609  		},
   610  	}
   611  
   612  	for i, c := range cases {
   613  		got := c.Job.RequiredSignals()
   614  		if !reflect.DeepEqual(got, c.Expected) {
   615  			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
   616  		}
   617  	}
   618  }
   619  
// TestTaskGroup_Validate exercises TaskGroup.Validate across several
// scenarios: missing name/tasks and a negative count; static port
// collisions across tasks and within a single task; duplicate task
// names, unnamed tasks, and multiple leaders; an update block on a
// batch job; and a reschedule policy on a system job.
func TestTaskGroup_Validate(t *testing.T) {
	j := testJob()
	// Nameless group with a negative count and no tasks; the errors
	// are asserted in index order.
	tg := &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	err := tg.Validate(j)
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "group name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "count can't be negative") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Missing tasks") {
		t.Fatalf("err: %s", err)
	}

	// Two tasks reserving the same static port must collide.
	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
			{
				Name: "task-b",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected := `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	// The same static port reserved twice within one task also collides.
	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{
								{Label: "foo", Value: 123},
								{Label: "bar", Value: 123},
							},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	// Duplicate task names, an unnamed task, and two leaders; errors
	// are again asserted in index order.
	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
			{Name: "web", Leader: true},
			{},
		},
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval:      5 * time.Minute,
			Attempts:      10,
			Delay:         5 * time.Second,
			DelayFunction: "constant",
		},
	}

	err = tg.Validate(j)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "should have an ephemeral disk object") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "2 redefines 'web' from task 1") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "Task 3 missing name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[3].Error(), "Only one task may be marked as leader") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[4].Error(), "Task web validation failed") {
		t.Fatalf("err: %s", err)
	}

	// Batch jobs do not allow an update block on the group.
	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
		},
		Update: DefaultUpdateStrategy.Copy(),
	}
	j.Type = JobTypeBatch
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "does not allow update block") {
		t.Fatalf("err: %s", err)
	}

	// System jobs must not carry a reschedule policy.
	tg = &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	j.Type = JobTypeSystem
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
		t.Fatalf("err: %s", err)
	}
}
   775  
// TestTask_Validate exercises Task.Validate: an empty task reports its
// missing required fields in order, slashes in the name are rejected,
// a minimal complete task validates cleanly, and distinct_hosts /
// distinct_property constraints are rejected at the task level.
func TestTask_Validate(t *testing.T) {
	task := &Task{}
	ephemeralDisk := DefaultEphemeralDisk()
	err := task.Validate(ephemeralDisk, JobTypeBatch)
	mErr := err.(*multierror.Error)
	// Missing-field errors are asserted in index order.
	if !strings.Contains(mErr.Errors[0].Error(), "task name") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task driver") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[2].Error(), "task resources") {
		t.Fatalf("err: %s", err)
	}

	task = &Task{Name: "web/foo"}
	err = task.Validate(ephemeralDisk, JobTypeBatch)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "slashes") {
		t.Fatalf("err: %s", err)
	}

	// A minimal, fully-specified task must validate without error.
	task = &Task{
		Name:   "web",
		Driver: "docker",
		Resources: &Resources{
			CPU:      100,
			MemoryMB: 100,
		},
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk.SizeMB = 200
	err = task.Validate(ephemeralDisk, JobTypeBatch)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// distinct_hosts and distinct_property are group/job-level
	// constraints and must be rejected on a task.
	task.Constraints = append(task.Constraints,
		&Constraint{
			Operand: ConstraintDistinctHosts,
		},
		&Constraint{
			Operand: ConstraintDistinctProperty,
			LTarget: "${meta.rack}",
		})

	err = task.Validate(ephemeralDisk, JobTypeBatch)
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "task level: distinct_hosts") {
		t.Fatalf("err: %s", err)
	}
	if !strings.Contains(mErr.Errors[1].Error(), "task level: distinct_property") {
		t.Fatalf("err: %s", err)
	}
}
   831  
// TestTask_Validate_Services covers service validation within a task:
// duplicate service names on the same port label, duplicate check names,
// missing/too-small check intervals, and that services with the same
// name on different port labels are allowed.
func TestTask_Validate_Services(t *testing.T) {
	// s1 carries three checks sharing one name: one with no interval,
	// one with no interval but a timeout, and one whose interval is
	// below the minimum.
	s1 := &Service{
		Name:      "service-name",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name:     "check-name",
				Type:     ServiceCheckTCP,
				Interval: 0 * time.Second,
			},
			{
				Name:    "check-name",
				Type:    ServiceCheckTCP,
				Timeout: 2 * time.Second,
			},
			{
				Name:     "check-name",
				Type:     ServiceCheckTCP,
				Interval: 1 * time.Second,
			},
		},
	}

	// s2 duplicates s1's name and port label.
	s2 := &Service{
		Name:      "service-name",
		PortLabel: "bar",
	}

	// s3 and s4 share a name but use different port labels, which is
	// legal.
	s3 := &Service{
		Name:      "service-A",
		PortLabel: "a",
	}
	s4 := &Service{
		Name:      "service-A",
		PortLabel: "b",
	}

	ephemeralDisk := DefaultEphemeralDisk()
	ephemeralDisk.SizeMB = 200
	task := &Task{
		Name:   "web",
		Driver: "docker",
		Resources: &Resources{
			CPU:      100,
			MemoryMB: 100,
		},
		Services: []*Service{s1, s2},
	}

	task1 := &Task{
		Name:      "web",
		Driver:    "docker",
		Resources: DefaultResources(),
		Services:  []*Service{s3, s4},
		LogConfig: DefaultLogConfig(),
	}
	task1.Resources.Networks = []*NetworkResource{
		{
			MBits: 10,
			DynamicPorts: []Port{
				{
					Label: "a",
					Value: 1000,
				},
				{
					Label: "b",
					Value: 2000,
				},
			},
		},
	}

	err := task.Validate(ephemeralDisk, JobTypeService)
	if err == nil {
		t.Fatal("expected an error")
	}

	if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "missing required value interval") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "cannot be less than") {
		t.Fatalf("err: %v", err)
	}

	// Same service name on distinct port labels must be valid.
	if err = task1.Validate(ephemeralDisk, JobTypeService); err != nil {
		t.Fatalf("err : %v", err)
	}
}
   929  
   930  func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
   931  	ephemeralDisk := DefaultEphemeralDisk()
   932  	getTask := func(s *Service) *Task {
   933  		task := &Task{
   934  			Name:      "web",
   935  			Driver:    "docker",
   936  			Resources: DefaultResources(),
   937  			Services:  []*Service{s},
   938  			LogConfig: DefaultLogConfig(),
   939  		}
   940  		task.Resources.Networks = []*NetworkResource{
   941  			{
   942  				MBits: 10,
   943  				DynamicPorts: []Port{
   944  					{
   945  						Label: "http",
   946  						Value: 80,
   947  					},
   948  				},
   949  			},
   950  		}
   951  		return task
   952  	}
   953  
   954  	cases := []*Service{
   955  		{
   956  			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
   957  			Name:        "DriverModeWithLabel",
   958  			PortLabel:   "http",
   959  			AddressMode: AddressModeDriver,
   960  		},
   961  		{
   962  			Name:        "DriverModeWithPort",
   963  			PortLabel:   "80",
   964  			AddressMode: AddressModeDriver,
   965  		},
   966  		{
   967  			Name:        "HostModeWithLabel",
   968  			PortLabel:   "http",
   969  			AddressMode: AddressModeHost,
   970  		},
   971  		{
   972  			Name:        "HostModeWithoutLabel",
   973  			AddressMode: AddressModeHost,
   974  		},
   975  		{
   976  			Name:        "DriverModeWithoutLabel",
   977  			AddressMode: AddressModeDriver,
   978  		},
   979  	}
   980  
   981  	for _, service := range cases {
   982  		task := getTask(service)
   983  		t.Run(service.Name, func(t *testing.T) {
   984  			if err := task.Validate(ephemeralDisk, JobTypeService); err != nil {
   985  				t.Fatalf("unexpected err: %v", err)
   986  			}
   987  		})
   988  	}
   989  }
   990  
   991  func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
   992  	ephemeralDisk := DefaultEphemeralDisk()
   993  	getTask := func(s *Service) *Task {
   994  		task := &Task{
   995  			Name:      "web",
   996  			Driver:    "docker",
   997  			Resources: DefaultResources(),
   998  			Services:  []*Service{s},
   999  			LogConfig: DefaultLogConfig(),
  1000  		}
  1001  		task.Resources.Networks = []*NetworkResource{
  1002  			{
  1003  				MBits: 10,
  1004  				DynamicPorts: []Port{
  1005  					{
  1006  						Label: "http",
  1007  						Value: 80,
  1008  					},
  1009  				},
  1010  			},
  1011  		}
  1012  		return task
  1013  	}
  1014  
  1015  	cases := []*Service{
  1016  		{
  1017  			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
  1018  			Name:        "DriverModeWithLabel",
  1019  			PortLabel:   "asdf",
  1020  			AddressMode: AddressModeDriver,
  1021  		},
  1022  		{
  1023  			Name:        "HostModeWithLabel",
  1024  			PortLabel:   "asdf",
  1025  			AddressMode: AddressModeHost,
  1026  		},
  1027  		{
  1028  			Name:        "HostModeWithPort",
  1029  			PortLabel:   "80",
  1030  			AddressMode: AddressModeHost,
  1031  		},
  1032  	}
  1033  
  1034  	for _, service := range cases {
  1035  		task := getTask(service)
  1036  		t.Run(service.Name, func(t *testing.T) {
  1037  			err := task.Validate(ephemeralDisk, JobTypeService)
  1038  			if err == nil {
  1039  				t.Fatalf("expected an error")
  1040  			}
  1041  			//t.Logf("err: %v", err)
  1042  		})
  1043  	}
  1044  }
  1045  
// TestTask_Validate_Service_Check exercises ServiceCheck.validate:
// missing timeout on a script check, valid/invalid initial_status
// values, and HTTP checks requiring a relative, non-empty path.
func TestTask_Validate_Service_Check(t *testing.T) {

	// A script check without a timeout must be rejected.
	invalidCheck := ServiceCheck{
		Name:     "check-name",
		Command:  "/bin/true",
		Type:     ServiceCheckScript,
		Interval: 10 * time.Second,
	}

	err := invalidCheck.validate()
	if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") {
		t.Fatalf("expected a timeout validation error but received: %q", err)
	}

	check1 := ServiceCheck{
		Name:     "check-name",
		Type:     ServiceCheckTCP,
		Interval: 10 * time.Second,
		Timeout:  2 * time.Second,
	}

	if err := check1.validate(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// initial_status only accepts Consul health states (or empty).
	check1.InitialStatus = "foo"
	err = check1.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}

	if !strings.Contains(err.Error(), "invalid initial check state (foo)") {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthCritical
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthPassing
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = ""
	err = check1.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// HTTP checks require a path.
	check2 := ServiceCheck{
		Name:     "check-name-2",
		Type:     ServiceCheckHTTP,
		Interval: 10 * time.Second,
		Timeout:  2 * time.Second,
		Path:     "/foo/bar",
	}

	err = check2.validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2.Path = ""
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "valid http path") {
		t.Fatalf("err: %v", err)
	}

	// The path must be relative, not an absolute URL.
	check2.Path = "http://www.example.com"
	err = check2.validate()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "relative http path") {
		t.Fatalf("err: %v", err)
	}
}
  1130  
// TestTask_Validate_Service_Check_AddressMode asserts that checks do not
// inherit address mode but do inherit ports.
func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
	// getTask wraps the given service in a task exposing a single dynamic
	// port labeled "http"; checks may reference that label.
	getTask := func(s *Service) *Task {
		return &Task{
			Resources: &Resources{
				Networks: []*NetworkResource{
					{
						DynamicPorts: []Port{
							{
								Label: "http",
								Value: 9999,
							},
						},
					},
				},
			},
			Services: []*Service{s},
		}
	}

	// Each case is a single service; an empty ErrContains means validation
	// is expected to succeed.
	cases := []struct {
		Service     *Service
		ErrContains string
	}{
		{
			// Host-mode service referencing a numeric port that is not a
			// declared network port label.
			Service: &Service{
				Name:        "invalid-driver",
				PortLabel:   "80",
				AddressMode: "host",
			},
			ErrContains: `port label "80" referenced`,
		},
		{
			// The check does not inherit the service's driver address mode,
			// so it cannot fall back to the service's numeric port.
			Service: &Service{
				Name:        "http-driver-fail-1",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:     "invalid-check-1",
						Type:     "tcp",
						Interval: time.Second,
						Timeout:  time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-1" cannot use a numeric port`,
		},
		{
			// An explicit numeric port label on the check is also rejected
			// without driver address mode on the check itself.
			Service: &Service{
				Name:        "http-driver-fail-2",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:      "invalid-check-2",
						Type:      "tcp",
						PortLabel: "80",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
				},
			},
			ErrContains: `check "invalid-check-2" cannot use a numeric port`,
		},
		{
			// A named port label must exist on the task's networks.
			Service: &Service{
				Name:        "http-driver-fail-3",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:      "invalid-check-3",
						Type:      "tcp",
						PortLabel: "missing-port-label",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
				},
			},
			ErrContains: `port label "missing-port-label" referenced`,
		},
		{
			// Script checks, checks using declared port labels, and checks
			// that opt into driver mode themselves are all valid.
			Service: &Service{
				Name:        "http-driver-passes",
				PortLabel:   "80",
				AddressMode: "driver",
				Checks: []*ServiceCheck{
					{
						Name:     "valid-script-check",
						Type:     "script",
						Command:  "ok",
						Interval: time.Second,
						Timeout:  time.Second,
					},
					{
						Name:      "valid-host-check",
						Type:      "tcp",
						PortLabel: "http",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
					{
						Name:        "valid-driver-check",
						Type:        "tcp",
						AddressMode: "driver",
						Interval:    time.Second,
						Timeout:     time.Second,
					},
				},
			},
		},
		{
			// Regression cases for #3673: a service without a port label is
			// fine as long as every port-based check names its own port.
			Service: &Service{
				Name: "empty-address-3673-passes-1",
				Checks: []*ServiceCheck{
					{
						Name:      "valid-port-label",
						Type:      "tcp",
						PortLabel: "http",
						Interval:  time.Second,
						Timeout:   time.Second,
					},
					{
						Name:     "empty-is-ok",
						Type:     "script",
						Command:  "ok",
						Interval: time.Second,
						Timeout:  time.Second,
					},
				},
			},
		},
		{
			// A service with no checks needs no port at all.
			Service: &Service{
				Name: "empty-address-3673-passes-2",
			},
		},
		{
			// A tcp check with no port anywhere (check or service) fails.
			Service: &Service{
				Name: "empty-address-3673-fails",
				Checks: []*ServiceCheck{
					{
						Name:     "empty-is-not-ok",
						Type:     "tcp",
						Interval: time.Second,
						Timeout:  time.Second,
					},
				},
			},
			ErrContains: `invalid: check requires a port but neither check nor service`,
		},
	}

	for _, tc := range cases {
		tc := tc // capture range variable for the subtest closure
		// The task is built outside the subtest so every case shares the
		// same network fixture.
		task := getTask(tc.Service)
		t.Run(tc.Service.Name, func(t *testing.T) {
			err := validateServices(task)
			if err == nil && tc.ErrContains == "" {
				// Ok!
				return
			}
			if err == nil {
				t.Fatalf("no error returned. expected: %s", tc.ErrContains)
			}
			if !strings.Contains(err.Error(), tc.ErrContains) {
				t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
			}
		})
	}
}
  1304  
  1305  func TestTask_Validate_Service_Check_GRPC(t *testing.T) {
  1306  	t.Parallel()
  1307  	// Bad (no port)
  1308  	invalidGRPC := &ServiceCheck{
  1309  		Type:     ServiceCheckGRPC,
  1310  		Interval: time.Second,
  1311  		Timeout:  time.Second,
  1312  	}
  1313  	service := &Service{
  1314  		Name:   "test",
  1315  		Checks: []*ServiceCheck{invalidGRPC},
  1316  	}
  1317  
  1318  	assert.Error(t, service.Validate())
  1319  
  1320  	// Good
  1321  	service.Checks[0] = &ServiceCheck{
  1322  		Type:      ServiceCheckGRPC,
  1323  		Interval:  time.Second,
  1324  		Timeout:   time.Second,
  1325  		PortLabel: "some-port-label",
  1326  	}
  1327  
  1328  	assert.NoError(t, service.Validate())
  1329  }
  1330  
  1331  func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
  1332  	t.Parallel()
  1333  	invalidCheckRestart := &CheckRestart{
  1334  		Limit: -1,
  1335  		Grace: -1,
  1336  	}
  1337  
  1338  	err := invalidCheckRestart.Validate()
  1339  	assert.NotNil(t, err, "invalidateCheckRestart.Validate()")
  1340  	assert.Len(t, err.(*multierror.Error).Errors, 2)
  1341  
  1342  	validCheckRestart := &CheckRestart{}
  1343  	assert.Nil(t, validCheckRestart.Validate())
  1344  
  1345  	validCheckRestart.Limit = 1
  1346  	validCheckRestart.Grace = 1
  1347  	assert.Nil(t, validCheckRestart.Validate())
  1348  }
  1349  
  1350  func TestTask_Validate_LogConfig(t *testing.T) {
  1351  	task := &Task{
  1352  		LogConfig: DefaultLogConfig(),
  1353  	}
  1354  	ephemeralDisk := &EphemeralDisk{
  1355  		SizeMB: 1,
  1356  	}
  1357  
  1358  	err := task.Validate(ephemeralDisk, JobTypeService)
  1359  	mErr := err.(*multierror.Error)
  1360  	if !strings.Contains(mErr.Errors[3].Error(), "log storage") {
  1361  		t.Fatalf("err: %s", err)
  1362  	}
  1363  }
  1364  
  1365  func TestTask_Validate_Template(t *testing.T) {
  1366  
  1367  	bad := &Template{}
  1368  	task := &Task{
  1369  		Templates: []*Template{bad},
  1370  	}
  1371  	ephemeralDisk := &EphemeralDisk{
  1372  		SizeMB: 1,
  1373  	}
  1374  
  1375  	err := task.Validate(ephemeralDisk, JobTypeService)
  1376  	if !strings.Contains(err.Error(), "Template 1 validation failed") {
  1377  		t.Fatalf("err: %s", err)
  1378  	}
  1379  
  1380  	// Have two templates that share the same destination
  1381  	good := &Template{
  1382  		SourcePath: "foo",
  1383  		DestPath:   "local/foo",
  1384  		ChangeMode: "noop",
  1385  	}
  1386  
  1387  	task.Templates = []*Template{good, good}
  1388  	err = task.Validate(ephemeralDisk, JobTypeService)
  1389  	if !strings.Contains(err.Error(), "same destination as") {
  1390  		t.Fatalf("err: %s", err)
  1391  	}
  1392  
  1393  	// Env templates can't use signals
  1394  	task.Templates = []*Template{
  1395  		{
  1396  			Envvars:    true,
  1397  			ChangeMode: "signal",
  1398  		},
  1399  	}
  1400  
  1401  	err = task.Validate(ephemeralDisk, JobTypeService)
  1402  	if err == nil {
  1403  		t.Fatalf("expected error from Template.Validate")
  1404  	}
  1405  	if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) {
  1406  		t.Errorf("expected to find %q but found %v", expected, err)
  1407  	}
  1408  }
  1409  
  1410  func TestTemplate_Validate(t *testing.T) {
  1411  	cases := []struct {
  1412  		Tmpl         *Template
  1413  		Fail         bool
  1414  		ContainsErrs []string
  1415  	}{
  1416  		{
  1417  			Tmpl: &Template{},
  1418  			Fail: true,
  1419  			ContainsErrs: []string{
  1420  				"specify a source path",
  1421  				"specify a destination",
  1422  				TemplateChangeModeInvalidError.Error(),
  1423  			},
  1424  		},
  1425  		{
  1426  			Tmpl: &Template{
  1427  				Splay: -100,
  1428  			},
  1429  			Fail: true,
  1430  			ContainsErrs: []string{
  1431  				"positive splay",
  1432  			},
  1433  		},
  1434  		{
  1435  			Tmpl: &Template{
  1436  				ChangeMode: "foo",
  1437  			},
  1438  			Fail: true,
  1439  			ContainsErrs: []string{
  1440  				TemplateChangeModeInvalidError.Error(),
  1441  			},
  1442  		},
  1443  		{
  1444  			Tmpl: &Template{
  1445  				ChangeMode: "signal",
  1446  			},
  1447  			Fail: true,
  1448  			ContainsErrs: []string{
  1449  				"specify signal value",
  1450  			},
  1451  		},
  1452  		{
  1453  			Tmpl: &Template{
  1454  				SourcePath: "foo",
  1455  				DestPath:   "../../root",
  1456  				ChangeMode: "noop",
  1457  			},
  1458  			Fail: true,
  1459  			ContainsErrs: []string{
  1460  				"destination escapes",
  1461  			},
  1462  		},
  1463  		{
  1464  			Tmpl: &Template{
  1465  				SourcePath: "foo",
  1466  				DestPath:   "local/foo",
  1467  				ChangeMode: "noop",
  1468  			},
  1469  			Fail: false,
  1470  		},
  1471  		{
  1472  			Tmpl: &Template{
  1473  				SourcePath: "foo",
  1474  				DestPath:   "local/foo",
  1475  				ChangeMode: "noop",
  1476  				Perms:      "0444",
  1477  			},
  1478  			Fail: false,
  1479  		},
  1480  		{
  1481  			Tmpl: &Template{
  1482  				SourcePath: "foo",
  1483  				DestPath:   "local/foo",
  1484  				ChangeMode: "noop",
  1485  				Perms:      "zza",
  1486  			},
  1487  			Fail: true,
  1488  			ContainsErrs: []string{
  1489  				"as octal",
  1490  			},
  1491  		},
  1492  	}
  1493  
  1494  	for i, c := range cases {
  1495  		err := c.Tmpl.Validate()
  1496  		if err != nil {
  1497  			if !c.Fail {
  1498  				t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err)
  1499  			}
  1500  
  1501  			e := err.Error()
  1502  			for _, exp := range c.ContainsErrs {
  1503  				if !strings.Contains(e, exp) {
  1504  					t.Fatalf("Cased %d: should have contained error %q: %q", i+1, exp, e)
  1505  				}
  1506  			}
  1507  		} else if c.Fail {
  1508  			t.Fatalf("Case %d: should have failed: %v", i+1, err)
  1509  		}
  1510  	}
  1511  }
  1512  
// TestConstraint_Validate covers Constraint.Validate: the missing-operand
// case, operand-specific RTarget validation (regexp, version,
// distinct_property, set_contains*), and LTarget/operand checks. The test
// mutates a single constraint between assertions, so statement order matters.
func TestConstraint_Validate(t *testing.T) {
	c := &Constraint{}
	err := c.Validate()
	mErr := err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Missing constraint operand") {
		t.Fatalf("err: %s", err)
	}

	// A fully-specified equality constraint is valid.
	c = &Constraint{
		LTarget: "$attr.kernel.name",
		RTarget: "linux",
		Operand: "=",
	}
	err = c.Validate()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Perform additional regexp validation
	c.Operand = ConstraintRegex
	c.RTarget = "(foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "missing closing") {
		t.Fatalf("err: %s", err)
	}

	// Perform version validation
	c.Operand = ConstraintVersion
	c.RTarget = "~> foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Malformed constraint") {
		t.Fatalf("err: %s", err)
	}

	// Perform distinct_property validation
	c.Operand = ConstraintDistinctProperty
	c.RTarget = "0"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "count of 1 or greater") {
		t.Fatalf("err: %s", err)
	}

	// A negative count cannot be parsed as an unsigned integer.
	c.RTarget = "-1"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "to uint64") {
		t.Fatalf("err: %s", err)
	}

	// Perform distinct_hosts validation
	c.Operand = ConstraintDistinctHosts
	c.LTarget = ""
	c.RTarget = ""
	if err := c.Validate(); err != nil {
		t.Fatalf("expected valid constraint: %v", err)
	}

	// Perform set_contains* validation
	c.RTarget = ""
	for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} {
		c.Operand = o
		err = c.Validate()
		mErr = err.(*multierror.Error)
		if !strings.Contains(mErr.Errors[0].Error(), "requires an RTarget") {
			t.Fatalf("err: %s", err)
		}
	}

	// Perform LTarget validation
	c.Operand = ConstraintRegex
	c.RTarget = "foo"
	c.LTarget = ""
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "No LTarget") {
		t.Fatalf("err: %s", err)
	}

	// Perform constraint type validation
	c.Operand = "foo"
	err = c.Validate()
	mErr = err.(*multierror.Error)
	if !strings.Contains(mErr.Errors[0].Error(), "Unknown constraint type") {
		t.Fatalf("err: %s", err)
	}
}
  1602  
  1603  func TestAffinity_Validate(t *testing.T) {
  1604  
  1605  	type tc struct {
  1606  		affinity *Affinity
  1607  		err      error
  1608  		name     string
  1609  	}
  1610  
  1611  	testCases := []tc{
  1612  		{
  1613  			affinity: &Affinity{},
  1614  			err:      fmt.Errorf("Missing affinity operand"),
  1615  		},
  1616  		{
  1617  			affinity: &Affinity{
  1618  				Operand: "foo",
  1619  				LTarget: "${meta.node_class}",
  1620  				Weight:  10,
  1621  			},
  1622  			err: fmt.Errorf("Unknown affinity operator \"foo\""),
  1623  		},
  1624  		{
  1625  			affinity: &Affinity{
  1626  				Operand: "=",
  1627  				LTarget: "${meta.node_class}",
  1628  				Weight:  10,
  1629  			},
  1630  			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
  1631  		},
  1632  		{
  1633  			affinity: &Affinity{
  1634  				Operand: "=",
  1635  				LTarget: "${meta.node_class}",
  1636  				RTarget: "c4",
  1637  				Weight:  0,
  1638  			},
  1639  			err: fmt.Errorf("Affinity weight cannot be zero"),
  1640  		},
  1641  		{
  1642  			affinity: &Affinity{
  1643  				Operand: "=",
  1644  				LTarget: "${meta.node_class}",
  1645  				RTarget: "c4",
  1646  				Weight:  110,
  1647  			},
  1648  			err: fmt.Errorf("Affinity weight must be within the range [-100,100]"),
  1649  		},
  1650  		{
  1651  			affinity: &Affinity{
  1652  				Operand: "=",
  1653  				LTarget: "${node.class}",
  1654  				Weight:  10,
  1655  			},
  1656  			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
  1657  		},
  1658  		{
  1659  			affinity: &Affinity{
  1660  				Operand: "version",
  1661  				LTarget: "${meta.os}",
  1662  				RTarget: ">>2.0",
  1663  				Weight:  110,
  1664  			},
  1665  			err: fmt.Errorf("Version affinity is invalid"),
  1666  		},
  1667  		{
  1668  			affinity: &Affinity{
  1669  				Operand: "regexp",
  1670  				LTarget: "${meta.os}",
  1671  				RTarget: "\\K2.0",
  1672  				Weight:  100,
  1673  			},
  1674  			err: fmt.Errorf("Regular expression failed to compile"),
  1675  		},
  1676  	}
  1677  
  1678  	for _, tc := range testCases {
  1679  		t.Run(tc.name, func(t *testing.T) {
  1680  			err := tc.affinity.Validate()
  1681  			if tc.err != nil {
  1682  				require.NotNil(t, err)
  1683  				require.Contains(t, err.Error(), tc.err.Error())
  1684  			} else {
  1685  				require.Nil(t, err)
  1686  			}
  1687  		})
  1688  	}
  1689  }
  1690  
  1691  func TestUpdateStrategy_Validate(t *testing.T) {
  1692  	u := &UpdateStrategy{
  1693  		MaxParallel:      0,
  1694  		HealthCheck:      "foo",
  1695  		MinHealthyTime:   -10,
  1696  		HealthyDeadline:  -15,
  1697  		ProgressDeadline: -25,
  1698  		AutoRevert:       false,
  1699  		Canary:           -1,
  1700  	}
  1701  
  1702  	err := u.Validate()
  1703  	mErr := err.(*multierror.Error)
  1704  	if !strings.Contains(mErr.Errors[0].Error(), "Invalid health check given") {
  1705  		t.Fatalf("err: %s", err)
  1706  	}
  1707  	if !strings.Contains(mErr.Errors[1].Error(), "Max parallel can not be less than one") {
  1708  		t.Fatalf("err: %s", err)
  1709  	}
  1710  	if !strings.Contains(mErr.Errors[2].Error(), "Canary count can not be less than zero") {
  1711  		t.Fatalf("err: %s", err)
  1712  	}
  1713  	if !strings.Contains(mErr.Errors[3].Error(), "Minimum healthy time may not be less than zero") {
  1714  		t.Fatalf("err: %s", err)
  1715  	}
  1716  	if !strings.Contains(mErr.Errors[4].Error(), "Healthy deadline must be greater than zero") {
  1717  		t.Fatalf("err: %s", err)
  1718  	}
  1719  	if !strings.Contains(mErr.Errors[5].Error(), "Progress deadline must be zero or greater") {
  1720  		t.Fatalf("err: %s", err)
  1721  	}
  1722  	if !strings.Contains(mErr.Errors[6].Error(), "Minimum healthy time must be less than healthy deadline") {
  1723  		t.Fatalf("err: %s", err)
  1724  	}
  1725  	if !strings.Contains(mErr.Errors[7].Error(), "Healthy deadline must be less than progress deadline") {
  1726  		t.Fatalf("err: %s", err)
  1727  	}
  1728  }
  1729  
  1730  func TestResource_NetIndex(t *testing.T) {
  1731  	r := &Resources{
  1732  		Networks: []*NetworkResource{
  1733  			{Device: "eth0"},
  1734  			{Device: "lo0"},
  1735  			{Device: ""},
  1736  		},
  1737  	}
  1738  	if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 {
  1739  		t.Fatalf("Bad: %d", idx)
  1740  	}
  1741  	if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 {
  1742  		t.Fatalf("Bad: %d", idx)
  1743  	}
  1744  	if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 {
  1745  		t.Fatalf("Bad: %d", idx)
  1746  	}
  1747  }
  1748  
  1749  func TestResource_Superset(t *testing.T) {
  1750  	r1 := &Resources{
  1751  		CPU:      2000,
  1752  		MemoryMB: 2048,
  1753  		DiskMB:   10000,
  1754  	}
  1755  	r2 := &Resources{
  1756  		CPU:      2000,
  1757  		MemoryMB: 1024,
  1758  		DiskMB:   5000,
  1759  	}
  1760  
  1761  	if s, _ := r1.Superset(r1); !s {
  1762  		t.Fatalf("bad")
  1763  	}
  1764  	if s, _ := r1.Superset(r2); !s {
  1765  		t.Fatalf("bad")
  1766  	}
  1767  	if s, _ := r2.Superset(r1); s {
  1768  		t.Fatalf("bad")
  1769  	}
  1770  	if s, _ := r2.Superset(r2); !s {
  1771  		t.Fatalf("bad")
  1772  	}
  1773  }
  1774  
  1775  func TestResource_Add(t *testing.T) {
  1776  	r1 := &Resources{
  1777  		CPU:      2000,
  1778  		MemoryMB: 2048,
  1779  		DiskMB:   10000,
  1780  		Networks: []*NetworkResource{
  1781  			{
  1782  				CIDR:          "10.0.0.0/8",
  1783  				MBits:         100,
  1784  				ReservedPorts: []Port{{"ssh", 22}},
  1785  			},
  1786  		},
  1787  	}
  1788  	r2 := &Resources{
  1789  		CPU:      2000,
  1790  		MemoryMB: 1024,
  1791  		DiskMB:   5000,
  1792  		Networks: []*NetworkResource{
  1793  			{
  1794  				IP:            "10.0.0.1",
  1795  				MBits:         50,
  1796  				ReservedPorts: []Port{{"web", 80}},
  1797  			},
  1798  		},
  1799  	}
  1800  
  1801  	err := r1.Add(r2)
  1802  	if err != nil {
  1803  		t.Fatalf("Err: %v", err)
  1804  	}
  1805  
  1806  	expect := &Resources{
  1807  		CPU:      3000,
  1808  		MemoryMB: 3072,
  1809  		DiskMB:   15000,
  1810  		Networks: []*NetworkResource{
  1811  			{
  1812  				CIDR:          "10.0.0.0/8",
  1813  				MBits:         150,
  1814  				ReservedPorts: []Port{{"ssh", 22}, {"web", 80}},
  1815  			},
  1816  		},
  1817  	}
  1818  
  1819  	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
  1820  		t.Fatalf("bad: %#v %#v", expect, r1)
  1821  	}
  1822  }
  1823  
  1824  func TestResource_Add_Network(t *testing.T) {
  1825  	r1 := &Resources{}
  1826  	r2 := &Resources{
  1827  		Networks: []*NetworkResource{
  1828  			{
  1829  				MBits:        50,
  1830  				DynamicPorts: []Port{{"http", 0}, {"https", 0}},
  1831  			},
  1832  		},
  1833  	}
  1834  	r3 := &Resources{
  1835  		Networks: []*NetworkResource{
  1836  			{
  1837  				MBits:        25,
  1838  				DynamicPorts: []Port{{"admin", 0}},
  1839  			},
  1840  		},
  1841  	}
  1842  
  1843  	err := r1.Add(r2)
  1844  	if err != nil {
  1845  		t.Fatalf("Err: %v", err)
  1846  	}
  1847  	err = r1.Add(r3)
  1848  	if err != nil {
  1849  		t.Fatalf("Err: %v", err)
  1850  	}
  1851  
  1852  	expect := &Resources{
  1853  		Networks: []*NetworkResource{
  1854  			{
  1855  				MBits:        75,
  1856  				DynamicPorts: []Port{{"http", 0}, {"https", 0}, {"admin", 0}},
  1857  			},
  1858  		},
  1859  	}
  1860  
  1861  	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
  1862  		t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0])
  1863  	}
  1864  }
  1865  
// TestComparableResources_Subtract asserts that Subtract decrements CPU
// shares, memory, and shared disk in place on the receiver.
func TestComparableResources_Subtract(t *testing.T) {
	r1 := &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 2000,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 2048,
			},
			Networks: []*NetworkResource{
				{
					CIDR:          "10.0.0.0/8",
					MBits:         100,
					ReservedPorts: []Port{{"ssh", 22}},
				},
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: 10000,
		},
	}

	r2 := &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 1000,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 1024,
			},
			Networks: []*NetworkResource{
				{
					CIDR:          "10.0.0.0/8",
					MBits:         20,
					ReservedPorts: []Port{{"ssh", 22}},
				},
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: 5000,
		},
	}
	r1.Subtract(r2)

	// Note: the expected network MBits stay at r1's original 100 even
	// though r2 carries 20 — per this expectation, network bandwidth is
	// not reduced by Subtract.
	expect := &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 1000,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 1024,
			},
			Networks: []*NetworkResource{
				{
					CIDR:          "10.0.0.0/8",
					MBits:         100,
					ReservedPorts: []Port{{"ssh", 22}},
				},
			},
		},
		Shared: AllocatedSharedResources{
			DiskMB: 5000,
		},
	}

	require := require.New(t)
	require.Equal(expect, r1)
}
  1934  
  1935  func TestEncodeDecode(t *testing.T) {
  1936  	type FooRequest struct {
  1937  		Foo string
  1938  		Bar int
  1939  		Baz bool
  1940  	}
  1941  	arg := &FooRequest{
  1942  		Foo: "test",
  1943  		Bar: 42,
  1944  		Baz: true,
  1945  	}
  1946  	buf, err := Encode(1, arg)
  1947  	if err != nil {
  1948  		t.Fatalf("err: %v", err)
  1949  	}
  1950  
  1951  	var out FooRequest
  1952  	err = Decode(buf[1:], &out)
  1953  	if err != nil {
  1954  		t.Fatalf("err: %v", err)
  1955  	}
  1956  
  1957  	if !reflect.DeepEqual(arg, &out) {
  1958  		t.Fatalf("bad: %#v %#v", arg, out)
  1959  	}
  1960  }
  1961  
  1962  func BenchmarkEncodeDecode(b *testing.B) {
  1963  	job := testJob()
  1964  
  1965  	for i := 0; i < b.N; i++ {
  1966  		buf, err := Encode(1, job)
  1967  		if err != nil {
  1968  			b.Fatalf("err: %v", err)
  1969  		}
  1970  
  1971  		var out Job
  1972  		err = Decode(buf[1:], &out)
  1973  		if err != nil {
  1974  			b.Fatalf("err: %v", err)
  1975  		}
  1976  	}
  1977  }
  1978  
// TestInvalidServiceCheck exercises Service.Validate and Service.ValidateName
// across invalid check types, malformed service names, and checks that lack
// required ports or commands.
func TestInvalidServiceCheck(t *testing.T) {
	// Unknown check type.
	s := Service{
		Name:      "service-name",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name: "check-name",
				Type: "lol",
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (invalid type)")
	}

	// Dots are not allowed in service names.
	s = Service{
		Name:      "service.name",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (contains a dot): %v", err)
	}

	// Names may not begin with a hyphen.
	s = Service{
		Name:      "-my-service",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (begins with a hyphen): %v", err)
	}

	// Interpolation variables (which contain underscores) are allowed.
	s = Service{
		Name:      "my-service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("Service should be valid: %v", err)
	}

	// A literal underscore outside a variable name is rejected.
	s = Service{
		Name:      "my_service-${NOMAD_META_FOO}",
		PortLabel: "bar",
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err)
	}

	// 65 characters exceeds the maximum service name length.
	s = Service{
		Name:      "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456",
		PortLabel: "bar",
	}
	if err := s.ValidateName(s.Name); err == nil {
		t.Fatalf("Service should be invalid (too long): %v", err)
	}

	// tcp and http checks need a port; this service declares none.
	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name:     "check-tcp",
				Type:     ServiceCheckTCP,
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
			{
				Name:     "check-http",
				Type:     ServiceCheckHTTP,
				Path:     "/foo",
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err == nil {
		t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err)
	}

	// Script checks need no port, only a command.
	s = Service{
		Name: "service-name",
		Checks: []*ServiceCheck{
			{
				Name:     "check-script",
				Type:     ServiceCheckScript,
				Command:  "/bin/date",
				Interval: 5 * time.Second,
				Timeout:  2 * time.Second,
			},
		},
	}
	if err := s.Validate(); err != nil {
		t.Fatalf("un-expected error: %v", err)
	}
}
  2072  
  2073  func TestDistinctCheckID(t *testing.T) {
  2074  	c1 := ServiceCheck{
  2075  		Name:     "web-health",
  2076  		Type:     "http",
  2077  		Path:     "/health",
  2078  		Interval: 2 * time.Second,
  2079  		Timeout:  3 * time.Second,
  2080  	}
  2081  	c2 := ServiceCheck{
  2082  		Name:     "web-health",
  2083  		Type:     "http",
  2084  		Path:     "/health1",
  2085  		Interval: 2 * time.Second,
  2086  		Timeout:  3 * time.Second,
  2087  	}
  2088  
  2089  	c3 := ServiceCheck{
  2090  		Name:     "web-health",
  2091  		Type:     "http",
  2092  		Path:     "/health",
  2093  		Interval: 4 * time.Second,
  2094  		Timeout:  3 * time.Second,
  2095  	}
  2096  	serviceID := "123"
  2097  	c1Hash := c1.Hash(serviceID)
  2098  	c2Hash := c2.Hash(serviceID)
  2099  	c3Hash := c3.Hash(serviceID)
  2100  
  2101  	if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash {
  2102  		t.Fatalf("Checks need to be uniq c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash)
  2103  	}
  2104  
  2105  }
  2106  
  2107  func TestService_Canonicalize(t *testing.T) {
  2108  	job := "example"
  2109  	taskGroup := "cache"
  2110  	task := "redis"
  2111  
  2112  	s := Service{
  2113  		Name: "${TASK}-db",
  2114  	}
  2115  
  2116  	s.Canonicalize(job, taskGroup, task)
  2117  	if s.Name != "redis-db" {
  2118  		t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name)
  2119  	}
  2120  
  2121  	s.Name = "db"
  2122  	s.Canonicalize(job, taskGroup, task)
  2123  	if s.Name != "db" {
  2124  		t.Fatalf("Expected name: %v, Actual: %v", "redis-db", s.Name)
  2125  	}
  2126  
  2127  	s.Name = "${JOB}-${TASKGROUP}-${TASK}-db"
  2128  	s.Canonicalize(job, taskGroup, task)
  2129  	if s.Name != "example-cache-redis-db" {
  2130  		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
  2131  	}
  2132  
  2133  	s.Name = "${BASE}-db"
  2134  	s.Canonicalize(job, taskGroup, task)
  2135  	if s.Name != "example-cache-redis-db" {
  2136  		t.Fatalf("Expected name: %v, Actual: %v", "example-cache-redis-db", s.Name)
  2137  	}
  2138  
  2139  }
  2140  
  2141  func TestJob_ExpandServiceNames(t *testing.T) {
  2142  	j := &Job{
  2143  		Name: "my-job",
  2144  		TaskGroups: []*TaskGroup{
  2145  			{
  2146  				Name: "web",
  2147  				Tasks: []*Task{
  2148  					{
  2149  						Name: "frontend",
  2150  						Services: []*Service{
  2151  							{
  2152  								Name: "${BASE}-default",
  2153  							},
  2154  							{
  2155  								Name: "jmx",
  2156  							},
  2157  						},
  2158  					},
  2159  				},
  2160  			},
  2161  			{
  2162  				Name: "admin",
  2163  				Tasks: []*Task{
  2164  					{
  2165  						Name: "admin-web",
  2166  					},
  2167  				},
  2168  			},
  2169  		},
  2170  	}
  2171  
  2172  	j.Canonicalize()
  2173  
  2174  	service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name
  2175  	if service1Name != "my-job-web-frontend-default" {
  2176  		t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name)
  2177  	}
  2178  
  2179  	service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name
  2180  	if service2Name != "jmx" {
  2181  		t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name)
  2182  	}
  2183  
  2184  }
  2185  
  2186  func TestPeriodicConfig_EnabledInvalid(t *testing.T) {
  2187  	// Create a config that is enabled but with no interval specified.
  2188  	p := &PeriodicConfig{Enabled: true}
  2189  	if err := p.Validate(); err == nil {
  2190  		t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid")
  2191  	}
  2192  
  2193  	// Create a config that is enabled, with a spec but no type specified.
  2194  	p = &PeriodicConfig{Enabled: true, Spec: "foo"}
  2195  	if err := p.Validate(); err == nil {
  2196  		t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid")
  2197  	}
  2198  
  2199  	// Create a config that is enabled, with a spec type but no spec specified.
  2200  	p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron}
  2201  	if err := p.Validate(); err == nil {
  2202  		t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid")
  2203  	}
  2204  
  2205  	// Create a config that is enabled, with a bad time zone.
  2206  	p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"}
  2207  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") {
  2208  		t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
  2209  	}
  2210  }
  2211  
  2212  func TestPeriodicConfig_InvalidCron(t *testing.T) {
  2213  	specs := []string{"foo", "* *", "@foo"}
  2214  	for _, spec := range specs {
  2215  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  2216  		p.Canonicalize()
  2217  		if err := p.Validate(); err == nil {
  2218  			t.Fatal("Invalid cron spec")
  2219  		}
  2220  	}
  2221  }
  2222  
  2223  func TestPeriodicConfig_ValidCron(t *testing.T) {
  2224  	specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"}
  2225  	for _, spec := range specs {
  2226  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  2227  		p.Canonicalize()
  2228  		if err := p.Validate(); err != nil {
  2229  			t.Fatal("Passed valid cron")
  2230  		}
  2231  	}
  2232  }
  2233  
  2234  func TestPeriodicConfig_NextCron(t *testing.T) {
  2235  	require := require.New(t)
  2236  
  2237  	type testExpectation struct {
  2238  		Time     time.Time
  2239  		HasError bool
  2240  		ErrorMsg string
  2241  	}
  2242  
  2243  	from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC)
  2244  	specs := []string{"0 0 29 2 * 1980",
  2245  		"*/5 * * * *",
  2246  		"1 15-0 * * 1-5"}
  2247  	expected := []*testExpectation{
  2248  		{
  2249  			Time:     time.Time{},
  2250  			HasError: false,
  2251  		},
  2252  		{
  2253  			Time:     time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC),
  2254  			HasError: false,
  2255  		},
  2256  		{
  2257  			Time:     time.Time{},
  2258  			HasError: true,
  2259  			ErrorMsg: "failed parsing cron expression",
  2260  		},
  2261  	}
  2262  
  2263  	for i, spec := range specs {
  2264  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  2265  		p.Canonicalize()
  2266  		n, err := p.Next(from)
  2267  		nextExpected := expected[i]
  2268  
  2269  		require.Equal(nextExpected.Time, n)
  2270  		require.Equal(err != nil, nextExpected.HasError)
  2271  		if err != nil {
  2272  			require.True(strings.Contains(err.Error(), nextExpected.ErrorMsg))
  2273  		}
  2274  	}
  2275  }
  2276  
  2277  func TestPeriodicConfig_ValidTimeZone(t *testing.T) {
  2278  	zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"}
  2279  	for _, zone := range zones {
  2280  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone}
  2281  		p.Canonicalize()
  2282  		if err := p.Validate(); err != nil {
  2283  			t.Fatalf("Valid tz errored: %v", err)
  2284  		}
  2285  	}
  2286  }
  2287  
  2288  func TestPeriodicConfig_DST(t *testing.T) {
  2289  	require := require.New(t)
  2290  
  2291  	// On Sun, Mar 12, 2:00 am 2017: +1 hour UTC
  2292  	p := &PeriodicConfig{
  2293  		Enabled:  true,
  2294  		SpecType: PeriodicSpecCron,
  2295  		Spec:     "0 2 11-12 3 * 2017",
  2296  		TimeZone: "America/Los_Angeles",
  2297  	}
  2298  	p.Canonicalize()
  2299  
  2300  	t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
  2301  	t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)
  2302  
  2303  	// E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
  2304  	e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
  2305  	e2 := time.Date(2017, time.March, 12, 9, 0, 0, 0, time.UTC)
  2306  
  2307  	n1, err := p.Next(t1)
  2308  	require.Nil(err)
  2309  
  2310  	n2, err := p.Next(t2)
  2311  	require.Nil(err)
  2312  
  2313  	require.Equal(e1, n1.UTC())
  2314  	require.Equal(e2, n2.UTC())
  2315  }
  2316  
  2317  func TestRestartPolicy_Validate(t *testing.T) {
  2318  	// Policy with acceptable restart options passes
  2319  	p := &RestartPolicy{
  2320  		Mode:     RestartPolicyModeFail,
  2321  		Attempts: 0,
  2322  		Interval: 5 * time.Second,
  2323  	}
  2324  	if err := p.Validate(); err != nil {
  2325  		t.Fatalf("err: %v", err)
  2326  	}
  2327  
  2328  	// Policy with ambiguous restart options fails
  2329  	p = &RestartPolicy{
  2330  		Mode:     RestartPolicyModeDelay,
  2331  		Attempts: 0,
  2332  		Interval: 5 * time.Second,
  2333  	}
  2334  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") {
  2335  		t.Fatalf("expect ambiguity error, got: %v", err)
  2336  	}
  2337  
  2338  	// Bad policy mode fails
  2339  	p = &RestartPolicy{
  2340  		Mode:     "nope",
  2341  		Attempts: 1,
  2342  		Interval: 5 * time.Second,
  2343  	}
  2344  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") {
  2345  		t.Fatalf("expect mode error, got: %v", err)
  2346  	}
  2347  
  2348  	// Fails when attempts*delay does not fit inside interval
  2349  	p = &RestartPolicy{
  2350  		Mode:     RestartPolicyModeDelay,
  2351  		Attempts: 3,
  2352  		Delay:    5 * time.Second,
  2353  		Interval: 5 * time.Second,
  2354  	}
  2355  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") {
  2356  		t.Fatalf("expect restart interval error, got: %v", err)
  2357  	}
  2358  
  2359  	// Fails when interval is to small
  2360  	p = &RestartPolicy{
  2361  		Mode:     RestartPolicyModeDelay,
  2362  		Attempts: 3,
  2363  		Delay:    5 * time.Second,
  2364  		Interval: 2 * time.Second,
  2365  	}
  2366  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") {
  2367  		t.Fatalf("expect interval too small error, got: %v", err)
  2368  	}
  2369  }
  2370  
// TestReschedulePolicy_Validate is a table-driven test of
// ReschedulePolicy.Validate. A case with a nil `errors` slice is expected to
// validate cleanly; otherwise every listed error message must appear in the
// returned (multi)error. The "Invalid Attempts" cases additionally check the
// suggested-interval hint computed from the delay progression.
func TestReschedulePolicy_Validate(t *testing.T) {
	type testCase struct {
		desc             string
		ReschedulePolicy *ReschedulePolicy
		errors           []error
	}

	testCases := []testCase{
		{
			desc: "Nil",
		},
		{
			desc: "Disabled",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: 0,
				Interval: 0 * time.Second},
		},
		{
			desc: "Disabled",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts: -1,
				Interval: 5 * time.Minute},
		},
		{
			desc: "Valid Linear Delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      1,
				Interval:      5 * time.Minute,
				Delay:         10 * time.Second,
				DelayFunction: "constant"},
		},
		{
			desc: "Valid Exponential Delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      5,
				Interval:      1 * time.Hour,
				Delay:         30 * time.Second,
				MaxDelay:      5 * time.Minute,
				DelayFunction: "exponential"},
		},
		{
			desc: "Valid Fibonacci Delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      5,
				Interval:      15 * time.Minute,
				Delay:         10 * time.Second,
				MaxDelay:      5 * time.Minute,
				DelayFunction: "fibonacci"},
		},
		{
			desc: "Invalid delay function",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      1,
				Interval:      1 * time.Second,
				DelayFunction: "blah"},
			errors: []error{
				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
				fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions),
			},
		},
		{
			desc: "Invalid delay ceiling",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      1,
				Interval:      8 * time.Second,
				DelayFunction: "exponential",
				Delay:         15 * time.Second,
				MaxDelay:      5 * time.Second},
			errors: []error{
				fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)",
					15*time.Second, 5*time.Second),
			},
		},
		{
			desc: "Invalid delay and interval",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      1,
				Interval:      1 * time.Second,
				DelayFunction: "constant"},
			errors: []error{
				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
			},
		}, {
			// Should suggest 2h40m as the interval
			desc: "Invalid Attempts - linear delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      10,
				Interval:      1 * time.Hour,
				Delay:         20 * time.Minute,
				DelayFunction: "constant",
			},
			errors: []error{
				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+
					" delay function %q", 3, time.Hour, 20*time.Minute, "constant"),
				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
					200*time.Minute, 10),
			},
		},
		{
			// Should suggest 4h40m as the interval
			// Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
			desc: "Invalid Attempts - exponential delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      10,
				Interval:      30 * time.Minute,
				Delay:         5 * time.Minute,
				MaxDelay:      40 * time.Minute,
				DelayFunction: "exponential",
			},
			errors: []error{
				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
					"delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute,
					"exponential", 40*time.Minute),
				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
					280*time.Minute, 10),
			},
		},
		{
			// Should suggest 8h as the interval
			// Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
			desc: "Invalid Attempts - fibonacci delay",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      10,
				Interval:      1 * time.Hour,
				Delay:         20 * time.Minute,
				MaxDelay:      80 * time.Minute,
				DelayFunction: "fibonacci",
			},
			errors: []error{
				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
					"delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute,
					"fibonacci", 80*time.Minute),
				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
					480*time.Minute, 10),
			},
		},
		{
			desc: "Ambiguous Unlimited config, has both attempts and unlimited set",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      1,
				Unlimited:     true,
				DelayFunction: "exponential",
				Delay:         5 * time.Minute,
				MaxDelay:      1 * time.Hour,
			},
			errors: []error{
				fmt.Errorf("Interval must be a non zero value if Attempts > 0"),
				fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true),
			},
		},
		{
			desc: "Invalid Unlimited config",
			ReschedulePolicy: &ReschedulePolicy{
				Attempts:      1,
				Interval:      1 * time.Second,
				Unlimited:     true,
				DelayFunction: "exponential",
			},
			errors: []error{
				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
				fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
			},
		},
		{
			desc: "Valid Unlimited config",
			ReschedulePolicy: &ReschedulePolicy{
				Unlimited:     true,
				DelayFunction: "exponential",
				Delay:         5 * time.Second,
				MaxDelay:      1 * time.Hour,
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			require := require.New(t)
			gotErr := tc.ReschedulePolicy.Validate()
			if tc.errors != nil {
				// Validate all errors
				for _, err := range tc.errors {
					require.Contains(gotErr.Error(), err.Error())
				}
			} else {
				require.Nil(gotErr)
			}
		})
	}
}
  2562  
  2563  func TestAllocation_Index(t *testing.T) {
  2564  	a1 := Allocation{
  2565  		Name:      "example.cache[1]",
  2566  		TaskGroup: "cache",
  2567  		JobID:     "example",
  2568  		Job: &Job{
  2569  			ID:         "example",
  2570  			TaskGroups: []*TaskGroup{{Name: "cache"}}},
  2571  	}
  2572  	e1 := uint(1)
  2573  	a2 := a1.Copy()
  2574  	a2.Name = "example.cache[713127]"
  2575  	e2 := uint(713127)
  2576  
  2577  	if a1.Index() != e1 || a2.Index() != e2 {
  2578  		t.Fatalf("Got %d and %d", a1.Index(), a2.Index())
  2579  	}
  2580  }
  2581  
  2582  func TestTaskArtifact_Validate_Source(t *testing.T) {
  2583  	valid := &TaskArtifact{GetterSource: "google.com"}
  2584  	if err := valid.Validate(); err != nil {
  2585  		t.Fatalf("unexpected error: %v", err)
  2586  	}
  2587  }
  2588  
  2589  func TestTaskArtifact_Validate_Dest(t *testing.T) {
  2590  	valid := &TaskArtifact{GetterSource: "google.com"}
  2591  	if err := valid.Validate(); err != nil {
  2592  		t.Fatalf("unexpected error: %v", err)
  2593  	}
  2594  
  2595  	valid.RelativeDest = "local/"
  2596  	if err := valid.Validate(); err != nil {
  2597  		t.Fatalf("unexpected error: %v", err)
  2598  	}
  2599  
  2600  	valid.RelativeDest = "local/.."
  2601  	if err := valid.Validate(); err != nil {
  2602  		t.Fatalf("unexpected error: %v", err)
  2603  	}
  2604  
  2605  	valid.RelativeDest = "local/../../.."
  2606  	if err := valid.Validate(); err == nil {
  2607  		t.Fatalf("expected error: %v", err)
  2608  	}
  2609  }
  2610  
// TestTaskArtifact_Hash asserts an artifact's hash changes when any of the
// fields change.
func TestTaskArtifact_Hash(t *testing.T) {
	t.Parallel()

	// Each case differs from its predecessor in exactly one field (source,
	// options, mode, or destination), so all hashes must be distinct.
	cases := []TaskArtifact{
		{},
		{
			GetterSource: "a",
		},
		{
			GetterSource: "b",
		},
		{
			GetterSource:  "b",
			GetterOptions: map[string]string{"c": "c"},
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "d",
			},
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
			GetterMode: "f",
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
			GetterMode: "g",
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
			GetterMode:   "g",
			RelativeDest: "h",
		},
		{
			GetterSource: "b",
			GetterOptions: map[string]string{
				"c": "c",
				"d": "e",
			},
			GetterMode:   "g",
			RelativeDest: "i",
		},
	}

	// Map of hash to source
	hashes := make(map[string]TaskArtifact, len(cases))
	for _, tc := range cases {
		h := tc.Hash()

		// Hash should be deterministic
		require.Equal(t, h, tc.Hash())

		// Hash should be unique
		if orig, ok := hashes[h]; ok {
			require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n",
				pretty.Sprint(tc), pretty.Sprint(orig),
			)
		}
		hashes[h] = tc
	}

	// One entry per case proves no collisions occurred.
	require.Len(t, hashes, len(cases))
}
  2697  
  2698  func TestAllocation_ShouldMigrate(t *testing.T) {
  2699  	alloc := Allocation{
  2700  		PreviousAllocation: "123",
  2701  		TaskGroup:          "foo",
  2702  		Job: &Job{
  2703  			TaskGroups: []*TaskGroup{
  2704  				{
  2705  					Name: "foo",
  2706  					EphemeralDisk: &EphemeralDisk{
  2707  						Migrate: true,
  2708  						Sticky:  true,
  2709  					},
  2710  				},
  2711  			},
  2712  		},
  2713  	}
  2714  
  2715  	if !alloc.ShouldMigrate() {
  2716  		t.Fatalf("bad: %v", alloc)
  2717  	}
  2718  
  2719  	alloc1 := Allocation{
  2720  		PreviousAllocation: "123",
  2721  		TaskGroup:          "foo",
  2722  		Job: &Job{
  2723  			TaskGroups: []*TaskGroup{
  2724  				{
  2725  					Name:          "foo",
  2726  					EphemeralDisk: &EphemeralDisk{},
  2727  				},
  2728  			},
  2729  		},
  2730  	}
  2731  
  2732  	if alloc1.ShouldMigrate() {
  2733  		t.Fatalf("bad: %v", alloc)
  2734  	}
  2735  
  2736  	alloc2 := Allocation{
  2737  		PreviousAllocation: "123",
  2738  		TaskGroup:          "foo",
  2739  		Job: &Job{
  2740  			TaskGroups: []*TaskGroup{
  2741  				{
  2742  					Name: "foo",
  2743  					EphemeralDisk: &EphemeralDisk{
  2744  						Sticky:  false,
  2745  						Migrate: true,
  2746  					},
  2747  				},
  2748  			},
  2749  		},
  2750  	}
  2751  
  2752  	if alloc2.ShouldMigrate() {
  2753  		t.Fatalf("bad: %v", alloc)
  2754  	}
  2755  
  2756  	alloc3 := Allocation{
  2757  		PreviousAllocation: "123",
  2758  		TaskGroup:          "foo",
  2759  		Job: &Job{
  2760  			TaskGroups: []*TaskGroup{
  2761  				{
  2762  					Name: "foo",
  2763  				},
  2764  			},
  2765  		},
  2766  	}
  2767  
  2768  	if alloc3.ShouldMigrate() {
  2769  		t.Fatalf("bad: %v", alloc)
  2770  	}
  2771  
  2772  	// No previous
  2773  	alloc4 := Allocation{
  2774  		TaskGroup: "foo",
  2775  		Job: &Job{
  2776  			TaskGroups: []*TaskGroup{
  2777  				{
  2778  					Name: "foo",
  2779  					EphemeralDisk: &EphemeralDisk{
  2780  						Migrate: true,
  2781  						Sticky:  true,
  2782  					},
  2783  				},
  2784  			},
  2785  		},
  2786  	}
  2787  
  2788  	if alloc4.ShouldMigrate() {
  2789  		t.Fatalf("bad: %v", alloc4)
  2790  	}
  2791  }
  2792  
  2793  func TestTaskArtifact_Validate_Checksum(t *testing.T) {
  2794  	cases := []struct {
  2795  		Input *TaskArtifact
  2796  		Err   bool
  2797  	}{
  2798  		{
  2799  			&TaskArtifact{
  2800  				GetterSource: "foo.com",
  2801  				GetterOptions: map[string]string{
  2802  					"checksum": "no-type",
  2803  				},
  2804  			},
  2805  			true,
  2806  		},
  2807  		{
  2808  			&TaskArtifact{
  2809  				GetterSource: "foo.com",
  2810  				GetterOptions: map[string]string{
  2811  					"checksum": "md5:toosmall",
  2812  				},
  2813  			},
  2814  			true,
  2815  		},
  2816  		{
  2817  			&TaskArtifact{
  2818  				GetterSource: "foo.com",
  2819  				GetterOptions: map[string]string{
  2820  					"checksum": "invalid:type",
  2821  				},
  2822  			},
  2823  			true,
  2824  		},
  2825  		{
  2826  			&TaskArtifact{
  2827  				GetterSource: "foo.com",
  2828  				GetterOptions: map[string]string{
  2829  					"checksum": "md5:${ARTIFACT_CHECKSUM}",
  2830  				},
  2831  			},
  2832  			false,
  2833  		},
  2834  	}
  2835  
  2836  	for i, tc := range cases {
  2837  		err := tc.Input.Validate()
  2838  		if (err != nil) != tc.Err {
  2839  			t.Fatalf("case %d: %v", i, err)
  2840  			continue
  2841  		}
  2842  	}
  2843  }
  2844  
  2845  func TestAllocation_Terminated(t *testing.T) {
  2846  	type desiredState struct {
  2847  		ClientStatus  string
  2848  		DesiredStatus string
  2849  		Terminated    bool
  2850  	}
  2851  
  2852  	harness := []desiredState{
  2853  		{
  2854  			ClientStatus:  AllocClientStatusPending,
  2855  			DesiredStatus: AllocDesiredStatusStop,
  2856  			Terminated:    false,
  2857  		},
  2858  		{
  2859  			ClientStatus:  AllocClientStatusRunning,
  2860  			DesiredStatus: AllocDesiredStatusStop,
  2861  			Terminated:    false,
  2862  		},
  2863  		{
  2864  			ClientStatus:  AllocClientStatusFailed,
  2865  			DesiredStatus: AllocDesiredStatusStop,
  2866  			Terminated:    true,
  2867  		},
  2868  		{
  2869  			ClientStatus:  AllocClientStatusFailed,
  2870  			DesiredStatus: AllocDesiredStatusRun,
  2871  			Terminated:    true,
  2872  		},
  2873  	}
  2874  
  2875  	for _, state := range harness {
  2876  		alloc := Allocation{}
  2877  		alloc.DesiredStatus = state.DesiredStatus
  2878  		alloc.ClientStatus = state.ClientStatus
  2879  		if alloc.Terminated() != state.Terminated {
  2880  			t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated())
  2881  		}
  2882  	}
  2883  }
  2884  
  2885  func TestAllocation_ShouldReschedule(t *testing.T) {
  2886  	type testCase struct {
  2887  		Desc               string
  2888  		FailTime           time.Time
  2889  		ClientStatus       string
  2890  		DesiredStatus      string
  2891  		ReschedulePolicy   *ReschedulePolicy
  2892  		RescheduleTrackers []*RescheduleEvent
  2893  		ShouldReschedule   bool
  2894  	}
  2895  
  2896  	fail := time.Now()
  2897  
  2898  	harness := []testCase{
  2899  		{
  2900  			Desc:             "Reschedule when desired state is stop",
  2901  			ClientStatus:     AllocClientStatusPending,
  2902  			DesiredStatus:    AllocDesiredStatusStop,
  2903  			FailTime:         fail,
  2904  			ReschedulePolicy: nil,
  2905  			ShouldReschedule: false,
  2906  		},
  2907  		{
  2908  			Desc:             "Disabled rescheduling",
  2909  			ClientStatus:     AllocClientStatusFailed,
  2910  			DesiredStatus:    AllocDesiredStatusRun,
  2911  			FailTime:         fail,
  2912  			ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute},
  2913  			ShouldReschedule: false,
  2914  		},
  2915  		{
  2916  			Desc:             "Reschedule when client status is complete",
  2917  			ClientStatus:     AllocClientStatusComplete,
  2918  			DesiredStatus:    AllocDesiredStatusRun,
  2919  			FailTime:         fail,
  2920  			ReschedulePolicy: nil,
  2921  			ShouldReschedule: false,
  2922  		},
  2923  		{
  2924  			Desc:             "Reschedule with nil reschedule policy",
  2925  			ClientStatus:     AllocClientStatusFailed,
  2926  			DesiredStatus:    AllocDesiredStatusRun,
  2927  			FailTime:         fail,
  2928  			ReschedulePolicy: nil,
  2929  			ShouldReschedule: false,
  2930  		},
  2931  		{
  2932  			Desc:             "Reschedule with unlimited and attempts >0",
  2933  			ClientStatus:     AllocClientStatusFailed,
  2934  			DesiredStatus:    AllocDesiredStatusRun,
  2935  			FailTime:         fail,
  2936  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true},
  2937  			ShouldReschedule: true,
  2938  		},
  2939  		{
  2940  			Desc:             "Reschedule when client status is complete",
  2941  			ClientStatus:     AllocClientStatusComplete,
  2942  			DesiredStatus:    AllocDesiredStatusRun,
  2943  			FailTime:         fail,
  2944  			ReschedulePolicy: nil,
  2945  			ShouldReschedule: false,
  2946  		},
  2947  		{
  2948  			Desc:             "Reschedule with policy when client status complete",
  2949  			ClientStatus:     AllocClientStatusComplete,
  2950  			DesiredStatus:    AllocDesiredStatusRun,
  2951  			FailTime:         fail,
  2952  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
  2953  			ShouldReschedule: false,
  2954  		},
  2955  		{
  2956  			Desc:             "Reschedule with no previous attempts",
  2957  			ClientStatus:     AllocClientStatusFailed,
  2958  			DesiredStatus:    AllocDesiredStatusRun,
  2959  			FailTime:         fail,
  2960  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
  2961  			ShouldReschedule: true,
  2962  		},
  2963  		{
  2964  			Desc:             "Reschedule with leftover attempts",
  2965  			ClientStatus:     AllocClientStatusFailed,
  2966  			DesiredStatus:    AllocDesiredStatusRun,
  2967  			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
  2968  			FailTime:         fail,
  2969  			RescheduleTrackers: []*RescheduleEvent{
  2970  				{
  2971  					RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(),
  2972  				},
  2973  			},
  2974  			ShouldReschedule: true,
  2975  		},
  2976  		{
  2977  			Desc:             "Reschedule with too old previous attempts",
  2978  			ClientStatus:     AllocClientStatusFailed,
  2979  			DesiredStatus:    AllocDesiredStatusRun,
  2980  			FailTime:         fail,
  2981  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute},
  2982  			RescheduleTrackers: []*RescheduleEvent{
  2983  				{
  2984  					RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(),
  2985  				},
  2986  			},
  2987  			ShouldReschedule: true,
  2988  		},
  2989  		{
  2990  			Desc:             "Reschedule with no leftover attempts",
  2991  			ClientStatus:     AllocClientStatusFailed,
  2992  			DesiredStatus:    AllocDesiredStatusRun,
  2993  			FailTime:         fail,
  2994  			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
  2995  			RescheduleTrackers: []*RescheduleEvent{
  2996  				{
  2997  					RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
  2998  				},
  2999  				{
  3000  					RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(),
  3001  				},
  3002  			},
  3003  			ShouldReschedule: false,
  3004  		},
  3005  	}
  3006  
  3007  	for _, state := range harness {
  3008  		alloc := Allocation{}
  3009  		alloc.DesiredStatus = state.DesiredStatus
  3010  		alloc.ClientStatus = state.ClientStatus
  3011  		alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers}
  3012  
  3013  		t.Run(state.Desc, func(t *testing.T) {
  3014  			if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule {
  3015  				t.Fatalf("expected %v but got %v", state.ShouldReschedule, got)
  3016  			}
  3017  		})
  3018  
  3019  	}
  3020  }
  3021  
// TestAllocation_LastEventTime checks that LastEventTime reports the latest
// task FinishedAt across all task states, falling back to the allocation's
// modify time when no task has finished.
func TestAllocation_LastEventTime(t *testing.T) {
	type testCase struct {
		desc                  string
		taskState             map[string]*TaskState
		expectedLastEventTime time.Time
	}

	t1 := time.Now().UTC()

	testCases := []testCase{
		{
			desc:                  "nil task state",
			expectedLastEventTime: t1,
		},
		{
			desc:                  "empty task state",
			taskState:             make(map[string]*TaskState),
			expectedLastEventTime: t1,
		},
		{
			// Started but never finished: fall back to the modify time.
			desc: "Finished At not set",
			taskState: map[string]*TaskState{"foo": {State: "start",
				StartedAt: t1.Add(-2 * time.Hour)}},
			expectedLastEventTime: t1,
		},
		{
			desc: "One finished ",
			taskState: map[string]*TaskState{"foo": {State: "start",
				StartedAt:  t1.Add(-2 * time.Hour),
				FinishedAt: t1.Add(-1 * time.Hour)}},
			expectedLastEventTime: t1.Add(-1 * time.Hour),
		},
		{
			// The most recent FinishedAt across tasks wins.
			desc: "Multiple task groups",
			taskState: map[string]*TaskState{"foo": {State: "start",
				StartedAt:  t1.Add(-2 * time.Hour),
				FinishedAt: t1.Add(-1 * time.Hour)},
				"bar": {State: "start",
					StartedAt:  t1.Add(-2 * time.Hour),
					FinishedAt: t1.Add(-40 * time.Minute)}},
			expectedLastEventTime: t1.Add(-40 * time.Minute),
		},
		{
			// Task events alone do not count; only FinishedAt does.
			desc: "No finishedAt set, one task event, should use modify time",
			taskState: map[string]*TaskState{"foo": {
				State:     "run",
				StartedAt: t1.Add(-2 * time.Hour),
				Events: []*TaskEvent{
					{Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()},
				}},
			},
			expectedLastEventTime: t1,
		},
	}
	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()}
			alloc.TaskStates = tc.taskState
			require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime())
		})
	}
}
  3084  
  3085  func TestAllocation_NextDelay(t *testing.T) {
  3086  	type testCase struct {
  3087  		desc                       string
  3088  		reschedulePolicy           *ReschedulePolicy
  3089  		alloc                      *Allocation
  3090  		expectedRescheduleTime     time.Time
  3091  		expectedRescheduleEligible bool
  3092  	}
  3093  	now := time.Now()
  3094  	testCases := []testCase{
  3095  		{
  3096  			desc: "Allocation hasn't failed yet",
  3097  			reschedulePolicy: &ReschedulePolicy{
  3098  				DelayFunction: "constant",
  3099  				Delay:         5 * time.Second,
  3100  			},
  3101  			alloc:                      &Allocation{},
  3102  			expectedRescheduleTime:     time.Time{},
  3103  			expectedRescheduleEligible: false,
  3104  		},
  3105  		{
  3106  			desc:                       "Allocation has no reschedule policy",
  3107  			alloc:                      &Allocation{},
  3108  			expectedRescheduleTime:     time.Time{},
  3109  			expectedRescheduleEligible: false,
  3110  		},
  3111  		{
  3112  			desc: "Allocation lacks task state",
  3113  			reschedulePolicy: &ReschedulePolicy{
  3114  				DelayFunction: "constant",
  3115  				Delay:         5 * time.Second,
  3116  				Unlimited:     true,
  3117  			},
  3118  			alloc:                      &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()},
  3119  			expectedRescheduleTime:     now.UTC().Add(5 * time.Second),
  3120  			expectedRescheduleEligible: true,
  3121  		},
  3122  		{
  3123  			desc: "linear delay, unlimited restarts, no reschedule tracker",
  3124  			reschedulePolicy: &ReschedulePolicy{
  3125  				DelayFunction: "constant",
  3126  				Delay:         5 * time.Second,
  3127  				Unlimited:     true,
  3128  			},
  3129  			alloc: &Allocation{
  3130  				ClientStatus: AllocClientStatusFailed,
  3131  				TaskStates: map[string]*TaskState{"foo": {State: "dead",
  3132  					StartedAt:  now.Add(-1 * time.Hour),
  3133  					FinishedAt: now.Add(-2 * time.Second)}},
  3134  			},
  3135  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  3136  			expectedRescheduleEligible: true,
  3137  		},
  3138  		{
  3139  			desc: "linear delay with reschedule tracker",
  3140  			reschedulePolicy: &ReschedulePolicy{
  3141  				DelayFunction: "constant",
  3142  				Delay:         5 * time.Second,
  3143  				Interval:      10 * time.Minute,
  3144  				Attempts:      2,
  3145  			},
  3146  			alloc: &Allocation{
  3147  				ClientStatus: AllocClientStatusFailed,
  3148  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3149  					StartedAt:  now.Add(-1 * time.Hour),
  3150  					FinishedAt: now.Add(-2 * time.Second)}},
  3151  				RescheduleTracker: &RescheduleTracker{
  3152  					Events: []*RescheduleEvent{{
  3153  						RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
  3154  						Delay:          5 * time.Second,
  3155  					}},
  3156  				}},
  3157  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  3158  			expectedRescheduleEligible: true,
  3159  		},
  3160  		{
  3161  			desc: "linear delay with reschedule tracker, attempts exhausted",
  3162  			reschedulePolicy: &ReschedulePolicy{
  3163  				DelayFunction: "constant",
  3164  				Delay:         5 * time.Second,
  3165  				Interval:      10 * time.Minute,
  3166  				Attempts:      2,
  3167  			},
  3168  			alloc: &Allocation{
  3169  				ClientStatus: AllocClientStatusFailed,
  3170  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3171  					StartedAt:  now.Add(-1 * time.Hour),
  3172  					FinishedAt: now.Add(-2 * time.Second)}},
  3173  				RescheduleTracker: &RescheduleTracker{
  3174  					Events: []*RescheduleEvent{
  3175  						{
  3176  							RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(),
  3177  							Delay:          5 * time.Second,
  3178  						},
  3179  						{
  3180  							RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
  3181  							Delay:          5 * time.Second,
  3182  						},
  3183  					},
  3184  				}},
  3185  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  3186  			expectedRescheduleEligible: false,
  3187  		},
  3188  		{
  3189  			desc: "exponential delay - no reschedule tracker",
  3190  			reschedulePolicy: &ReschedulePolicy{
  3191  				DelayFunction: "exponential",
  3192  				Delay:         5 * time.Second,
  3193  				MaxDelay:      90 * time.Second,
  3194  				Unlimited:     true,
  3195  			},
  3196  			alloc: &Allocation{
  3197  				ClientStatus: AllocClientStatusFailed,
  3198  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3199  					StartedAt:  now.Add(-1 * time.Hour),
  3200  					FinishedAt: now.Add(-2 * time.Second)}},
  3201  			},
  3202  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  3203  			expectedRescheduleEligible: true,
  3204  		},
  3205  		{
  3206  			desc: "exponential delay with reschedule tracker",
  3207  			reschedulePolicy: &ReschedulePolicy{
  3208  				DelayFunction: "exponential",
  3209  				Delay:         5 * time.Second,
  3210  				MaxDelay:      90 * time.Second,
  3211  				Unlimited:     true,
  3212  			},
  3213  			alloc: &Allocation{
  3214  				ClientStatus: AllocClientStatusFailed,
  3215  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3216  					StartedAt:  now.Add(-1 * time.Hour),
  3217  					FinishedAt: now.Add(-2 * time.Second)}},
  3218  				RescheduleTracker: &RescheduleTracker{
  3219  					Events: []*RescheduleEvent{
  3220  						{
  3221  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3222  							Delay:          5 * time.Second,
  3223  						},
  3224  						{
  3225  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3226  							Delay:          10 * time.Second,
  3227  						},
  3228  						{
  3229  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3230  							Delay:          20 * time.Second,
  3231  						},
  3232  					},
  3233  				}},
  3234  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
  3235  			expectedRescheduleEligible: true,
  3236  		},
  3237  		{
  3238  			desc: "exponential delay with delay ceiling reached",
  3239  			reschedulePolicy: &ReschedulePolicy{
  3240  				DelayFunction: "exponential",
  3241  				Delay:         5 * time.Second,
  3242  				MaxDelay:      90 * time.Second,
  3243  				Unlimited:     true,
  3244  			},
  3245  			alloc: &Allocation{
  3246  				ClientStatus: AllocClientStatusFailed,
  3247  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3248  					StartedAt:  now.Add(-1 * time.Hour),
  3249  					FinishedAt: now.Add(-15 * time.Second)}},
  3250  				RescheduleTracker: &RescheduleTracker{
  3251  					Events: []*RescheduleEvent{
  3252  						{
  3253  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3254  							Delay:          5 * time.Second,
  3255  						},
  3256  						{
  3257  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3258  							Delay:          10 * time.Second,
  3259  						},
  3260  						{
  3261  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3262  							Delay:          20 * time.Second,
  3263  						},
  3264  						{
  3265  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3266  							Delay:          40 * time.Second,
  3267  						},
  3268  						{
  3269  							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
  3270  							Delay:          80 * time.Second,
  3271  						},
  3272  					},
  3273  				}},
  3274  			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(90 * time.Second),
  3275  			expectedRescheduleEligible: true,
  3276  		},
  3277  		{
  3278  			// Test case where most recent reschedule ran longer than delay ceiling
  3279  			desc: "exponential delay, delay ceiling reset condition met",
  3280  			reschedulePolicy: &ReschedulePolicy{
  3281  				DelayFunction: "exponential",
  3282  				Delay:         5 * time.Second,
  3283  				MaxDelay:      90 * time.Second,
  3284  				Unlimited:     true,
  3285  			},
  3286  			alloc: &Allocation{
  3287  				ClientStatus: AllocClientStatusFailed,
  3288  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3289  					StartedAt:  now.Add(-1 * time.Hour),
  3290  					FinishedAt: now.Add(-15 * time.Minute)}},
  3291  				RescheduleTracker: &RescheduleTracker{
  3292  					Events: []*RescheduleEvent{
  3293  						{
  3294  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3295  							Delay:          5 * time.Second,
  3296  						},
  3297  						{
  3298  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3299  							Delay:          10 * time.Second,
  3300  						},
  3301  						{
  3302  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3303  							Delay:          20 * time.Second,
  3304  						},
  3305  						{
  3306  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3307  							Delay:          40 * time.Second,
  3308  						},
  3309  						{
  3310  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3311  							Delay:          80 * time.Second,
  3312  						},
  3313  						{
  3314  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3315  							Delay:          90 * time.Second,
  3316  						},
  3317  						{
  3318  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3319  							Delay:          90 * time.Second,
  3320  						},
  3321  					},
  3322  				}},
  3323  			expectedRescheduleTime:     now.Add(-15 * time.Minute).Add(5 * time.Second),
  3324  			expectedRescheduleEligible: true,
  3325  		},
  3326  		{
  3327  			desc: "fibonacci delay - no reschedule tracker",
  3328  			reschedulePolicy: &ReschedulePolicy{
  3329  				DelayFunction: "fibonacci",
  3330  				Delay:         5 * time.Second,
  3331  				MaxDelay:      90 * time.Second,
  3332  				Unlimited:     true,
  3333  			},
  3334  			alloc: &Allocation{
  3335  				ClientStatus: AllocClientStatusFailed,
  3336  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3337  					StartedAt:  now.Add(-1 * time.Hour),
  3338  					FinishedAt: now.Add(-2 * time.Second)}}},
  3339  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  3340  			expectedRescheduleEligible: true,
  3341  		},
  3342  		{
  3343  			desc: "fibonacci delay with reschedule tracker",
  3344  			reschedulePolicy: &ReschedulePolicy{
  3345  				DelayFunction: "fibonacci",
  3346  				Delay:         5 * time.Second,
  3347  				MaxDelay:      90 * time.Second,
  3348  				Unlimited:     true,
  3349  			},
  3350  			alloc: &Allocation{
  3351  				ClientStatus: AllocClientStatusFailed,
  3352  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3353  					StartedAt:  now.Add(-1 * time.Hour),
  3354  					FinishedAt: now.Add(-2 * time.Second)}},
  3355  				RescheduleTracker: &RescheduleTracker{
  3356  					Events: []*RescheduleEvent{
  3357  						{
  3358  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3359  							Delay:          5 * time.Second,
  3360  						},
  3361  						{
  3362  							RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(),
  3363  							Delay:          5 * time.Second,
  3364  						},
  3365  					},
  3366  				}},
  3367  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(10 * time.Second),
  3368  			expectedRescheduleEligible: true,
  3369  		},
  3370  		{
  3371  			desc: "fibonacci delay with more events",
  3372  			reschedulePolicy: &ReschedulePolicy{
  3373  				DelayFunction: "fibonacci",
  3374  				Delay:         5 * time.Second,
  3375  				MaxDelay:      90 * time.Second,
  3376  				Unlimited:     true,
  3377  			},
  3378  			alloc: &Allocation{
  3379  				ClientStatus: AllocClientStatusFailed,
  3380  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3381  					StartedAt:  now.Add(-1 * time.Hour),
  3382  					FinishedAt: now.Add(-2 * time.Second)}},
  3383  				RescheduleTracker: &RescheduleTracker{
  3384  					Events: []*RescheduleEvent{
  3385  						{
  3386  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3387  							Delay:          5 * time.Second,
  3388  						},
  3389  						{
  3390  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3391  							Delay:          5 * time.Second,
  3392  						},
  3393  						{
  3394  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3395  							Delay:          10 * time.Second,
  3396  						},
  3397  						{
  3398  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3399  							Delay:          15 * time.Second,
  3400  						},
  3401  						{
  3402  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3403  							Delay:          25 * time.Second,
  3404  						},
  3405  					},
  3406  				}},
  3407  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
  3408  			expectedRescheduleEligible: true,
  3409  		},
  3410  		{
  3411  			desc: "fibonacci delay with delay ceiling reached",
  3412  			reschedulePolicy: &ReschedulePolicy{
  3413  				DelayFunction: "fibonacci",
  3414  				Delay:         5 * time.Second,
  3415  				MaxDelay:      50 * time.Second,
  3416  				Unlimited:     true,
  3417  			},
  3418  			alloc: &Allocation{
  3419  				ClientStatus: AllocClientStatusFailed,
  3420  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3421  					StartedAt:  now.Add(-1 * time.Hour),
  3422  					FinishedAt: now.Add(-15 * time.Second)}},
  3423  				RescheduleTracker: &RescheduleTracker{
  3424  					Events: []*RescheduleEvent{
  3425  						{
  3426  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3427  							Delay:          5 * time.Second,
  3428  						},
  3429  						{
  3430  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3431  							Delay:          5 * time.Second,
  3432  						},
  3433  						{
  3434  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3435  							Delay:          10 * time.Second,
  3436  						},
  3437  						{
  3438  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3439  							Delay:          15 * time.Second,
  3440  						},
  3441  						{
  3442  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3443  							Delay:          25 * time.Second,
  3444  						},
  3445  						{
  3446  							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
  3447  							Delay:          40 * time.Second,
  3448  						},
  3449  					},
  3450  				}},
  3451  			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(50 * time.Second),
  3452  			expectedRescheduleEligible: true,
  3453  		},
  3454  		{
  3455  			desc: "fibonacci delay with delay reset condition met",
  3456  			reschedulePolicy: &ReschedulePolicy{
  3457  				DelayFunction: "fibonacci",
  3458  				Delay:         5 * time.Second,
  3459  				MaxDelay:      50 * time.Second,
  3460  				Unlimited:     true,
  3461  			},
  3462  			alloc: &Allocation{
  3463  				ClientStatus: AllocClientStatusFailed,
  3464  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3465  					StartedAt:  now.Add(-1 * time.Hour),
  3466  					FinishedAt: now.Add(-5 * time.Minute)}},
  3467  				RescheduleTracker: &RescheduleTracker{
  3468  					Events: []*RescheduleEvent{
  3469  						{
  3470  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3471  							Delay:          5 * time.Second,
  3472  						},
  3473  						{
  3474  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3475  							Delay:          5 * time.Second,
  3476  						},
  3477  						{
  3478  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3479  							Delay:          10 * time.Second,
  3480  						},
  3481  						{
  3482  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3483  							Delay:          15 * time.Second,
  3484  						},
  3485  						{
  3486  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3487  							Delay:          25 * time.Second,
  3488  						},
  3489  						{
  3490  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3491  							Delay:          40 * time.Second,
  3492  						},
  3493  					},
  3494  				}},
  3495  			expectedRescheduleTime:     now.Add(-5 * time.Minute).Add(5 * time.Second),
  3496  			expectedRescheduleEligible: true,
  3497  		},
  3498  		{
  3499  			desc: "fibonacci delay with the most recent event that reset delay value",
  3500  			reschedulePolicy: &ReschedulePolicy{
  3501  				DelayFunction: "fibonacci",
  3502  				Delay:         5 * time.Second,
  3503  				MaxDelay:      50 * time.Second,
  3504  				Unlimited:     true,
  3505  			},
  3506  			alloc: &Allocation{
  3507  				ClientStatus: AllocClientStatusFailed,
  3508  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  3509  					StartedAt:  now.Add(-1 * time.Hour),
  3510  					FinishedAt: now.Add(-5 * time.Second)}},
  3511  				RescheduleTracker: &RescheduleTracker{
  3512  					Events: []*RescheduleEvent{
  3513  						{
  3514  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  3515  							Delay:          5 * time.Second,
  3516  						},
  3517  						{
  3518  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3519  							Delay:          5 * time.Second,
  3520  						},
  3521  						{
  3522  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3523  							Delay:          10 * time.Second,
  3524  						},
  3525  						{
  3526  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3527  							Delay:          15 * time.Second,
  3528  						},
  3529  						{
  3530  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3531  							Delay:          25 * time.Second,
  3532  						},
  3533  						{
  3534  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3535  							Delay:          40 * time.Second,
  3536  						},
  3537  						{
  3538  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  3539  							Delay:          50 * time.Second,
  3540  						},
  3541  						{
  3542  							RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(),
  3543  							Delay:          5 * time.Second,
  3544  						},
  3545  					},
  3546  				}},
  3547  			expectedRescheduleTime:     now.Add(-5 * time.Second).Add(5 * time.Second),
  3548  			expectedRescheduleEligible: true,
  3549  		},
  3550  	}
  3551  	for _, tc := range testCases {
  3552  		t.Run(tc.desc, func(t *testing.T) {
  3553  			require := require.New(t)
  3554  			j := testJob()
  3555  			if tc.reschedulePolicy != nil {
  3556  				j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy
  3557  			}
  3558  			tc.alloc.Job = j
  3559  			tc.alloc.TaskGroup = j.TaskGroups[0].Name
  3560  			reschedTime, allowed := tc.alloc.NextRescheduleTime()
  3561  			require.Equal(tc.expectedRescheduleEligible, allowed)
  3562  			require.Equal(tc.expectedRescheduleTime, reschedTime)
  3563  		})
  3564  	}
  3565  
  3566  }
  3567  
  3568  func TestRescheduleTracker_Copy(t *testing.T) {
  3569  	type testCase struct {
  3570  		original *RescheduleTracker
  3571  		expected *RescheduleTracker
  3572  	}
  3573  
  3574  	cases := []testCase{
  3575  		{nil, nil},
  3576  		{&RescheduleTracker{Events: []*RescheduleEvent{
  3577  			{RescheduleTime: 2,
  3578  				PrevAllocID: "12",
  3579  				PrevNodeID:  "12",
  3580  				Delay:       30 * time.Second},
  3581  		}}, &RescheduleTracker{Events: []*RescheduleEvent{
  3582  			{RescheduleTime: 2,
  3583  				PrevAllocID: "12",
  3584  				PrevNodeID:  "12",
  3585  				Delay:       30 * time.Second},
  3586  		}}},
  3587  	}
  3588  
  3589  	for _, tc := range cases {
  3590  		if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) {
  3591  			t.Fatalf("expected %v but got %v", *tc.expected, *got)
  3592  		}
  3593  	}
  3594  }
  3595  
  3596  func TestVault_Validate(t *testing.T) {
  3597  	v := &Vault{
  3598  		Env:        true,
  3599  		ChangeMode: VaultChangeModeNoop,
  3600  	}
  3601  
  3602  	if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") {
  3603  		t.Fatalf("Expected policy list empty error")
  3604  	}
  3605  
  3606  	v.Policies = []string{"foo", "root"}
  3607  	v.ChangeMode = VaultChangeModeSignal
  3608  
  3609  	err := v.Validate()
  3610  	if err == nil {
  3611  		t.Fatalf("Expected validation errors")
  3612  	}
  3613  
  3614  	if !strings.Contains(err.Error(), "Signal must") {
  3615  		t.Fatalf("Expected signal empty error")
  3616  	}
  3617  	if !strings.Contains(err.Error(), "root") {
  3618  		t.Fatalf("Expected root error")
  3619  	}
  3620  }
  3621  
  3622  func TestParameterizedJobConfig_Validate(t *testing.T) {
  3623  	d := &ParameterizedJobConfig{
  3624  		Payload: "foo",
  3625  	}
  3626  
  3627  	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") {
  3628  		t.Fatalf("Expected unknown payload requirement: %v", err)
  3629  	}
  3630  
  3631  	d.Payload = DispatchPayloadOptional
  3632  	d.MetaOptional = []string{"foo", "bar"}
  3633  	d.MetaRequired = []string{"bar", "baz"}
  3634  
  3635  	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") {
  3636  		t.Fatalf("Expected meta not being disjoint error: %v", err)
  3637  	}
  3638  }
  3639  
  3640  func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) {
  3641  	job := testJob()
  3642  	job.ParameterizedJob = &ParameterizedJobConfig{
  3643  		Payload: DispatchPayloadOptional,
  3644  	}
  3645  	job.Type = JobTypeSystem
  3646  
  3647  	if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") {
  3648  		t.Fatalf("Expected bad scheduler tpye: %v", err)
  3649  	}
  3650  }
  3651  
  3652  func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
  3653  	d := &ParameterizedJobConfig{}
  3654  	d.Canonicalize()
  3655  	if d.Payload != DispatchPayloadOptional {
  3656  		t.Fatalf("Canonicalize failed")
  3657  	}
  3658  }
  3659  
  3660  func TestDispatchPayloadConfig_Validate(t *testing.T) {
  3661  	d := &DispatchPayloadConfig{
  3662  		File: "foo",
  3663  	}
  3664  
  3665  	// task/local/haha
  3666  	if err := d.Validate(); err != nil {
  3667  		t.Fatalf("bad: %v", err)
  3668  	}
  3669  
  3670  	// task/haha
  3671  	d.File = "../haha"
  3672  	if err := d.Validate(); err != nil {
  3673  		t.Fatalf("bad: %v", err)
  3674  	}
  3675  
  3676  	// ../haha
  3677  	d.File = "../../../haha"
  3678  	if err := d.Validate(); err == nil {
  3679  		t.Fatalf("bad: %v", err)
  3680  	}
  3681  }
  3682  
  3683  func TestIsRecoverable(t *testing.T) {
  3684  	if IsRecoverable(nil) {
  3685  		t.Errorf("nil should not be recoverable")
  3686  	}
  3687  	if IsRecoverable(NewRecoverableError(nil, true)) {
  3688  		t.Errorf("NewRecoverableError(nil, true) should not be recoverable")
  3689  	}
  3690  	if IsRecoverable(fmt.Errorf("i promise im recoverable")) {
  3691  		t.Errorf("Custom errors should not be recoverable")
  3692  	}
  3693  	if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) {
  3694  		t.Errorf("Explicitly unrecoverable errors should not be recoverable")
  3695  	}
  3696  	if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) {
  3697  		t.Errorf("Explicitly recoverable errors *should* be recoverable")
  3698  	}
  3699  }
  3700  
  3701  func TestACLTokenValidate(t *testing.T) {
  3702  	tk := &ACLToken{}
  3703  
  3704  	// Missing a type
  3705  	err := tk.Validate()
  3706  	assert.NotNil(t, err)
  3707  	if !strings.Contains(err.Error(), "client or management") {
  3708  		t.Fatalf("bad: %v", err)
  3709  	}
  3710  
  3711  	// Missing policies
  3712  	tk.Type = ACLClientToken
  3713  	err = tk.Validate()
  3714  	assert.NotNil(t, err)
  3715  	if !strings.Contains(err.Error(), "missing policies") {
  3716  		t.Fatalf("bad: %v", err)
  3717  	}
  3718  
  3719  	// Invalid policies
  3720  	tk.Type = ACLManagementToken
  3721  	tk.Policies = []string{"foo"}
  3722  	err = tk.Validate()
  3723  	assert.NotNil(t, err)
  3724  	if !strings.Contains(err.Error(), "associated with policies") {
  3725  		t.Fatalf("bad: %v", err)
  3726  	}
  3727  
  3728  	// Name too long policies
  3729  	tk.Name = ""
  3730  	for i := 0; i < 8; i++ {
  3731  		tk.Name += uuid.Generate()
  3732  	}
  3733  	tk.Policies = nil
  3734  	err = tk.Validate()
  3735  	assert.NotNil(t, err)
  3736  	if !strings.Contains(err.Error(), "too long") {
  3737  		t.Fatalf("bad: %v", err)
  3738  	}
  3739  
  3740  	// Make it valid
  3741  	tk.Name = "foo"
  3742  	err = tk.Validate()
  3743  	assert.Nil(t, err)
  3744  }
  3745  
  3746  func TestACLTokenPolicySubset(t *testing.T) {
  3747  	tk := &ACLToken{
  3748  		Type:     ACLClientToken,
  3749  		Policies: []string{"foo", "bar", "baz"},
  3750  	}
  3751  
  3752  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
  3753  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
  3754  	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
  3755  	assert.Equal(t, true, tk.PolicySubset([]string{}))
  3756  	assert.Equal(t, false, tk.PolicySubset([]string{"foo", "bar", "new"}))
  3757  	assert.Equal(t, false, tk.PolicySubset([]string{"new"}))
  3758  
  3759  	tk = &ACLToken{
  3760  		Type: ACLManagementToken,
  3761  	}
  3762  
  3763  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "baz"}))
  3764  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar"}))
  3765  	assert.Equal(t, true, tk.PolicySubset([]string{"foo"}))
  3766  	assert.Equal(t, true, tk.PolicySubset([]string{}))
  3767  	assert.Equal(t, true, tk.PolicySubset([]string{"foo", "bar", "new"}))
  3768  	assert.Equal(t, true, tk.PolicySubset([]string{"new"}))
  3769  }
  3770  
  3771  func TestACLTokenSetHash(t *testing.T) {
  3772  	tk := &ACLToken{
  3773  		Name:     "foo",
  3774  		Type:     ACLClientToken,
  3775  		Policies: []string{"foo", "bar"},
  3776  		Global:   false,
  3777  	}
  3778  	out1 := tk.SetHash()
  3779  	assert.NotNil(t, out1)
  3780  	assert.NotNil(t, tk.Hash)
  3781  	assert.Equal(t, out1, tk.Hash)
  3782  
  3783  	tk.Policies = []string{"foo"}
  3784  	out2 := tk.SetHash()
  3785  	assert.NotNil(t, out2)
  3786  	assert.NotNil(t, tk.Hash)
  3787  	assert.Equal(t, out2, tk.Hash)
  3788  	assert.NotEqual(t, out1, out2)
  3789  }
  3790  
  3791  func TestACLPolicySetHash(t *testing.T) {
  3792  	ap := &ACLPolicy{
  3793  		Name:        "foo",
  3794  		Description: "great policy",
  3795  		Rules:       "node { policy = \"read\" }",
  3796  	}
  3797  	out1 := ap.SetHash()
  3798  	assert.NotNil(t, out1)
  3799  	assert.NotNil(t, ap.Hash)
  3800  	assert.Equal(t, out1, ap.Hash)
  3801  
  3802  	ap.Rules = "node { policy = \"write\" }"
  3803  	out2 := ap.SetHash()
  3804  	assert.NotNil(t, out2)
  3805  	assert.NotNil(t, ap.Hash)
  3806  	assert.Equal(t, out2, ap.Hash)
  3807  	assert.NotEqual(t, out1, out2)
  3808  }
  3809  
// TestTaskEventPopulate verifies that PopulateEventDisplayMessage derives the
// expected human-readable DisplayMessage for each task event type, leaves an
// already-populated message untouched, and tolerates a nil event.
func TestTaskEventPopulate(t *testing.T) {
	// An event with DisplayMessage already set must not be overwritten.
	prepopulatedEvent := NewTaskEvent(TaskSetup)
	prepopulatedEvent.DisplayMessage = "Hola"
	testcases := []struct {
		event       *TaskEvent
		expectedMsg string
	}{
		{nil, ""}, // nil receiver: the call below must be a safe no-op
		{prepopulatedEvent, "Hola"},
		{NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"},
		{NewTaskEvent(TaskStarted), "Task started by client"},
		{NewTaskEvent(TaskReceived), "Task received by client"},
		{NewTaskEvent(TaskFailedValidation), "Validation of task failed"},
		{NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"},
		{NewTaskEvent(TaskSetupFailure), "Task setup failed"},
		{NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"},
		{NewTaskEvent(TaskDriverFailure), "Failed to start task"},
		{NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"},
		{NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"},
		{NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"},
		{NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"},
		{NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"},
		{NewTaskEvent(TaskKilling), "Sent interrupt"},
		{NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"},
		{NewTaskEvent(TaskKilling).SetKillTimeout(1 * time.Second), "Sent interrupt. Waiting 1s before force killing"},
		{NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"},
		{NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""},
		{NewTaskEvent(TaskKilled), "Task successfully killed"},
		{NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"},
		{NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"},
		{NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"},
		{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
		{NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"},
		{NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"},
		{NewTaskEvent(TaskSignaling), "Task being sent a signal"},
		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"},
		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"},
		{NewTaskEvent(TaskRestartSignal), "Task signaled to restart"},
		{NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"},
		{NewTaskEvent(TaskDriverMessage).SetDriverMessage("YOLO"), "YOLO"},
		{NewTaskEvent("Unknown Type, No message"), ""},
		{NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"},
	}

	for _, tc := range testcases {
		// Deliberately called before the nil check: the nil case above
		// exercises that a nil receiver does not panic.
		tc.event.PopulateEventDisplayMessage()
		if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg {
			t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage)
		}
	}
}
  3861  
// TestNetworkResourcesEquals verifies NetworkResource.Equals: two resources
// are equal only when IP, MBits, and the full ReservedPorts/DynamicPorts
// contents all match. Each case supplies a pair of resources, the expected
// comparison result, and a message describing what the case covers.
func TestNetworkResourcesEquals(t *testing.T) {
	require := require.New(t)
	var networkResourcesTest = []struct {
		input    []*NetworkResource // exactly two resources to compare
		expected bool               // expected result of input[0].Equals(input[1])
		errorMsg string             // shown when the comparison disagrees
	}{
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
			},
			true,
			"Equal network resources should return true",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.0",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
			},
			false,
			"Different IP addresses should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         40,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
			},
			false,
			"Different MBits values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}, {"web", 80}},
				},
			},
			false,
			"Different ReservedPorts lengths should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{},
				},
			},
			false,
			"Empty and non empty ReservedPorts values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"web", 80}},
				},
				{
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"notweb", 80}},
				},
			},
			false,
			"Different valued ReservedPorts values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80}},
				},
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80}, {"web", 80}},
				},
			},
			false,
			"Different DynamicPorts lengths should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80}},
				},
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{},
				},
			},
			false,
			"Empty and non empty DynamicPorts values should return false",
		},
		{
			[]*NetworkResource{
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"web", 80}},
				},
				{
					IP:           "10.0.0.1",
					MBits:        50,
					DynamicPorts: []Port{{"notweb", 80}},
				},
			},
			false,
			"Different valued DynamicPorts values should return false",
		},
	}
	for _, testCase := range networkResourcesTest {
		first := testCase.input[0]
		second := testCase.input[1]
		require.Equal(testCase.expected, first.Equals(second), testCase.errorMsg)
	}
}
  4020  
  4021  func TestNode_Canonicalize(t *testing.T) {
  4022  	t.Parallel()
  4023  	require := require.New(t)
  4024  
  4025  	// Make sure the eligiblity is set properly
  4026  	node := &Node{}
  4027  	node.Canonicalize()
  4028  	require.Equal(NodeSchedulingEligible, node.SchedulingEligibility)
  4029  
  4030  	node = &Node{
  4031  		Drain: true,
  4032  	}
  4033  	node.Canonicalize()
  4034  	require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility)
  4035  }
  4036  
  4037  func TestNode_Copy(t *testing.T) {
  4038  	t.Parallel()
  4039  	require := require.New(t)
  4040  
  4041  	node := &Node{
  4042  		ID:         uuid.Generate(),
  4043  		SecretID:   uuid.Generate(),
  4044  		Datacenter: "dc1",
  4045  		Name:       "foobar",
  4046  		Attributes: map[string]string{
  4047  			"kernel.name":        "linux",
  4048  			"arch":               "x86",
  4049  			"nomad.version":      "0.5.0",
  4050  			"driver.exec":        "1",
  4051  			"driver.mock_driver": "1",
  4052  		},
  4053  		Resources: &Resources{
  4054  			CPU:      4000,
  4055  			MemoryMB: 8192,
  4056  			DiskMB:   100 * 1024,
  4057  			Networks: []*NetworkResource{
  4058  				{
  4059  					Device: "eth0",
  4060  					CIDR:   "192.168.0.100/32",
  4061  					MBits:  1000,
  4062  				},
  4063  			},
  4064  		},
  4065  		Reserved: &Resources{
  4066  			CPU:      100,
  4067  			MemoryMB: 256,
  4068  			DiskMB:   4 * 1024,
  4069  			Networks: []*NetworkResource{
  4070  				{
  4071  					Device:        "eth0",
  4072  					IP:            "192.168.0.100",
  4073  					ReservedPorts: []Port{{Label: "ssh", Value: 22}},
  4074  					MBits:         1,
  4075  				},
  4076  			},
  4077  		},
  4078  		NodeResources: &NodeResources{
  4079  			Cpu: NodeCpuResources{
  4080  				CpuShares: 4000,
  4081  			},
  4082  			Memory: NodeMemoryResources{
  4083  				MemoryMB: 8192,
  4084  			},
  4085  			Disk: NodeDiskResources{
  4086  				DiskMB: 100 * 1024,
  4087  			},
  4088  			Networks: []*NetworkResource{
  4089  				{
  4090  					Device: "eth0",
  4091  					CIDR:   "192.168.0.100/32",
  4092  					MBits:  1000,
  4093  				},
  4094  			},
  4095  		},
  4096  		ReservedResources: &NodeReservedResources{
  4097  			Cpu: NodeReservedCpuResources{
  4098  				CpuShares: 100,
  4099  			},
  4100  			Memory: NodeReservedMemoryResources{
  4101  				MemoryMB: 256,
  4102  			},
  4103  			Disk: NodeReservedDiskResources{
  4104  				DiskMB: 4 * 1024,
  4105  			},
  4106  			Networks: NodeReservedNetworkResources{
  4107  				ReservedHostPorts: "22",
  4108  			},
  4109  		},
  4110  		Links: map[string]string{
  4111  			"consul": "foobar.dc1",
  4112  		},
  4113  		Meta: map[string]string{
  4114  			"pci-dss":  "true",
  4115  			"database": "mysql",
  4116  			"version":  "5.6",
  4117  		},
  4118  		NodeClass:             "linux-medium-pci",
  4119  		Status:                NodeStatusReady,
  4120  		SchedulingEligibility: NodeSchedulingEligible,
  4121  		Drivers: map[string]*DriverInfo{
  4122  			"mock_driver": {
  4123  				Attributes:        map[string]string{"running": "1"},
  4124  				Detected:          true,
  4125  				Healthy:           true,
  4126  				HealthDescription: "Currently active",
  4127  				UpdateTime:        time.Now(),
  4128  			},
  4129  		},
  4130  	}
  4131  	node.ComputeClass()
  4132  
  4133  	node2 := node.Copy()
  4134  
  4135  	require.Equal(node.Attributes, node2.Attributes)
  4136  	require.Equal(node.Resources, node2.Resources)
  4137  	require.Equal(node.Reserved, node2.Reserved)
  4138  	require.Equal(node.Links, node2.Links)
  4139  	require.Equal(node.Meta, node2.Meta)
  4140  	require.Equal(node.Events, node2.Events)
  4141  	require.Equal(node.DrainStrategy, node2.DrainStrategy)
  4142  	require.Equal(node.Drivers, node2.Drivers)
  4143  }
  4144  
  4145  func TestSpread_Validate(t *testing.T) {
  4146  	type tc struct {
  4147  		spread *Spread
  4148  		err    error
  4149  		name   string
  4150  	}
  4151  
  4152  	testCases := []tc{
  4153  		{
  4154  			spread: &Spread{},
  4155  			err:    fmt.Errorf("Missing spread attribute"),
  4156  			name:   "empty spread",
  4157  		},
  4158  		{
  4159  			spread: &Spread{
  4160  				Attribute: "${node.datacenter}",
  4161  				Weight:    -1,
  4162  			},
  4163  			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
  4164  			name: "Invalid weight",
  4165  		},
  4166  		{
  4167  			spread: &Spread{
  4168  				Attribute: "${node.datacenter}",
  4169  				Weight:    110,
  4170  			},
  4171  			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
  4172  			name: "Invalid weight",
  4173  		},
  4174  		{
  4175  			spread: &Spread{
  4176  				Attribute: "${node.datacenter}",
  4177  				Weight:    50,
  4178  				SpreadTarget: []*SpreadTarget{
  4179  					{
  4180  						Value:   "dc1",
  4181  						Percent: 25,
  4182  					},
  4183  					{
  4184  						Value:   "dc2",
  4185  						Percent: 150,
  4186  					},
  4187  				},
  4188  			},
  4189  			err:  fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"),
  4190  			name: "Invalid percentages",
  4191  		},
  4192  		{
  4193  			spread: &Spread{
  4194  				Attribute: "${node.datacenter}",
  4195  				Weight:    50,
  4196  				SpreadTarget: []*SpreadTarget{
  4197  					{
  4198  						Value:   "dc1",
  4199  						Percent: 75,
  4200  					},
  4201  					{
  4202  						Value:   "dc2",
  4203  						Percent: 75,
  4204  					},
  4205  				},
  4206  			},
  4207  			err:  fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150),
  4208  			name: "Invalid percentages",
  4209  		},
  4210  		{
  4211  			spread: &Spread{
  4212  				Attribute: "${node.datacenter}",
  4213  				Weight:    50,
  4214  				SpreadTarget: []*SpreadTarget{
  4215  					{
  4216  						Value:   "dc1",
  4217  						Percent: 25,
  4218  					},
  4219  					{
  4220  						Value:   "dc1",
  4221  						Percent: 50,
  4222  					},
  4223  				},
  4224  			},
  4225  			err:  fmt.Errorf("Spread target value \"dc1\" already defined"),
  4226  			name: "No spread targets",
  4227  		},
  4228  		{
  4229  			spread: &Spread{
  4230  				Attribute: "${node.datacenter}",
  4231  				Weight:    50,
  4232  				SpreadTarget: []*SpreadTarget{
  4233  					{
  4234  						Value:   "dc1",
  4235  						Percent: 25,
  4236  					},
  4237  					{
  4238  						Value:   "dc2",
  4239  						Percent: 50,
  4240  					},
  4241  				},
  4242  			},
  4243  			err:  nil,
  4244  			name: "Valid spread",
  4245  		},
  4246  	}
  4247  
  4248  	for _, tc := range testCases {
  4249  		t.Run(tc.name, func(t *testing.T) {
  4250  			err := tc.spread.Validate()
  4251  			if tc.err != nil {
  4252  				require.NotNil(t, err)
  4253  				require.Contains(t, err.Error(), tc.err.Error())
  4254  			} else {
  4255  				require.Nil(t, err)
  4256  			}
  4257  		})
  4258  	}
  4259  }
  4260  
  4261  func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) {
  4262  	require := require.New(t)
  4263  	cases := []struct {
  4264  		Input  string
  4265  		Parsed []uint64
  4266  		Err    bool
  4267  	}{
  4268  		{
  4269  			"1,2,3",
  4270  			[]uint64{1, 2, 3},
  4271  			false,
  4272  		},
  4273  		{
  4274  			"3,1,2,1,2,3,1-3",
  4275  			[]uint64{1, 2, 3},
  4276  			false,
  4277  		},
  4278  		{
  4279  			"3-1",
  4280  			nil,
  4281  			true,
  4282  		},
  4283  		{
  4284  			"1-3,2-4",
  4285  			[]uint64{1, 2, 3, 4},
  4286  			false,
  4287  		},
  4288  		{
  4289  			"1-3,4,5-5,6,7,8-10",
  4290  			[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
  4291  			false,
  4292  		},
  4293  	}
  4294  
  4295  	for i, tc := range cases {
  4296  		r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input}
  4297  		out, err := r.ParseReservedHostPorts()
  4298  		if (err != nil) != tc.Err {
  4299  			t.Fatalf("test case %d: %v", i, err)
  4300  			continue
  4301  		}
  4302  
  4303  		require.Equal(out, tc.Parsed)
  4304  	}
  4305  }