github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/nomad/structs/structs_test.go

package structs

import (
	"fmt"
	"os"
	"reflect"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/consul/api"
	"github.com/hashicorp/go-multierror"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/pointer"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/kr/pretty"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

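// TestJob_Validate exercises Job.Validate: missing required fields, invalid
// job types, periodic config on a non-batch job, duplicate and unnamed task
// groups, and empty datacenters.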
func TestJob_Validate(t *testing.T) {
	ci.Parallel(t)

	j := &Job{}
	err := j.Validate()
	requireErrors(t, err,
		"datacenters",
		"job ID",
		"job name",
		"job region",
		"job type",
		"namespace",
		"priority",
		"task groups",
	)

	j = &Job{
		Type: "invalid-job-type",
	}
	err = j.Validate()
	if expected := `Invalid job type: "invalid-job-type"`; !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	j = &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	err = j.Validate()
	require.ErrorContains(t, err, "Periodic")

	j = &Job{
		Region:      "global",
		ID:          uuid.Generate(),
		Namespace:   "test",
		Name:        "my-job",
		Type:        JobTypeService,
		Priority:    50,
		Datacenters: []string{"dc1"},
		TaskGroups: []*TaskGroup{
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay:    10 * time.Second,
					Attempts: 10,
				},
			},
			{
				Name: "web",
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay:    10 * time.Second,
					Attempts: 10,
				},
			},
			{
				RestartPolicy: &RestartPolicy{
					Interval: 5 * time.Minute,
					Delay:    10 * time.Second,
					Attempts: 10,
				},
			},
		},
	}
	err = j.Validate()
	requireErrors(t, err,
		"2 redefines 'web' from group 1",
		"group 3 missing name",
		"Task group web validation failed",
	)

	// test for empty datacenters
	j = &Job{
		Datacenters: []string{""},
	}
	err = j.Validate()
	require.ErrorContains(t, err, "datacenter must be non-empty string")
}

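// TestJob_ValidateScaling checks that a task group's count must fall within
// the min/max bounds of its horizontal scaling policy.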
func TestJob_ValidateScaling(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	p := &ScalingPolicy{
		Policy:  nil, // allowed to be nil
		Type:    ScalingPolicyTypeHorizontal,
		Min:     5,
		Max:     5,
		Enabled: true,
	}
	job := testJob()
	job.TaskGroups[0].Scaling = p
	job.TaskGroups[0].Count = 5

	require.NoError(job.Validate())

	// min <= max
	p.Max = 0
	p.Min = 10
	err := job.Validate()
	requireErrors(t, err,
		"task group count must not be less than minimum count in scaling policy",
		"task group count must not be greater than maximum count in scaling policy",
	)

	// count <= max
	p.Max = 0
	p.Min = 5
	job.TaskGroups[0].Count = 5
	err = job.Validate()
	require.ErrorContains(err,
		"task group count must not be greater than maximum count in scaling policy",
	)

	// min <= count
	job.TaskGroups[0].Count = 0
	p.Min = 5
	p.Max = 5
	err = job.Validate()
	require.ErrorContains(err,
		"task group count must not be less than minimum count in scaling policy",
	)
}

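// TestJob_ValidateNullChar asserts that null characters are rejected in job
// IDs, job names, task group names, and task names.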
func TestJob_ValidateNullChar(t *testing.T) {
	ci.Parallel(t)

	assert := assert.New(t)

	// job id should not allow null characters
	job := testJob()
	job.ID = "id_with\000null_character"
	assert.Error(job.Validate(), "null character in job ID should not validate")

	// job name should not allow null characters
	job.ID = "happy_little_job_id"
	job.Name = "my job name with \000 characters"
	assert.Error(job.Validate(), "null character in job name should not validate")

	// task group name should not allow null characters
	job.Name = "my job"
	job.TaskGroups[0].Name = "oh_no_another_\000_char"
	assert.Error(job.Validate(), "null character in task group name should not validate")

	// task name should not allow null characters
	job.TaskGroups[0].Name = "so_much_better"
	job.TaskGroups[0].Tasks[0].Name = "ive_had_it_with_these_\000_chars_in_these_names"
	assert.Error(job.Validate(), "null character in task name should not validate")
}

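// TestJob_Warnings covers the non-fatal warnings returned by Job.Warnings,
// such as update/count mismatches, mixed auto_promote settings, and the
// deprecated Template.VaultGrace field.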
func TestJob_Warnings(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		Name     string
		Job      *Job
		Expected []string
	}{
		{
			Name:     "Higher counts for update stanza",
			Expected: []string{"max parallel count is greater"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Name:  "foo",
						Count: 2,
						Update: &UpdateStrategy{
							MaxParallel: 10,
						},
					},
				},
			},
		},
		{
			Name:     "AutoPromote mixed TaskGroups",
			Expected: []string{"auto_promote must be true for all groups"},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Update: &UpdateStrategy{
							AutoPromote: true,
						},
					},
					{
						Update: &UpdateStrategy{
							AutoPromote: false,
							Canary:      1,
						},
					},
				},
			},
		},
		{
			Name:     "no error for mixed but implied AutoPromote",
			Expected: []string{},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Update: &UpdateStrategy{
							AutoPromote: true,
						},
					},
					{
						Update: &UpdateStrategy{
							AutoPromote: false,
							Canary:      0,
						},
					},
				},
			},
		},
		{
			Name:     "Template.VaultGrace Deprecated",
			Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template stanza."},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Tasks: []*Task{
							{
								Templates: []*Template{
									{
										VaultGrace: 1,
									},
								},
							},
						},
					},
				},
			},
		},
		{
			Name:     "Update.MaxParallel warning",
			Expected: []string{"Update max parallel count is greater than task group count (5 > 2). A destructive change would result in the simultaneous replacement of all allocations."},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Count: 2,
						Update: &UpdateStrategy{
							MaxParallel: 5,
						},
					},
				},
			},
		},
		{
			Name:     "Update.MaxParallel no warning",
			Expected: []string{},
			Job: &Job{
				Type: JobTypeService,
				TaskGroups: []*TaskGroup{
					{
						Count: 1,
						Update: &UpdateStrategy{
							MaxParallel: 5,
						},
					},
				},
			},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			warnings := c.Job.Warnings()
			if warnings == nil {
				if len(c.Expected) == 0 {
					return
				}
				t.Fatal("Got no warnings when they were expected")
			}

			a := warnings.Error()
			for _, e := range c.Expected {
				if !strings.Contains(a, e) {
					t.Fatalf("Got warnings %q; didn't contain %q", a, e)
				}
			}
		})
	}
}

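// TestJob_SpecChanged verifies that SpecChanged ignores mutable, server-set
// fields but detects genuine specification changes.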
func TestJob_SpecChanged(t *testing.T) {
	ci.Parallel(t)

	// Get a base test job
	base := testJob()

	// Only modify the indexes/mutable state of the job
	mutatedBase := base.Copy()
	mutatedBase.Status = "foo"
	mutatedBase.ModifyIndex = base.ModifyIndex + 100

	// changed contains a spec change that should be detected
	change := base.Copy()
	change.Priority = 99

	cases := []struct {
		Name     string
		Original *Job
		New      *Job
		Changed  bool
	}{
		{
			Name:     "Same job except mutable indexes",
			Changed:  false,
			Original: base,
			New:      mutatedBase,
		},
		{
			Name:     "Different",
			Changed:  true,
			Original: base,
			New:      change,
		},
		{
			Name:     "With Constraints",
			Changed:  false,
			Original: &Job{Constraints: []*Constraint{{"A", "B", "="}}},
			New:      &Job{Constraints: []*Constraint{{"A", "B", "="}}},
		},
		{
			Name:     "With Affinities",
			Changed:  false,
			Original: &Job{Affinities: []*Affinity{{"A", "B", "=", 1}}},
			New:      &Job{Affinities: []*Affinity{{"A", "B", "=", 1}}},
		},
	}

	for _, c := range cases {
		t.Run(c.Name, func(t *testing.T) {
			if actual := c.Original.SpecChanged(c.New); actual != c.Changed {
				t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed)
			}
		})
	}
}

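// testJob returns a minimal valid service job used as a fixture throughout
// these tests.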
func testJob() *Job {
	return &Job{
		Region:      "global",
		ID:          uuid.Generate(),
		Namespace:   "test",
		Name:        "my-job",
		Type:        JobTypeService,
		Priority:    50,
		AllAtOnce:   false,
		Datacenters: []string{"dc1"},
		Constraints: []*Constraint{
			{
				LTarget: "$attr.kernel.name",
				RTarget: "linux",
				Operand: "=",
			},
		},
		Periodic: &PeriodicConfig{
			Enabled: false,
		},
		TaskGroups: []*TaskGroup{
			{
				Name:          "web",
				Count:         10,
				EphemeralDisk: DefaultEphemeralDisk(),
				RestartPolicy: &RestartPolicy{
					Mode:     RestartPolicyModeFail,
					Attempts: 3,
					Interval: 10 * time.Minute,
					Delay:    1 * time.Minute,
				},
				ReschedulePolicy: &ReschedulePolicy{
					Interval:      5 * time.Minute,
					Attempts:      10,
					Delay:         5 * time.Second,
					DelayFunction: "constant",
				},
				Networks: []*NetworkResource{
					{
						DynamicPorts: []Port{
							{Label: "http"},
						},
					},
				},
				Services: []*Service{
					{
						Name:      "${TASK}-frontend",
						PortLabel: "http",
						Provider:  "consul",
					},
				},
				Tasks: []*Task{
					{
						Name:   "web",
						Driver: "exec",
						Config: map[string]interface{}{
							"command": "/bin/date",
						},
						Env: map[string]string{
							"FOO": "bar",
						},
						Artifacts: []*TaskArtifact{
							{
								GetterSource: "http://foo.com",
							},
						},
						Resources: &Resources{
							CPU:      500,
							MemoryMB: 256,
						},
						LogConfig: &LogConfig{
							MaxFiles:      10,
							MaxFileSizeMB: 1,
						},
					},
				},
				Meta: map[string]string{
					"elb_check_type":     "http",
					"elb_check_interval": "30s",
					"elb_check_min":      "3",
				},
			},
		},
		Meta: map[string]string{
			"owner": "armon",
		},
	}
}

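// TestJob_Copy ensures Copy produces a deep copy equal to the original job.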
func TestJob_Copy(t *testing.T) {
	ci.Parallel(t)

	j := testJob()
	c := j.Copy()
	if !reflect.DeepEqual(j, c) {
		t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
	}
}

func TestJob_IsPeriodic(t *testing.T) {
	ci.Parallel(t)

	j := &Job{
		Type: JobTypeService,
		Periodic: &PeriodicConfig{
			Enabled: true,
		},
	}
	if !j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned false on periodic job")
	}

	j = &Job{
		Type: JobTypeService,
	}
	if j.IsPeriodic() {
		t.Fatalf("IsPeriodic() returned true on non-periodic job")
	}
}

func TestJob_IsPeriodicActive(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		job    *Job
		active bool
	}{
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
			},
			active: true,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: true,
				},
				Stop: true,
			},
			active: false,
		},
		{
			job: &Job{
				Type: JobTypeService,
				Periodic: &PeriodicConfig{
					Enabled: false,
				},
				ParameterizedJob: &ParameterizedJobConfig{},
			},
			active: false,
		},
	}

	for i, c := range cases {
		if act := c.job.IsPeriodicActive(); act != c.active {
			t.Fatalf("case %d failed: got %v; want %v", i, act, c.active)
		}
	}
}

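// TestJob_SystemJob_Validate covers validation rules specific to system jobs:
// group counts above one, affinity stanzas, and spread stanzas are rejected.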
func TestJob_SystemJob_Validate(t *testing.T) {
	ci.Parallel(t)

	j := testJob()
	j.Type = JobTypeSystem
	j.TaskGroups[0].ReschedulePolicy = nil
	j.Canonicalize()

	err := j.Validate()
	if err == nil || !strings.Contains(err.Error(), "exceed") {
		t.Fatalf("expect error due to count")
	}

	j.TaskGroups[0].Count = 0
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	j.TaskGroups[0].Count = 1
	if err := j.Validate(); err != nil {
		t.Fatalf("unexpected err: %v", err)
	}

	// Add affinities at the job, task group, and task level; all should fail
	// validation for a system job.
	j.Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${node.datacenter}",
		RTarget: "dc1",
	}}
	j.TaskGroups[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{
		Operand: "=",
		LTarget: "${meta.rack}",
		RTarget: "r1",
	}}
	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have an affinity stanza")

	// Add spread at the job and task group level; both should fail validation
	// for a system job.
	j.Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}
	j.TaskGroups[0].Spreads = []*Spread{{
		Attribute: "${node.datacenter}",
		Weight:    100,
	}}

	err = j.Validate()
	require.NotNil(t, err)
	require.Contains(t, err.Error(), "System jobs may not have a spread stanza")
}

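// TestJob_Vault verifies that Job.Vault collects each task's Vault block,
// keyed by task group and task name.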
func TestJob_Vault(t *testing.T) {
	ci.Parallel(t)

	j0 := &Job{}
	e0 := make(map[string]map[string]*Vault, 0)

	vj1 := &Vault{
		Policies: []string{
			"p1",
			"p2",
		},
	}
	vj2 := &Vault{
		Policies: []string{
			"p3",
			"p4",
		},
	}
	vj3 := &Vault{
		Policies: []string{
			"p5",
		},
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name:  "t2",
						Vault: vj1,
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name:  "t3",
						Vault: vj2,
					},
					{
						Name:  "t4",
						Vault: vj3,
					},
				},
			},
		},
	}

	e1 := map[string]map[string]*Vault{
		"foo": {
			"t2": vj1,
		},
		"bar": {
			"t3": vj2,
			"t4": vj3,
		},
	}

	cases := []struct {
		Job      *Job
		Expected map[string]map[string]*Vault
	}{
		{
			Job:      j0,
			Expected: e0,
		},
		{
			Job:      j1,
			Expected: e1,
		},
	}

	for i, c := range cases {
		got := c.Job.Vault()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

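// TestJob_ConnectTasks verifies that ConnectTasks returns the TaskKind of
// every Connect proxy, native, ingress, and terminating task in the job.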
func TestJob_ConnectTasks(t *testing.T) {
	ci.Parallel(t)
	r := require.New(t)

	j0 := &Job{
		TaskGroups: []*TaskGroup{{
			Name: "tg1",
			Tasks: []*Task{{
				Name: "connect-proxy-task1",
				Kind: "connect-proxy:task1",
			}, {
				Name: "task2",
				Kind: "task2",
			}, {
				Name: "connect-proxy-task3",
				Kind: "connect-proxy:task3",
			}},
		}, {
			Name: "tg2",
			Tasks: []*Task{{
				Name: "task1",
				Kind: "task1",
			}, {
				Name: "connect-proxy-task2",
				Kind: "connect-proxy:task2",
			}},
		}, {
			Name: "tg3",
			Tasks: []*Task{{
				Name: "ingress",
				Kind: "connect-ingress:ingress",
			}},
		}, {
			Name: "tg4",
			Tasks: []*Task{{
				Name: "frontend",
				Kind: "connect-native:uuid-fe",
			}, {
				Name: "generator",
				Kind: "connect-native:uuid-api",
			}},
		}, {
			Name: "tg5",
			Tasks: []*Task{{
				Name: "t1000",
				Kind: "connect-terminating:t1000",
			}},
		}},
	}

	connectTasks := j0.ConnectTasks()

	exp := []TaskKind{
		NewTaskKind(ConnectProxyPrefix, "task1"),
		NewTaskKind(ConnectProxyPrefix, "task3"),
		NewTaskKind(ConnectProxyPrefix, "task2"),
		NewTaskKind(ConnectIngressPrefix, "ingress"),
		NewTaskKind(ConnectNativePrefix, "uuid-fe"),
		NewTaskKind(ConnectNativePrefix, "uuid-api"),
		NewTaskKind(ConnectTerminatingPrefix, "t1000"),
	}

	r.Equal(exp, connectTasks)
}

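// TestJob_RequiredSignals verifies that RequiredSignals collects Vault and
// template change signals plus kill signals, keyed by task group and task.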
func TestJob_RequiredSignals(t *testing.T) {
	ci.Parallel(t)

	j0 := &Job{}
	e0 := make(map[string]map[string][]string, 0)

	vj1 := &Vault{
		Policies:   []string{"p1"},
		ChangeMode: VaultChangeModeNoop,
	}
	vj2 := &Vault{
		Policies:     []string{"p1"},
		ChangeMode:   VaultChangeModeSignal,
		ChangeSignal: "SIGUSR1",
	}
	tj1 := &Template{
		SourcePath: "foo",
		DestPath:   "bar",
		ChangeMode: TemplateChangeModeNoop,
	}
	tj2 := &Template{
		SourcePath:   "foo",
		DestPath:     "bar",
		ChangeMode:   TemplateChangeModeSignal,
		ChangeSignal: "SIGUSR2",
	}
	j1 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name: "t1",
					},
					{
						Name:      "t2",
						Vault:     vj2,
						Templates: []*Template{tj2},
					},
				},
			},
			{
				Name: "bar",
				Tasks: []*Task{
					{
						Name:      "t3",
						Vault:     vj1,
						Templates: []*Template{tj1},
					},
					{
						Name:  "t4",
						Vault: vj2,
					},
				},
			},
		},
	}

	e1 := map[string]map[string][]string{
		"foo": {
			"t2": {"SIGUSR1", "SIGUSR2"},
		},
		"bar": {
			"t4": {"SIGUSR1"},
		},
	}

	j2 := &Job{
		TaskGroups: []*TaskGroup{
			{
				Name: "foo",
				Tasks: []*Task{
					{
						Name:       "t1",
						KillSignal: "SIGQUIT",
					},
				},
			},
		},
	}

	e2 := map[string]map[string][]string{
		"foo": {
			"t1": {"SIGQUIT"},
		},
	}

	cases := []struct {
		Job      *Job
		Expected map[string]map[string][]string
	}{
		{
			Job:      j0,
			Expected: e0,
		},
		{
			Job:      j1,
			Expected: e1,
		},
		{
			Job:      j2,
			Expected: e2,
		},
	}

	for i, c := range cases {
		got := c.Job.RequiredSignals()
		if !reflect.DeepEqual(got, c.Expected) {
			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
		}
	}
}

// TestJob_PartEqual tests the order-independent Equal comparisons for the
// Networks, Constraints, and Affinities components of a Job.
func TestJob_PartEqual(t *testing.T) {
	ci.Parallel(t)

	ns := &Networks{}
	require.True(t, ns.Equal(&Networks{}))

	ns = &Networks{
		&NetworkResource{Device: "eth0"},
	}
	require.True(t, ns.Equal(&Networks{
		&NetworkResource{Device: "eth0"},
	}))

	ns = &Networks{
		&NetworkResource{Device: "eth0"},
		&NetworkResource{Device: "eth1"},
		&NetworkResource{Device: "eth2"},
	}
	require.True(t, ns.Equal(&Networks{
		&NetworkResource{Device: "eth2"},
		&NetworkResource{Device: "eth0"},
		&NetworkResource{Device: "eth1"},
	}))

	cs := &Constraints{
		&Constraint{"left0", "right0", "="},
		&Constraint{"left1", "right1", "="},
		&Constraint{"left2", "right2", "="},
	}
	require.True(t, cs.Equal(&Constraints{
		&Constraint{"left0", "right0", "="},
		&Constraint{"left2", "right2", "="},
		&Constraint{"left1", "right1", "="},
	}))

	as := &Affinities{
		&Affinity{"left0", "right0", "=", 0},
		&Affinity{"left1", "right1", "=", 0},
		&Affinity{"left2", "right2", "=", 0},
	}
	require.True(t, as.Equal(&Affinities{
		&Affinity{"left0", "right0", "=", 0},
		&Affinity{"left2", "right2", "=", 0},
		&Affinity{"left1", "right1", "=", 0},
	}))
}

func TestTask_UsesConnect(t *testing.T) {
	ci.Parallel(t)

	t.Run("normal task", func(t *testing.T) {
		task := testJob().TaskGroups[0].Tasks[0]
		usesConnect := task.UsesConnect()
		require.False(t, usesConnect)
	})

	t.Run("sidecar proxy", func(t *testing.T) {
		task := &Task{
			Name: "connect-proxy-task1",
			Kind: NewTaskKind(ConnectProxyPrefix, "task1"),
		}
		usesConnect := task.UsesConnect()
		require.True(t, usesConnect)
	})

	t.Run("native task", func(t *testing.T) {
		task := &Task{
			Name: "task1",
			Kind: NewTaskKind(ConnectNativePrefix, "task1"),
		}
		usesConnect := task.UsesConnect()
		require.True(t, usesConnect)
	})

	t.Run("ingress gateway", func(t *testing.T) {
		task := &Task{
			Name: "task1",
			Kind: NewTaskKind(ConnectIngressPrefix, "task1"),
		}
		usesConnect := task.UsesConnect()
		require.True(t, usesConnect)
	})

	t.Run("terminating gateway", func(t *testing.T) {
		task := &Task{
			Name: "task1",
			Kind: NewTaskKind(ConnectTerminatingPrefix, "task1"),
		}
		usesConnect := task.UsesConnect()
		require.True(t, usesConnect)
	})
}

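// TestTaskGroup_UsesConnect verifies that a task group reports Connect usage
// when any of its services uses native Connect, a sidecar service, or a
// gateway.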
func TestTaskGroup_UsesConnect(t *testing.T) {
	ci.Parallel(t)

	try := func(t *testing.T, tg *TaskGroup, exp bool) {
		result := tg.UsesConnect()
		require.Equal(t, exp, result)
	}

	t.Run("tg uses native", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{
				{Connect: nil},
				{Connect: &ConsulConnect{Native: true}},
			},
		}, true)
	})

	t.Run("tg uses sidecar", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{{
				Connect: &ConsulConnect{
					SidecarService: &ConsulSidecarService{
						Port: "9090",
					},
				},
			}},
		}, true)
	})

	t.Run("tg uses gateway", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{{
				Connect: &ConsulConnect{
					Gateway: consulIngressGateway1,
				},
			}},
		}, true)
	})

	t.Run("tg does not use connect", func(t *testing.T) {
		try(t, &TaskGroup{
			Services: []*Service{
				{Connect: nil},
			},
		}, false)
	})
}

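// TestTaskGroup_Validate exercises TaskGroup.Validate: counts, static port
// collisions, leader tasks, update blocks, volume requests, volume mounts,
// and service providers.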
func TestTaskGroup_Validate(t *testing.T) {
	ci.Parallel(t)

	j := testJob()
	tg := &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	err := tg.Validate(j)
	requireErrors(t, err,
		"group name",
		"count can't be negative",
		"Missing tasks",
	)

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
			{
				Name: "task-b",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{{Label: "foo", Value: 123}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected := `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Tasks: []*Task{
			{
				Name: "task-a",
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							ReservedPorts: []Port{
								{Label: "foo", Value: 123},
								{Label: "bar", Value: 123},
							},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Static port 123 already reserved by task-a:foo`
	if !strings.Contains(err.Error(), expected) {
		t.Errorf("expected %s but found: %v", expected, err)
	}

	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
			{Name: "web", Leader: true},
			{},
		},
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval:      5 * time.Minute,
			Attempts:      10,
			Delay:         5 * time.Second,
			DelayFunction: "constant",
		},
	}

	err = tg.Validate(j)
	requireErrors(t, err,
		"should have an ephemeral disk object",
		"2 redefines 'web' from task 1",
		"Task 3 missing name",
		"Only one task may be marked as leader",
		"Task web validation failed",
	)

	tg = &TaskGroup{
		Name:  "web",
		Count: 1,
		Tasks: []*Task{
			{Name: "web", Leader: true},
		},
		Update: DefaultUpdateStrategy.Copy(),
	}
	j.Type = JobTypeBatch
	err = tg.Validate(j)
	require.ErrorContains(t, err, "does not allow update block")

	tg = &TaskGroup{
		Count: -1,
		RestartPolicy: &RestartPolicy{
			Interval: 5 * time.Minute,
			Delay:    10 * time.Second,
			Attempts: 10,
			Mode:     RestartPolicyModeDelay,
		},
		ReschedulePolicy: &ReschedulePolicy{
			Interval: 5 * time.Minute,
			Attempts: 5,
			Delay:    5 * time.Second,
		},
	}
	j.Type = JobTypeSystem
	err = tg.Validate(j)
	if !strings.Contains(err.Error(), "System jobs should not have a reschedule policy") {
		t.Fatalf("err: %s", err)
	}

	tg = &TaskGroup{
		Networks: []*NetworkResource{
			{
				DynamicPorts: []Port{{"http", 0, 80, ""}},
			},
		},
		Tasks: []*Task{
			{
				Resources: &Resources{
					Networks: []*NetworkResource{
						{
							DynamicPorts: []Port{{"http", 0, 80, ""}},
						},
					},
				},
			},
		},
	}
	err = tg.Validate(j)
	require.Contains(t, err.Error(), "Port label http already in use")

	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type:   "nothost",
				Source: "foo",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `volume has unrecognized type nothost`)

	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type: "host",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `volume has an empty source`)

	tg = &TaskGroup{
		Name: "group-a",
		Update: &UpdateStrategy{
			Canary: 1,
		},
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type:     "csi",
				PerAlloc: true,
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
			},
		},
	}
	err = tg.Validate(&Job{})
	require.Contains(t, err.Error(), `volume has an empty source`)
	require.Contains(t, err.Error(), `volume cannot be per_alloc when canaries are in use`)
	require.Contains(t, err.Error(), `CSI volumes must have an attachment mode`)
	require.Contains(t, err.Error(), `CSI volumes must have an access mode`)

	tg = &TaskGroup{
		Volumes: map[string]*VolumeRequest{
			"foo": {
				Type: "host",
			},
		},
		Tasks: []*Task{
			{
				Name:      "task-a",
				Resources: &Resources{},
				VolumeMounts: []*VolumeMount{
					{
						Volume: "",
					},
				},
			},
			{
				Name:      "task-b",
				Resources: &Resources{},
				VolumeMounts: []*VolumeMount{
					{
						Volume: "foob",
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = `Task task-a has a volume mount (0) referencing an empty volume`
	require.Contains(t, err.Error(), expected)

	expected = `Task task-b has a volume mount (0) referencing undefined volume foob`
	require.Contains(t, err.Error(), expected)

	taskA := &Task{Name: "task-a"}
	tg = &TaskGroup{
		Name: "group-a",
		Services: []*Service{
			{
				Name:     "service-a",
				Provider: "consul",
				Checks: []*ServiceCheck{
					{
						Name:      "check-a",
						Type:      "tcp",
						TaskName:  "task-b",
						PortLabel: "http",
						Interval:  time.Duration(1 * time.Second),
						Timeout:   time.Duration(1 * time.Second),
					},
				},
			},
		},
		Tasks: []*Task{taskA},
	}
	err = tg.Validate(&Job{})
	expected = `Check check-a invalid: refers to non-existent task task-b`
	require.Contains(t, err.Error(), expected)

	tg = &TaskGroup{
		Name: "group-a",
		Services: []*Service{
			{
				Name:     "service-a",
				Provider: "nomad",
			},
			{
				Name:     "service-b",
				Provider: "consul",
			},
		},
		Tasks: []*Task{{Name: "task-a"}},
	}
	err = tg.Validate(&Job{})
	expected = "Multiple service providers used: task group services must use the same provider"
	require.Contains(t, err.Error(), expected)

	tg = &TaskGroup{
		Name: "group-a",
		Services: []*Service{
			{
				Name:     "service-a",
				Provider: "nomad",
			},
		},
		Tasks: []*Task{
			{
				Name: "task-a",
				Services: []*Service{
					{
						Name:     "service-b",
						Provider: "consul",
					},
				},
			},
		},
	}
	err = tg.Validate(&Job{})
	expected = "Multiple service providers used: task group services must use the same provider"
	require.Contains(t, err.Error(), expected)
}

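// TestTaskGroupNetwork_Validate covers group-level network validation: port
// range limits, host networks, group/task port mixing, and bridge-mode
// hostnames.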
func TestTaskGroupNetwork_Validate(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		TG          *TaskGroup
		ErrContains string
	}{
		{
			TG: &TaskGroup{
				Name: "group-static-value-ok",
				Networks: Networks{
					&NetworkResource{
						ReservedPorts: []Port{
							{
								Label: "ok",
								Value: 65535,
							},
						},
					},
				},
			},
		},
		{
			TG: &TaskGroup{
				Name: "group-dynamic-value-ok",
				Networks: Networks{
					&NetworkResource{
						DynamicPorts: []Port{
							{
								Label: "ok",
								Value: 65535,
							},
						},
					},
				},
			},
		},
		{
			TG: &TaskGroup{
				Name: "group-static-to-ok",
				Networks: Networks{
					&NetworkResource{
						ReservedPorts: []Port{
							{
								Label: "ok",
								To:    65535,
							},
						},
					},
				},
			},
		},
		{
			TG: &TaskGroup{
				Name: "group-dynamic-to-ok",
				Networks: Networks{
					&NetworkResource{
						DynamicPorts: []Port{
							{
								Label: "ok",
								To:    65535,
							},
						},
					},
				},
			},
		},
		{
			TG: &TaskGroup{
				Name: "group-static-value-too-high",
				Networks: Networks{
					&NetworkResource{
						ReservedPorts: []Port{
							{
								Label: "too-high",
								Value: 65536,
							},
						},
					},
				},
			},
			ErrContains: "greater than",
		},
		{
			TG: &TaskGroup{
				Name: "group-dynamic-value-too-high",
				Networks: Networks{
					&NetworkResource{
						DynamicPorts: []Port{
							{
								Label: "too-high",
								Value: 65536,
							},
						},
					},
				},
			},
			ErrContains: "greater than",
		},
		{
			TG: &TaskGroup{
				Name: "group-static-to-too-high",
				Networks: Networks{
					&NetworkResource{
						ReservedPorts: []Port{
							{
								Label: "too-high",
								To:    65536,
							},
						},
					},
				},
			},
			ErrContains: "greater than",
		},
		{
			TG: &TaskGroup{
				Name: "group-dynamic-to-too-high",
				Networks: Networks{
					&NetworkResource{
						DynamicPorts: []Port{
							{
								Label: "too-high",
								To:    65536,
							},
						},
					},
				},
			},
			ErrContains: "greater than",
		},
		{
			TG: &TaskGroup{
				Name: "group-same-static-port-different-host_network",
				Networks: Networks{
					&NetworkResource{
						ReservedPorts: []Port{
							{
								Label:       "net1_http",
								Value:       80,
								HostNetwork: "net1",
							},
							{
								Label:       "net2_http",
								Value:       80,
								HostNetwork: "net2",
							},
						},
					},
				},
			},
		},
		{
			TG: &TaskGroup{
				Name: "mixing-group-task-ports",
				Networks: Networks{
					&NetworkResource{
						ReservedPorts: []Port{
							{
								Label: "group_http",
								Value: 80,
							},
						},
					},
				},
				Tasks: []*Task{
					{
						Name: "task1",
						Resources: &Resources{
							Networks: Networks{
								&NetworkResource{
									ReservedPorts: []Port{
										{
											Label: "task_http",
											Value: 80,
										},
									},
								},
							},
						},
					},
				},
			},
			ErrContains: "already reserved by",
		},
		{
			TG: &TaskGroup{
				Name: "mixing-group-task-ports-with-host_network",
				Networks: Networks{
					&NetworkResource{
						ReservedPorts: []Port{
							{
								Label:       "group_http",
								Value:       80,
								HostNetwork: "net1",
							},
						},
					},
				},
				Tasks: []*Task{
					{
						Name: "task1",
						Resources: &Resources{
							Networks: Networks{
								&NetworkResource{
									ReservedPorts: []Port{
										{
											Label: "task_http",
											Value: 80,
										},
									},
								},
							},
						},
					},
				},
			},
		},
		{
			TG: &TaskGroup{
				Tasks: []*Task{
					{Driver: "docker"},
				},
				Networks: []*NetworkResource{
					{
						Mode:     "bridge",
						Hostname: "foobar",
					},
				},
			},
		},
		{
			TG: &TaskGroup{
				Tasks: []*Task{
					{Name: "hostname-invalid-dns-name"},
				},
				Networks: []*NetworkResource{
					{
						Mode:     "bridge",
						Hostname: "............",
					},
				},
			},
			ErrContains: "Hostname is not a valid DNS name",
		},
	}

	for i := range cases {
		tc := cases[i]
		t.Run(tc.TG.Name, func(t *testing.T) {
			err := tc.TG.validateNetworks()
			t.Logf("%s -> %v", tc.TG.Name, err)
			if tc.ErrContains == "" {
				require.NoError(t, err)
				return
			}

			require.Error(t, err)
			require.Contains(t, err.Error(), tc.ErrContains)
		})
	}
}

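// TestTask_Validate exercises basic task validation: required fields, name
// format, and constraints that are not allowed at the task level.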
func TestTask_Validate(t *testing.T) {
	ci.Parallel(t)

	task := &Task{}
	ephemeralDisk := DefaultEphemeralDisk()
	err := task.Validate(ephemeralDisk, JobTypeBatch, nil, nil)
	requireErrors(t, err,
		"task name",
		"task driver",
		"task resources",
	)

	task = &Task{Name: "web/foo"}
	err = task.Validate(ephemeralDisk, JobTypeBatch, nil, nil)
	require.ErrorContains(t, err, "slashes")

	task = &Task{
		Name:   "web",
		Driver: "docker",
		Resources: &Resources{
			CPU:      100,
			MemoryMB: 100,
		},
		LogConfig: DefaultLogConfig(),
	}
	ephemeralDisk.SizeMB = 200
	err = task.Validate(ephemeralDisk, JobTypeBatch, nil, nil)
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	task.Constraints = append(task.Constraints,
		&Constraint{
			Operand: ConstraintDistinctHosts,
		},
		&Constraint{
			Operand: ConstraintDistinctProperty,
			LTarget: "${meta.rack}",
		})

	err = task.Validate(ephemeralDisk, JobTypeBatch, nil, nil)
	requireErrors(t, err,
		"task level: distinct_hosts",
		"task level: distinct_property",
	)
}

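// TestTask_Validate_Resources covers Resources.Validate minimums for CPU and
// memory, and the requirement that MemoryMaxMB exceed MemoryMB.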
func TestTask_Validate_Resources(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		name string
		res  *Resources
		err  string
	}{
		{
			name: "Minimum",
			res:  MinResources(),
		},
		{
			name: "Default",
			res:  DefaultResources(),
		},
		{
			name: "Full",
			res: &Resources{
				CPU:         1000,
				MemoryMB:    1000,
				MemoryMaxMB: 2000,
				IOPS:        1000,
				Networks: []*NetworkResource{
					{
						Mode:   "host",
						Device: "localhost",
						CIDR:   "127.0.0.0/8",
						IP:     "127.0.0.1",
						MBits:  1000,
						DNS: &DNSConfig{
							Servers:  []string{"localhost"},
							Searches: []string{"localdomain"},
							Options:  []string{"ndots:5"},
						},
						ReservedPorts: []Port{
							{
								Label:       "reserved",
								Value:       1234,
								To:          1234,
								HostNetwork: "loopback",
							},
						},
						DynamicPorts: []Port{
							{
								Label:       "dynamic",
								Value:       5678,
								To:          5678,
								HostNetwork: "loopback",
							},
						},
					},
				},
			},
		},
		{
			name: "too little cpu",
			res: &Resources{
				CPU:      0,
				MemoryMB: 200,
			},
			err: "minimum CPU value is 1",
		},
		{
			name: "too little memory",
			res: &Resources{
				CPU:      100,
				MemoryMB: 1,
			},
			err: "minimum MemoryMB value is 10; got 1",
		},
		{
			name: "too little memory max",
			res: &Resources{
				CPU:         100,
				MemoryMB:    200,
				MemoryMaxMB: 10,
			},
			err: "MemoryMaxMB value (10) should be larger than MemoryMB value (200",
		},
	}

	for i := range cases {
		tc := cases[i]
		t.Run(tc.name, func(t *testing.T) {
			err := tc.res.Validate()
			if tc.err == "" {
				require.NoError(t, err)
			} else {
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.err)
			}
		})
	}
}

func TestNetworkResource_Copy(t *testing.T) {
	ci.Parallel(t)

	testCases := []struct {
		inputNetworkResource *NetworkResource
		name                 string
	}{
		{
			inputNetworkResource: nil,
			name:                 "nil input check",
		},
		{
			inputNetworkResource: &NetworkResource{
				Mode:     "bridge",
				Device:   "eth0",
				CIDR:     "10.0.0.1/8",
				IP:       "10.1.1.13",
				Hostname: "foobar",
				MBits:    1000,
				DNS: &DNSConfig{
					Servers:  []string{"8.8.8.8", "8.8.4.4"},
					Searches: []string{"example.com"},
					Options:  []string{"ndot:2"},
				},
				ReservedPorts: []Port{
					{
						Label:       "foo",
						Value:       1313,
						To:          1313,
						HostNetwork: "private",
					},
				},
				DynamicPorts: []Port{
					{
						Label:       "bar",
						To:          1414,
						HostNetwork: "public",
					},
				},
			},
			name: "fully populated input check",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			output := tc.inputNetworkResource.Copy()
			assert.Equal(t, tc.inputNetworkResource, output, tc.name)

			if output == nil {
				return
			}

			// Assert changes to the copy aren't propagated to the
			// original
			output.DNS.Servers[1] = "foo"
			assert.NotEqual(t, tc.inputNetworkResource, output, tc.name)
		})
	}
}

func TestTask_Validate_Services(t *testing.T) {
	ci.Parallel(t)

	s1 := &Service{
		Name:      "service-name",
		Provider:  "consul",
		PortLabel: "bar",
		Checks: []*ServiceCheck{
			{
				Name:     "check-name",
				Type:     ServiceCheckTCP,
				Interval: 0 * time.Second,
			},
			{
				Name:    "check-name",
				Type:    ServiceCheckTCP,
				Timeout: 2 * time.Second,
			},
			{
				Name:     "check-name",
				Type:     ServiceCheckTCP,
				Interval: 1 * time.Second,
			},
		},
	}

	s2 := &Service{
		Name:      "service-name",
		Provider:  "consul",
		PortLabel: "bar",
	}

	s3 := &Service{
		Name:      "service-A",
		Provider:  "consul",
		PortLabel: "a",
	}
	s4 := &Service{
		Name:      "service-A",
		Provider:  "consul",
		PortLabel: "b",
	}

	ephemeralDisk := DefaultEphemeralDisk()
	ephemeralDisk.SizeMB = 200
	task := &Task{
		Name:   "web",
		Driver: "docker",
		Resources: &Resources{
			CPU:      100,
			MemoryMB: 100,
		},
		Services: []*Service{s1, s2},
	}

	task1 := &Task{
		Name:      "web",
		Driver:    "docker",
		Resources: DefaultResources(),
		Services:  []*Service{s3, s4},
		LogConfig: DefaultLogConfig(),
	}
	tgNetworks := []*NetworkResource{
		{
			MBits: 10,
			DynamicPorts: []Port{
				{
					Label: "a",
					Value: 1000,
				},
				{
					Label: "b",
					Value: 2000,
				},
			},
		},
	}

	err := task.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks)
	if err == nil {
		t.Fatal("expected an error")
	}

	if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "missing required value interval") {
		t.Fatalf("err: %v", err)
	}

	if !strings.Contains(err.Error(), "cannot be less than") {
		t.Fatalf("err: %v", err)
	}

	if err = task1.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks); err != nil {
		t.Fatalf("err: %v", err)
	}
}

func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
	ci.Parallel(t)

	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		task := &Task{
			Name:      "web",
			Driver:    "docker",
			Resources: DefaultResources(),
			Services:  []*Service{s},
			LogConfig: DefaultLogConfig(),
		}

		return task
	}
	tgNetworks := []*NetworkResource{
		{
			DynamicPorts: []Port{
				{
					Label: "http",
					Value: 80,
				},
			},
		},
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name:        "DriverModeWithLabel",
			Provider:    "consul",
			PortLabel:   "http",
			AddressMode: AddressModeDriver,
		},
		{
			Name:        "DriverModeWithPort",
			Provider:    "consul",
			PortLabel:   "80",
			AddressMode: AddressModeDriver,
		},
		{
			Name:        "HostModeWithLabel",
			Provider:    "consul",
			PortLabel:   "http",
			AddressMode: AddressModeHost,
		},
		{
			Name:        "HostModeWithoutLabel",
			Provider:    "consul",
			AddressMode: AddressModeHost,
		},
		{
			Name:        "DriverModeWithoutLabel",
			Provider:    "consul",
			AddressMode: AddressModeDriver,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			if err := task.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks); err != nil {
				t.Fatalf("unexpected err: %v", err)
			}
		})
	}
}

func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
	ci.Parallel(t)

	ephemeralDisk := DefaultEphemeralDisk()
	getTask := func(s *Service) *Task {
		return &Task{
			Name:      "web",
			Driver:    "docker",
			Resources: DefaultResources(),
			Services:  []*Service{s},
			LogConfig: DefaultLogConfig(),
		}
	}
	tgNetworks := []*NetworkResource{
		{
			DynamicPorts: []Port{
				{
					Label: "http",
					Value: 80,
				},
			},
		},
	}

	cases := []*Service{
		{
			// https://github.com/hashicorp/nomad/issues/3681#issuecomment-357274177
			Name:        "DriverModeWithLabel",
			PortLabel:   "asdf",
			AddressMode: AddressModeDriver,
		},
		{
			Name:        "HostModeWithLabel",
			PortLabel:   "asdf",
			AddressMode: AddressModeHost,
		},
		{
			Name:        "HostModeWithPort",
			PortLabel:   "80",
			AddressMode: AddressModeHost,
		},
	}

	for _, service := range cases {
		task := getTask(service)
		t.Run(service.Name, func(t *testing.T) {
			err := task.Validate(ephemeralDisk, JobTypeService, nil, tgNetworks)
			if err == nil {
				t.Fatalf("expected an error")
			}
			// t.Logf("err: %v", err)
		})
	}
}

func TestTask_Validate_Service_Check(t *testing.T) {
	ci.Parallel(t)

	invalidCheck := ServiceCheck{
		Name:     "check-name",
		Command:  "/bin/true",
		Type:     ServiceCheckScript,
		Interval: 10 * time.Second,
	}

	err := invalidCheck.validateConsul()
	if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") {
		t.Fatalf("expected a timeout validation error but received: %q", err)
	}

	check1 := ServiceCheck{
		Name:     "check-name",
		Type:     ServiceCheckTCP,
		Interval: 10 * time.Second,
		Timeout:  2 * time.Second,
	}

	if err := check1.validateConsul(); err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = "foo"
	err = check1.validateConsul()
	if err == nil {
		t.Fatal("Expected an error")
	}

	if !strings.Contains(err.Error(), "invalid initial check state (foo)") {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthCritical
	err = check1.validateConsul()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = api.HealthPassing
	err = check1.validateConsul()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check1.InitialStatus = ""
	err = check1.validateConsul()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2 := ServiceCheck{
		Name:     "check-name-2",
		Type:     ServiceCheckHTTP,
		Interval: 10 * time.Second,
		Timeout:  2 * time.Second,
		Path:     "/foo/bar",
	}

	err = check2.validateConsul()
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	check2.Path = ""
	err = check2.validateConsul()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "http type must have http path") {
		t.Fatalf("err: %v", err)
	}

	check2.Path = "http://www.example.com"
	err = check2.validateConsul()
	if err == nil {
		t.Fatal("Expected an error")
	}
	if !strings.Contains(err.Error(), "relative http path") {
		t.Fatalf("err: %v", err)
	}

	t.Run("check expose", func(t *testing.T) {
		t.Run("type http", func(t *testing.T) {
			require.NoError(t, (&ServiceCheck{
				Type:     ServiceCheckHTTP,
				Interval: 1 * time.Second,
				Timeout:  1 * time.Second,
				Path:     "/health",
				Expose:   true,
			}).validateConsul())
		})
		t.Run("type tcp", func(t *testing.T) {
			require.EqualError(t, (&ServiceCheck{
				Type:     ServiceCheckTCP,
				Interval: 1 * time.Second,
				Timeout:  1 * time.Second,
				Expose:   true,
			}).validateConsul(), "expose may only be set on HTTP or gRPC checks")
		})
	})
}

  2124  // TestTask_Validate_Service_Check_AddressMode asserts that checks do not
  2125  // inherit address mode but do inherit ports.
  2126  func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
  2127  	ci.Parallel(t)
  2128  
  2129  	getTask := func(s *Service) (*Task, *TaskGroup) {
  2130  		return &Task{
  2131  				Services: []*Service{s},
  2132  			}, &TaskGroup{
  2133  				Networks: []*NetworkResource{
  2134  					{
  2135  						DynamicPorts: []Port{
  2136  							{
  2137  								Label: "http",
  2138  								Value: 9999,
  2139  							},
  2140  						},
  2141  					},
  2142  				},
  2143  			}
  2144  	}
  2145  
  2146  	cases := []struct {
  2147  		Service     *Service
  2148  		ErrContains string
  2149  	}{
  2150  		{
  2151  			Service: &Service{
  2152  				Name:        "invalid-driver",
  2153  				Provider:    "consul",
  2154  				PortLabel:   "80",
  2155  				AddressMode: "host",
  2156  			},
  2157  			ErrContains: `port label "80" referenced`,
  2158  		},
  2159  		{
  2160  			Service: &Service{
  2161  				Name:        "http-driver-fail-1",
  2162  				PortLabel:   "80",
  2163  				AddressMode: "driver",
  2164  				Checks: []*ServiceCheck{
  2165  					{
  2166  						Name:     "invalid-check-1",
  2167  						Type:     "tcp",
  2168  						Interval: time.Second,
  2169  						Timeout:  time.Second,
  2170  					},
  2171  				},
  2172  			},
  2173  			ErrContains: `check "invalid-check-1" cannot use a numeric port`,
  2174  		},
  2175  		{
  2176  			Service: &Service{
  2177  				Name:        "http-driver-fail-2",
  2178  				Provider:    "consul",
  2179  				PortLabel:   "80",
  2180  				AddressMode: "driver",
  2181  				Checks: []*ServiceCheck{
  2182  					{
  2183  						Name:      "invalid-check-2",
  2184  						Type:      "tcp",
  2185  						PortLabel: "80",
  2186  						Interval:  time.Second,
  2187  						Timeout:   time.Second,
  2188  					},
  2189  				},
  2190  			},
  2191  			ErrContains: `check "invalid-check-2" cannot use a numeric port`,
  2192  		},
  2193  		{
  2194  			Service: &Service{
  2195  				Name:        "http-driver-fail-3",
  2196  				Provider:    "consul",
  2197  				PortLabel:   "80",
  2198  				AddressMode: "driver",
  2199  				Checks: []*ServiceCheck{
  2200  					{
  2201  						Name:      "invalid-check-3",
  2202  						Type:      "tcp",
  2203  						PortLabel: "missing-port-label",
  2204  						Interval:  time.Second,
  2205  						Timeout:   time.Second,
  2206  					},
  2207  				},
  2208  			},
  2209  			ErrContains: `port label "missing-port-label" referenced`,
  2210  		},
  2211  		{
  2212  			Service: &Service{
  2213  				Name:        "http-driver-passes",
  2214  				Provider:    "consul",
  2215  				PortLabel:   "80",
  2216  				AddressMode: "driver",
  2217  				Checks: []*ServiceCheck{
  2218  					{
  2219  						Name:     "valid-script-check",
  2220  						Type:     "script",
  2221  						Command:  "ok",
  2222  						Interval: time.Second,
  2223  						Timeout:  time.Second,
  2224  					},
  2225  					{
  2226  						Name:      "valid-host-check",
  2227  						Type:      "tcp",
  2228  						PortLabel: "http",
  2229  						Interval:  time.Second,
  2230  						Timeout:   time.Second,
  2231  					},
  2232  					{
  2233  						Name:        "valid-driver-check",
  2234  						Type:        "tcp",
  2235  						AddressMode: "driver",
  2236  						Interval:    time.Second,
  2237  						Timeout:     time.Second,
  2238  					},
  2239  				},
  2240  			},
  2241  		},
  2242  		{
  2243  			Service: &Service{
  2244  				Name:     "empty-address-3673-passes-1",
  2245  				Provider: "consul",
  2246  				Checks: []*ServiceCheck{
  2247  					{
  2248  						Name:      "valid-port-label",
  2249  						Type:      "tcp",
  2250  						PortLabel: "http",
  2251  						Interval:  time.Second,
  2252  						Timeout:   time.Second,
  2253  					},
  2254  					{
  2255  						Name:     "empty-is-ok",
  2256  						Type:     "script",
  2257  						Command:  "ok",
  2258  						Interval: time.Second,
  2259  						Timeout:  time.Second,
  2260  					},
  2261  				},
  2262  			},
  2263  		},
  2264  		{
  2265  			Service: &Service{
  2266  				Name: "empty-address-3673-passes-2",
  2267  			},
  2268  		},
  2269  		{
  2270  			Service: &Service{
  2271  				Name:     "empty-address-3673-fails",
  2272  				Provider: "consul",
  2273  				Checks: []*ServiceCheck{
  2274  					{
  2275  						Name:     "empty-is-not-ok",
  2276  						Type:     "tcp",
  2277  						Interval: time.Second,
  2278  						Timeout:  time.Second,
  2279  					},
  2280  				},
  2281  			},
  2282  			ErrContains: `invalid: check requires a port but neither check nor service`,
  2283  		},
  2284  		{
  2285  			Service: &Service{
  2286  				Name:    "connect-block-on-task-level",
  2287  				Connect: &ConsulConnect{SidecarService: &ConsulSidecarService{}},
  2288  			},
  2289  			ErrContains: `cannot have "connect" block`,
  2290  		},
  2291  	}
  2292  
  2293  	for _, tc := range cases {
  2294  		tc := tc
  2295  		task, tg := getTask(tc.Service)
  2296  		t.Run(tc.Service.Name, func(t *testing.T) {
  2297  			err := validateServices(task, tg.Networks)
  2298  			if err == nil && tc.ErrContains == "" {
  2299  				// Ok!
  2300  				return
  2301  			}
  2302  			if err == nil {
  2303  				t.Fatalf("no error returned. expected: %s", tc.ErrContains)
  2304  			}
  2305  			if !strings.Contains(err.Error(), tc.ErrContains) {
  2306  				t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
  2307  			}
  2308  		})
  2309  	}
  2310  }
  2311  
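        // TestTask_Validate_Service_Check_GRPC asserts that gRPC checks are
        // rejected without a port label and accepted once one is set.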
  2312  func TestTask_Validate_Service_Check_GRPC(t *testing.T) {
  2313  	ci.Parallel(t)
  2314  	// Bad (no port)
  2315  	invalidGRPC := &ServiceCheck{
  2316  		Type:     ServiceCheckGRPC,
  2317  		Interval: time.Second,
  2318  		Timeout:  time.Second,
  2319  	}
  2320  	service := &Service{
  2321  		Name:     "test",
  2322  		Provider: "consul",
  2323  		Checks:   []*ServiceCheck{invalidGRPC},
  2324  	}
  2325  
  2326  	assert.Error(t, service.Validate())
  2327  
  2328  	// Good
  2329  	service.Checks[0] = &ServiceCheck{
  2330  		Type:      ServiceCheckGRPC,
  2331  		Interval:  time.Second,
  2332  		Timeout:   time.Second,
  2333  		PortLabel: "some-port-label",
  2334  	}
  2335  
  2336  	assert.NoError(t, service.Validate())
  2337  }
  2338  
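        // TestTask_Validate_Service_Check_CheckRestart asserts that CheckRestart
        // rejects negative Limit and Grace values and accepts zero or positive ones.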
  2339  func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
  2340  	ci.Parallel(t)
  2341  	invalidCheckRestart := &CheckRestart{
  2342  		Limit: -1,
  2343  		Grace: -1,
  2344  	}
  2345  
  2346  	err := invalidCheckRestart.Validate()
  2347  	assert.NotNil(t, err, "invalidCheckRestart.Validate()")
  2348  	assert.Len(t, err.(*multierror.Error).Errors, 2)
  2349  
  2350  	validCheckRestart := &CheckRestart{}
  2351  	assert.Nil(t, validCheckRestart.Validate())
  2352  
  2353  	validCheckRestart.Limit = 1
  2354  	validCheckRestart.Grace = 1
  2355  	assert.Nil(t, validCheckRestart.Validate())
  2356  }
  2357  
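        // TestTask_Validate_ConnectProxyKind asserts that a connect-proxy task may
        // not define its own service or be a leader, and that its target service
        // must exist in the task group with a Connect stanza.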
  2358  func TestTask_Validate_ConnectProxyKind(t *testing.T) {
  2359  	ci.Parallel(t)
  2360  
  2361  	ephemeralDisk := DefaultEphemeralDisk()
  2362  	getTask := func(kind TaskKind, leader bool) *Task {
  2363  		task := &Task{
  2364  			Name:      "web",
  2365  			Driver:    "docker",
  2366  			Resources: DefaultResources(),
  2367  			LogConfig: DefaultLogConfig(),
  2368  			Kind:      kind,
  2369  			Leader:    leader,
  2370  		}
  2371  		task.Resources.Networks = []*NetworkResource{
  2372  			{
  2373  				MBits: 10,
  2374  				DynamicPorts: []Port{
  2375  					{
  2376  						Label: "http",
  2377  						Value: 80,
  2378  					},
  2379  				},
  2380  			},
  2381  		}
  2382  		return task
  2383  	}
  2384  
  2385  	cases := []struct {
  2386  		Desc        string
  2387  		Kind        TaskKind
  2388  		Leader      bool
  2389  		Service     *Service
  2390  		TgService   []*Service
  2391  		ErrContains string
  2392  	}{
  2393  		{
  2394  			Desc: "Not connect",
  2395  			Kind: "test",
  2396  		},
  2397  		{
  2398  			Desc: "Invalid because of service in task definition",
  2399  			Kind: "connect-proxy:redis",
  2400  			Service: &Service{
  2401  				Name: "redis",
  2402  			},
  2403  			ErrContains: "Connect proxy task must not have a service stanza",
  2404  		},
  2405  		{
  2406  			Desc:   "Leader should not be set",
  2407  			Kind:   "connect-proxy:redis",
  2408  			Leader: true,
  2409  			Service: &Service{
  2410  				Name: "redis",
  2411  			},
  2412  			ErrContains: "Connect proxy task must not have leader set",
  2413  		},
  2414  		{
  2415  			Desc: "Service name invalid",
  2416  			Kind: "connect-proxy:redis:test",
  2417  			Service: &Service{
  2418  				Name: "redis",
  2419  			},
  2420  			ErrContains: `No Connect services in task group with Connect proxy ("redis:test")`,
  2421  		},
  2422  		{
  2423  			Desc:        "Service name not found in group",
  2424  			Kind:        "connect-proxy:redis",
  2425  			ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
  2426  		},
  2427  		{
  2428  			Desc: "Connect stanza not configured in group",
  2429  			Kind: "connect-proxy:redis",
  2430  			TgService: []*Service{{
  2431  				Name: "redis",
  2432  			}},
  2433  			ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
  2434  		},
  2435  		{
  2436  			Desc: "Valid connect proxy kind",
  2437  			Kind: "connect-proxy:redis",
  2438  			TgService: []*Service{{
  2439  				Name: "redis",
  2440  				Connect: &ConsulConnect{
  2441  					SidecarService: &ConsulSidecarService{
  2442  						Port: "db",
  2443  					},
  2444  				},
  2445  			}},
  2446  		},
  2447  	}
  2448  
  2449  	for _, tc := range cases {
  2450  		tc := tc
  2451  		task := getTask(tc.Kind, tc.Leader)
  2452  		if tc.Service != nil {
  2453  			task.Services = []*Service{tc.Service}
  2454  		}
  2455  		t.Run(tc.Desc, func(t *testing.T) {
  2456  			err := task.Validate(ephemeralDisk, "service", tc.TgService, nil)
  2457  			if err == nil && tc.ErrContains == "" {
  2458  				// Ok!
  2459  				return
  2460  			}
  2461  			require.Errorf(t, err, "no error returned. expected: %s", tc.ErrContains)
  2462  			require.Containsf(t, err.Error(), tc.ErrContains, "expected %q but found: %v", tc.ErrContains, err)
  2463  		})
  2464  	}
  2465  }
  2466  
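        // TestTask_Validate_LogConfig asserts that validation fails when the
        // configured log storage exceeds the task's ephemeral disk size.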
  2467  func TestTask_Validate_LogConfig(t *testing.T) {
  2468  	ci.Parallel(t)
  2469  
  2470  	task := &Task{
  2471  		LogConfig: DefaultLogConfig(),
  2472  	}
  2473  	ephemeralDisk := &EphemeralDisk{
  2474  		SizeMB: 1,
  2475  	}
  2476  
  2477  	err := task.Validate(ephemeralDisk, JobTypeService, nil, nil)
  2478  	require.Error(t, err, "log storage")
  2479  }
  2480  
  2481  func TestLogConfig_Equals(t *testing.T) {
  2482  	ci.Parallel(t)
  2483  
  2484  	t.Run("both nil", func(t *testing.T) {
  2485  		a := (*LogConfig)(nil)
  2486  		b := (*LogConfig)(nil)
  2487  		require.True(t, a.Equal(b))
  2488  	})
  2489  
  2490  	t.Run("one nil", func(t *testing.T) {
  2491  		a := new(LogConfig)
  2492  		b := (*LogConfig)(nil)
  2493  		require.False(t, a.Equal(b))
  2494  	})
  2495  
  2496  	t.Run("max files", func(t *testing.T) {
  2497  		a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
  2498  		b := &LogConfig{MaxFiles: 2, MaxFileSizeMB: 200}
  2499  		require.False(t, a.Equal(b))
  2500  	})
  2501  
  2502  	t.Run("max file size", func(t *testing.T) {
  2503  		a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 100}
  2504  		b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
  2505  		require.False(t, a.Equal(b))
  2506  	})
  2507  
  2508  	t.Run("same", func(t *testing.T) {
  2509  		a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
  2510  		b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
  2511  		require.True(t, a.Equal(b))
  2512  	})
  2513  }
  2514  
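        // TestTask_Validate_CSIPluginConfig asserts that a CSI plugin config, when
        // present, must carry a non-empty plugin ID and a valid plugin type.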
  2515  func TestTask_Validate_CSIPluginConfig(t *testing.T) {
  2516  	ci.Parallel(t)
  2517  
  2518  	table := []struct {
  2519  		name          string
  2520  		pc            *TaskCSIPluginConfig
  2521  		expectedErr   string
  2522  		unexpectedErr string
  2523  	}{
  2524  		{
  2525  			name:          "no errors when not specified",
  2526  			pc:            nil,
  2527  			unexpectedErr: "CSIPluginConfig",
  2528  		},
  2529  		{
  2530  			name:        "requires non-empty plugin id",
  2531  			pc:          &TaskCSIPluginConfig{},
  2532  			expectedErr: "CSIPluginConfig must have a non-empty PluginID",
  2533  		},
  2534  		{
  2535  			name: "requires valid plugin type",
  2536  			pc: &TaskCSIPluginConfig{
  2537  				ID:   "com.hashicorp.csi",
  2538  				Type: "nonsense",
  2539  			},
  2540  			expectedErr: "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"",
  2541  		},
  2542  	}
  2543  
  2544  	for _, tt := range table {
  2545  		t.Run(tt.name, func(t *testing.T) {
  2546  			task := testJob().TaskGroups[0].Tasks[0]
  2547  			task.CSIPluginConfig = tt.pc
  2548  			ephemeralDisk := &EphemeralDisk{
  2549  				SizeMB: 100,
  2550  			}
  2551  
  2552  			err := task.Validate(ephemeralDisk, JobTypeService, nil, nil)
  2553  			if tt.expectedErr != "" {
  2554  				require.Error(t, err)
  2555  				require.Contains(t, err.Error(), tt.expectedErr)
  2556  			} else {
  2557  				require.NoError(t, err)
  2558  			}
  2559  		})
  2560  	}
  2561  }
  2562  
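        // TestTask_Validate_Template asserts that empty templates are rejected,
        // two templates may not share a destination, and environment templates
        // cannot use the signal change mode.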
  2563  func TestTask_Validate_Template(t *testing.T) {
  2564  	ci.Parallel(t)
  2565  
  2566  	bad := &Template{}
  2567  	task := &Task{
  2568  		Templates: []*Template{bad},
  2569  	}
  2570  	ephemeralDisk := &EphemeralDisk{
  2571  		SizeMB: 1,
  2572  	}
  2573  
  2574  	err := task.Validate(ephemeralDisk, JobTypeService, nil, nil)
  2575  	if !strings.Contains(err.Error(), "Template 1 validation failed") {
  2576  		t.Fatalf("err: %s", err)
  2577  	}
  2578  
  2579  	// Have two templates that share the same destination
  2580  	good := &Template{
  2581  		SourcePath: "foo",
  2582  		DestPath:   "local/foo",
  2583  		ChangeMode: "noop",
  2584  	}
  2585  
  2586  	task.Templates = []*Template{good, good}
  2587  	err = task.Validate(ephemeralDisk, JobTypeService, nil, nil)
  2588  	if !strings.Contains(err.Error(), "same destination as") {
  2589  		t.Fatalf("err: %s", err)
  2590  	}
  2591  
  2592  	// Env templates can't use signals
  2593  	task.Templates = []*Template{
  2594  		{
  2595  			Envvars:    true,
  2596  			ChangeMode: "signal",
  2597  		},
  2598  	}
  2599  
  2600  	err = task.Validate(ephemeralDisk, JobTypeService, nil, nil)
  2601  	if err == nil {
  2602  		t.Fatalf("expected error from Template.Validate")
  2603  	}
  2604  	if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) {
  2605  		t.Errorf("expected to find %q but found %v", expected, err)
  2606  	}
  2607  }
  2608  
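        // TestTemplate_Copy asserts that Copy returns a deep copy: mutating every
        // field of the original must leave the copy untouched.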
  2609  func TestTemplate_Copy(t *testing.T) {
  2610  	ci.Parallel(t)
  2611  
  2612  	t1 := &Template{
  2613  		SourcePath:   "/local/file.txt",
  2614  		DestPath:     "/local/dest.txt",
  2615  		EmbeddedTmpl: "tpl",
  2616  		ChangeMode:   TemplateChangeModeScript,
  2617  		ChangeScript: &ChangeScript{
  2618  			Command: "/bin/foo",
  2619  			Args:    []string{"--force", "--debug"},
  2620  		},
  2621  		Splay:      10 * time.Second,
  2622  		Perms:      "777",
  2623  		Uid:        pointer.Of(1000),
  2624  		Gid:        pointer.Of(2000),
  2625  		LeftDelim:  "[[",
  2626  		RightDelim: "]]",
  2627  		Envvars:    true,
  2628  		VaultGrace: time.Minute,
  2629  		Wait: &WaitConfig{
  2630  			Min: pointer.Of(time.Second),
  2631  			Max: pointer.Of(time.Minute),
  2632  		},
  2633  	}
  2634  	t2 := t1.Copy()
  2635  
  2636  	t1.SourcePath = "/local/file2.txt"
  2637  	t1.DestPath = "/local/dest2.txt"
  2638  	t1.EmbeddedTmpl = "tpl2"
  2639  	t1.ChangeMode = TemplateChangeModeSignal
  2640  	t1.ChangeScript.Command = "/bin/foobar"
  2641  	t1.ChangeScript.Args = []string{"--forces", "--debugs"}
  2642  	t1.Splay = 5 * time.Second
  2643  	t1.Perms = "700"
  2644  	t1.Uid = pointer.Of(5000)
  2645  	t1.Gid = pointer.Of(6000)
  2646  	t1.LeftDelim = "(("
  2647  	t1.RightDelim = "))"
  2648  	t1.Envvars = false
  2649  	t1.VaultGrace = 2 * time.Minute
  2650  	t1.Wait.Min = pointer.Of(2 * time.Second)
  2651  	t1.Wait.Max = pointer.Of(2 * time.Minute)
  2652  
  2653  	require.NotEqual(t, t1.SourcePath, t2.SourcePath)
  2654  	require.NotEqual(t, t1.DestPath, t2.DestPath)
  2655  	require.NotEqual(t, t1.EmbeddedTmpl, t2.EmbeddedTmpl)
  2656  	require.NotEqual(t, t1.ChangeMode, t2.ChangeMode)
  2657  	require.NotEqual(t, t1.ChangeScript.Command, t2.ChangeScript.Command)
  2658  	require.NotEqual(t, t1.ChangeScript.Args, t2.ChangeScript.Args)
  2659  	require.NotEqual(t, t1.Splay, t2.Splay)
  2660  	require.NotEqual(t, t1.Perms, t2.Perms)
  2661  	require.NotEqual(t, t1.Uid, t2.Uid)
  2662  	require.NotEqual(t, t1.Gid, t2.Gid)
  2663  	require.NotEqual(t, t1.LeftDelim, t2.LeftDelim)
  2664  	require.NotEqual(t, t1.RightDelim, t2.RightDelim)
  2665  	require.NotEqual(t, t1.Envvars, t2.Envvars)
  2666  	require.NotEqual(t, t1.VaultGrace, t2.VaultGrace)
  2667  	require.NotEqual(t, t1.Wait.Min, t2.Wait.Min)
  2668  	require.NotEqual(t, t1.Wait.Max, t2.Wait.Max)
  2669  }
  2671  
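        // TestTemplate_Validate walks a table of templates, asserting that each
        // either validates cleanly or fails with the expected error fragments.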
  2672  func TestTemplate_Validate(t *testing.T) {
  2673  	ci.Parallel(t)
  2674  
  2675  	cases := []struct {
  2676  		Tmpl         *Template
  2677  		Fail         bool
  2678  		ContainsErrs []string
  2679  	}{
  2680  		{
  2681  			Tmpl: &Template{},
  2682  			Fail: true,
  2683  			ContainsErrs: []string{
  2684  				"specify a source path",
  2685  				"specify a destination",
  2686  				TemplateChangeModeInvalidError.Error(),
  2687  			},
  2688  		},
  2689  		{
  2690  			Tmpl: &Template{
  2691  				Splay: -100,
  2692  			},
  2693  			Fail: true,
  2694  			ContainsErrs: []string{
  2695  				"positive splay",
  2696  			},
  2697  		},
  2698  		{
  2699  			Tmpl: &Template{
  2700  				ChangeMode: "foo",
  2701  			},
  2702  			Fail: true,
  2703  			ContainsErrs: []string{
  2704  				TemplateChangeModeInvalidError.Error(),
  2705  			},
  2706  		},
  2707  		{
  2708  			Tmpl: &Template{
  2709  				ChangeMode: "signal",
  2710  			},
  2711  			Fail: true,
  2712  			ContainsErrs: []string{
  2713  				"specify signal value",
  2714  			},
  2715  		},
  2716  		{
  2717  			Tmpl: &Template{
  2718  				SourcePath: "foo",
  2719  				DestPath:   "../../root",
  2720  				ChangeMode: "noop",
  2721  			},
  2722  			Fail: true,
  2723  			ContainsErrs: []string{
  2724  				"destination escapes",
  2725  			},
  2726  		},
  2727  		{
  2728  			Tmpl: &Template{
  2729  				SourcePath: "foo",
  2730  				DestPath:   "local/foo",
  2731  				ChangeMode: "noop",
  2732  			},
  2733  			Fail: false,
  2734  		},
  2735  		{
  2736  			Tmpl: &Template{
  2737  				SourcePath: "foo",
  2738  				DestPath:   "local/foo",
  2739  				ChangeMode: "noop",
  2740  				Perms:      "0444",
  2741  			},
  2742  			Fail: false,
  2743  		},
  2744  		{
  2745  			Tmpl: &Template{
  2746  				SourcePath: "foo",
  2747  				DestPath:   "local/foo",
  2748  				ChangeMode: "noop",
  2749  				Perms:      "zza",
  2750  			},
  2751  			Fail: true,
  2752  			ContainsErrs: []string{
  2753  				"as octal",
  2754  			},
  2755  		},
  2756  		{
  2757  			Tmpl: &Template{
  2758  				SourcePath: "foo",
  2759  				DestPath:   "local/foo",
  2760  				ChangeMode: "noop",
  2761  				Wait: &WaitConfig{
  2762  					Min: pointer.Of(10 * time.Second),
  2763  					Max: pointer.Of(5 * time.Second),
  2764  				},
  2765  			},
  2766  			Fail: true,
  2767  			ContainsErrs: []string{
  2768  				"greater than",
  2769  			},
  2770  		},
  2771  		{
  2772  			Tmpl: &Template{
  2773  				SourcePath: "foo",
  2774  				DestPath:   "local/foo",
  2775  				ChangeMode: "noop",
  2776  				Wait: &WaitConfig{
  2777  					Min: pointer.Of(5 * time.Second),
  2778  					Max: pointer.Of(5 * time.Second),
  2779  				},
  2780  			},
  2781  			Fail: false,
  2782  		},
  2783  		{
  2784  			Tmpl: &Template{
  2785  				SourcePath: "foo",
  2786  				DestPath:   "local/foo",
  2787  				ChangeMode: "noop",
  2788  				Wait: &WaitConfig{
  2789  					Min: pointer.Of(5 * time.Second),
  2790  					Max: pointer.Of(10 * time.Second),
  2791  				},
  2792  			},
  2793  			Fail: false,
  2794  		},
  2795  		{
  2796  			Tmpl: &Template{
  2797  				SourcePath:   "foo",
  2798  				DestPath:     "local/foo",
  2799  				ChangeMode:   "script",
  2800  				ChangeScript: nil,
  2801  			},
  2802  			Fail: true,
  2803  		},
  2804  		{
  2805  			Tmpl: &Template{
  2806  				SourcePath:   "foo",
  2807  				DestPath:     "local/foo",
  2808  				ChangeMode:   "script",
  2809  				ChangeScript: &ChangeScript{Command: ""},
  2810  			},
  2811  			Fail: true,
  2812  		},
  2813  		{
  2814  			Tmpl: &Template{
  2815  				SourcePath:   "foo",
  2816  				DestPath:     "local/foo",
  2817  				ChangeMode:   "script",
  2818  				ChangeScript: &ChangeScript{Command: "/bin/foo"},
  2819  			},
  2820  			Fail: false,
  2821  		},
  2822  	}
  2823  
  2824  	for i, c := range cases {
  2825  		err := c.Tmpl.Validate()
  2826  		if err != nil {
  2827  			if !c.Fail {
  2828  				t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err)
  2829  			}
  2830  
  2831  			e := err.Error()
  2832  			for _, exp := range c.ContainsErrs {
  2833  				if !strings.Contains(e, exp) {
  2834  					t.Fatalf("Case %d: should have contained error %q: %q", i+1, exp, e)
  2835  				}
  2836  			}
  2837  		} else if c.Fail {
  2838  			t.Fatalf("Case %d: should have failed: %v", i+1, err)
  2839  		}
  2840  	}
  2841  }
  2842  
  2843  func TestTaskWaitConfig_Equals(t *testing.T) {
  2844  	ci.Parallel(t)
  2845  
  2846  	testCases := []struct {
  2847  		name     string
  2848  		config   *WaitConfig
  2849  		expected *WaitConfig
  2850  	}{
  2851  		{
  2852  			name: "all-fields",
  2853  			config: &WaitConfig{
  2854  				Min: pointer.Of(5 * time.Second),
  2855  				Max: pointer.Of(10 * time.Second),
  2856  			},
  2857  			expected: &WaitConfig{
  2858  				Min: pointer.Of(5 * time.Second),
  2859  				Max: pointer.Of(10 * time.Second),
  2860  			},
  2861  		},
  2862  		{
  2863  			name:     "no-fields",
  2864  			config:   &WaitConfig{},
  2865  			expected: &WaitConfig{},
  2866  		},
  2867  		{
  2868  			name: "min-only",
  2869  			config: &WaitConfig{
  2870  				Min: pointer.Of(5 * time.Second),
  2871  			},
  2872  			expected: &WaitConfig{
  2873  				Min: pointer.Of(5 * time.Second),
  2874  			},
  2875  		},
  2876  		{
  2877  			name: "max-only",
  2878  			config: &WaitConfig{
  2879  				Max: pointer.Of(10 * time.Second),
  2880  			},
  2881  			expected: &WaitConfig{
  2882  				Max: pointer.Of(10 * time.Second),
  2883  			},
  2884  		},
  2885  	}
  2886  
  2887  	for _, tc := range testCases {
  2888  		t.Run(tc.name, func(t *testing.T) {
  2889  			require.True(t, tc.config.Equal(tc.expected))
  2890  		})
  2891  	}
  2892  }
  2893  
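        // TestConstraint_Validate exercises operand-specific validation: regexp,
        // version, semver, distinct_property, distinct_hosts, set_contains*, and
        // the LTarget requirement.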
  2894  func TestConstraint_Validate(t *testing.T) {
  2895  	ci.Parallel(t)
  2896  
  2897  	c := &Constraint{}
  2898  	err := c.Validate()
  2899  	require.Error(t, err, "Missing constraint operand")
  2900  
  2901  	c = &Constraint{
  2902  		LTarget: "$attr.kernel.name",
  2903  		RTarget: "linux",
  2904  		Operand: "=",
  2905  	}
  2906  	err = c.Validate()
  2907  	require.NoError(t, err)
  2908  
  2909  	// Perform additional regexp validation
  2910  	c.Operand = ConstraintRegex
  2911  	c.RTarget = "(foo"
  2912  	err = c.Validate()
  2913  	require.Error(t, err, "missing closing")
  2914  
  2915  	// Perform version validation
  2916  	c.Operand = ConstraintVersion
  2917  	c.RTarget = "~> foo"
  2918  	err = c.Validate()
  2919  	require.Error(t, err, "Malformed constraint")
  2920  
  2921  	// Perform semver validation
  2922  	c.Operand = ConstraintSemver
  2923  	err = c.Validate()
  2924  	require.Error(t, err, "Malformed constraint")
  2925  
  2926  	c.RTarget = ">= 0.6.1"
  2927  	require.NoError(t, c.Validate())
  2928  
  2929  	// Perform distinct_property validation
  2930  	c.Operand = ConstraintDistinctProperty
  2931  	c.RTarget = "0"
  2932  	err = c.Validate()
  2933  	require.Error(t, err, "count of 1 or greater")
  2934  
  2935  	c.RTarget = "-1"
  2936  	err = c.Validate()
  2937  	require.Error(t, err, "to uint64")
  2938  
  2939  	// Perform distinct_hosts validation
  2940  	c.Operand = ConstraintDistinctHosts
  2941  	c.LTarget = ""
  2942  	c.RTarget = ""
  2943  	if err := c.Validate(); err != nil {
  2944  		t.Fatalf("expected valid constraint: %v", err)
  2945  	}
  2946  
  2947  	// Perform set_contains* validation
  2948  	c.RTarget = ""
  2949  	for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} {
  2950  		c.Operand = o
  2951  		err = c.Validate()
  2952  		require.Error(t, err, "requires an RTarget")
  2953  	}
  2954  
  2955  	// Perform LTarget validation
  2956  	c.Operand = ConstraintRegex
  2957  	c.RTarget = "foo"
  2958  	c.LTarget = ""
  2959  	err = c.Validate()
  2960  	require.Error(t, err, "No LTarget")
  2961  
  2962  	// Perform constraint type validation
  2963  	c.Operand = "foo"
  2964  	err = c.Validate()
  2965  	require.Error(t, err, "Unknown constraint type")
  2966  }
  2967  
  2968  func TestAffinity_Validate(t *testing.T) {
  2969  	ci.Parallel(t)
  2970  
  2971  	type tc struct {
  2972  		affinity *Affinity
  2973  		err      error
  2974  		name     string
  2975  	}
  2976  	testCases := []tc{
  2977  		{
  2978  			affinity: &Affinity{},
  2979  			err:      fmt.Errorf("Missing affinity operand"),
  2980  		},
  2981  		{
  2982  			affinity: &Affinity{
  2983  				Operand: "foo",
  2984  				LTarget: "${meta.node_class}",
  2985  				Weight:  10,
  2986  			},
  2987  			err: fmt.Errorf("Unknown affinity operator \"foo\""),
  2988  		},
  2989  		{
  2990  			affinity: &Affinity{
  2991  				Operand: "=",
  2992  				LTarget: "${meta.node_class}",
  2993  				Weight:  10,
  2994  			},
  2995  			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
  2996  		},
  2997  		{
  2998  			affinity: &Affinity{
  2999  				Operand: "=",
  3000  				LTarget: "${meta.node_class}",
  3001  				RTarget: "c4",
  3002  				Weight:  0,
  3003  			},
  3004  			err: fmt.Errorf("Affinity weight cannot be zero"),
  3005  		},
  3006  		{
  3007  			affinity: &Affinity{
  3008  				Operand: "=",
  3009  				LTarget: "${meta.node_class}",
  3010  				RTarget: "c4",
  3011  				Weight:  110,
  3012  			},
  3013  			err: fmt.Errorf("Affinity weight must be within the range [-100,100]"),
  3014  		},
  3015  		{
  3016  			affinity: &Affinity{
  3017  				Operand: "=",
  3018  				LTarget: "${node.class}",
  3019  				Weight:  10,
  3020  			},
  3021  			err: fmt.Errorf("Operator \"=\" requires an RTarget"),
  3022  		},
  3023  		{
  3024  			affinity: &Affinity{
  3025  				Operand: "version",
  3026  				LTarget: "${meta.os}",
  3027  				RTarget: ">>2.0",
  3028  				Weight:  110,
  3029  			},
  3030  			err: fmt.Errorf("Version affinity is invalid"),
  3031  		},
  3032  		{
  3033  			affinity: &Affinity{
  3034  				Operand: "regexp",
  3035  				LTarget: "${meta.os}",
  3036  				RTarget: "\\K2.0",
  3037  				Weight:  100,
  3038  			},
  3039  			err: fmt.Errorf("Regular expression failed to compile"),
  3040  		},
  3041  	}
  3042  
  3043  	for _, tc := range testCases {
  3044  		t.Run(tc.name, func(t *testing.T) {
  3045  			err := tc.affinity.Validate()
  3046  			if tc.err != nil {
  3047  				require.NotNil(t, err)
  3048  				require.Contains(t, err.Error(), tc.err.Error())
  3049  			} else {
  3050  				require.Nil(t, err)
  3051  			}
  3052  		})
  3053  	}
  3054  }
  3055  
  3056  func TestUpdateStrategy_Validate(t *testing.T) {
  3057  	ci.Parallel(t)
  3058  
  3059  	u := &UpdateStrategy{
  3060  		MaxParallel:      -1,
  3061  		HealthCheck:      "foo",
  3062  		MinHealthyTime:   -10,
  3063  		HealthyDeadline:  -15,
  3064  		ProgressDeadline: -25,
  3065  		AutoRevert:       false,
  3066  		Canary:           -1,
  3067  	}
  3068  
  3069  	err := u.Validate()
  3070  	requireErrors(t, err,
  3071  		"Invalid health check given",
  3072  		"Max parallel can not be less than zero",
  3073  		"Canary count can not be less than zero",
  3074  		"Minimum healthy time may not be less than zero",
  3075  		"Healthy deadline must be greater than zero",
  3076  		"Progress deadline must be zero or greater",
  3077  		"Minimum healthy time must be less than healthy deadline",
  3078  		"Healthy deadline must be less than progress deadline",
  3079  	)
  3080  }
  3081  
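        // TestResource_NetIndex asserts that NetIndex locates a network by device
        // name and returns -1 when no device matches.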
  3082  func TestResource_NetIndex(t *testing.T) {
  3083  	ci.Parallel(t)
  3084  
  3085  	r := &Resources{
  3086  		Networks: []*NetworkResource{
  3087  			{Device: "eth0"},
  3088  			{Device: "lo0"},
  3089  			{Device: ""},
  3090  		},
  3091  	}
  3092  	if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 {
  3093  		t.Fatalf("Bad: %d", idx)
  3094  	}
  3095  	if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 {
  3096  		t.Fatalf("Bad: %d", idx)
  3097  	}
  3098  	if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 {
  3099  		t.Fatalf("Bad: %d", idx)
  3100  	}
  3101  }
  3102  
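        // TestResource_Add asserts that Add sums CPU, memory, and disk, and merges
        // bandwidth and reserved ports onto the existing network entry.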
  3103  func TestResource_Add(t *testing.T) {
  3104  	ci.Parallel(t)
  3105  
  3106  	r1 := &Resources{
  3107  		CPU:      2000,
  3108  		MemoryMB: 2048,
  3109  		DiskMB:   10000,
  3110  		Networks: []*NetworkResource{
  3111  			{
  3112  				CIDR:          "10.0.0.0/8",
  3113  				MBits:         100,
  3114  				ReservedPorts: []Port{{"ssh", 22, 0, ""}},
  3115  			},
  3116  		},
  3117  	}
  3118  	r2 := &Resources{
  3119  		CPU:      2000,
  3120  		MemoryMB: 1024,
  3121  		DiskMB:   5000,
  3122  		Networks: []*NetworkResource{
  3123  			{
  3124  				IP:            "10.0.0.1",
  3125  				MBits:         50,
  3126  				ReservedPorts: []Port{{"web", 80, 0, ""}},
  3127  			},
  3128  		},
  3129  	}
  3130  
  3131  	r1.Add(r2)
  3132  
  3133  	expect := &Resources{
  3134  		CPU:      3000,
  3135  		MemoryMB: 3072,
  3136  		DiskMB:   15000,
  3137  		Networks: []*NetworkResource{
  3138  			{
  3139  				CIDR:          "10.0.0.0/8",
  3140  				MBits:         150,
  3141  				ReservedPorts: []Port{{"ssh", 22, 0, ""}, {"web", 80, 0, ""}},
  3142  			},
  3143  		},
  3144  	}
  3145  
  3146  	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
  3147  		t.Fatalf("bad: %#v %#v", expect, r1)
  3148  	}
  3149  }
  3150  
  3151  func TestResource_Add_Network(t *testing.T) {
  3152  	ci.Parallel(t)
  3153  
  3154  	r1 := &Resources{}
  3155  	r2 := &Resources{
  3156  		Networks: []*NetworkResource{
  3157  			{
  3158  				MBits:        50,
  3159  				DynamicPorts: []Port{{"http", 0, 80, ""}, {"https", 0, 443, ""}},
  3160  			},
  3161  		},
  3162  	}
  3163  	r3 := &Resources{
  3164  		Networks: []*NetworkResource{
  3165  			{
  3166  				MBits:        25,
  3167  				DynamicPorts: []Port{{"admin", 0, 8080, ""}},
  3168  			},
  3169  		},
  3170  	}
  3171  
  3172  	r1.Add(r2)
  3173  	r1.Add(r3)
  3174  
  3175  	expect := &Resources{
  3176  		Networks: []*NetworkResource{
  3177  			{
  3178  				MBits:        75,
  3179  				DynamicPorts: []Port{{"http", 0, 80, ""}, {"https", 0, 443, ""}, {"admin", 0, 8080, ""}},
  3180  			},
  3181  		},
  3182  	}
  3183  
  3184  	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
  3185  		t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0])
  3186  	}
  3187  }
  3188  
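        // TestComparableResources_Subtract asserts that Subtract removes CPU
        // shares, reserved cores, memory, and shared disk, while leaving network
        // resources unchanged.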
  3189  func TestComparableResources_Subtract(t *testing.T) {
  3190  	ci.Parallel(t)
  3191  
  3192  	r1 := &ComparableResources{
  3193  		Flattened: AllocatedTaskResources{
  3194  			Cpu: AllocatedCpuResources{
  3195  				CpuShares:     2000,
  3196  				ReservedCores: []uint16{0, 1},
  3197  			},
  3198  			Memory: AllocatedMemoryResources{
  3199  				MemoryMB:    2048,
  3200  				MemoryMaxMB: 3048,
  3201  			},
  3202  			Networks: []*NetworkResource{
  3203  				{
  3204  					CIDR:          "10.0.0.0/8",
  3205  					MBits:         100,
  3206  					ReservedPorts: []Port{{"ssh", 22, 0, ""}},
  3207  				},
  3208  			},
  3209  		},
  3210  		Shared: AllocatedSharedResources{
  3211  			DiskMB: 10000,
  3212  		},
  3213  	}
  3214  
  3215  	r2 := &ComparableResources{
  3216  		Flattened: AllocatedTaskResources{
  3217  			Cpu: AllocatedCpuResources{
  3218  				CpuShares:     1000,
  3219  				ReservedCores: []uint16{0},
  3220  			},
  3221  			Memory: AllocatedMemoryResources{
  3222  				MemoryMB:    1024,
  3223  				MemoryMaxMB: 1524,
  3224  			},
  3225  			Networks: []*NetworkResource{
  3226  				{
  3227  					CIDR:          "10.0.0.0/8",
  3228  					MBits:         20,
  3229  					ReservedPorts: []Port{{"ssh", 22, 0, ""}},
  3230  				},
  3231  			},
  3232  		},
  3233  		Shared: AllocatedSharedResources{
  3234  			DiskMB: 5000,
  3235  		},
  3236  	}
  3237  	r1.Subtract(r2)
  3238  
  3239  	expect := &ComparableResources{
  3240  		Flattened: AllocatedTaskResources{
  3241  			Cpu: AllocatedCpuResources{
  3242  				CpuShares:     1000,
  3243  				ReservedCores: []uint16{1},
  3244  			},
  3245  			Memory: AllocatedMemoryResources{
  3246  				MemoryMB:    1024,
  3247  				MemoryMaxMB: 1524,
  3248  			},
  3249  			Networks: []*NetworkResource{
  3250  				{
  3251  					CIDR:          "10.0.0.0/8",
  3252  					MBits:         100,
  3253  					ReservedPorts: []Port{{"ssh", 22, 0, ""}},
  3254  				},
  3255  			},
  3256  		},
  3257  		Shared: AllocatedSharedResources{
  3258  			DiskMB: 5000,
  3259  		},
  3260  	}
  3261  
  3262  	require := require.New(t)
  3263  	require.Equal(expect, r1)
  3264  }
  3265  
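        // TestMemoryResources_Add asserts that Add treats a missing MemoryMaxMB as
        // equal to MemoryMB when accumulating totals.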
  3266  func TestMemoryResources_Add(t *testing.T) {
  3267  	ci.Parallel(t)
  3268  
  3269  	r := &AllocatedMemoryResources{}
  3270  
  3271  	// adding plain no max
  3272  	r.Add(&AllocatedMemoryResources{
  3273  		MemoryMB: 100,
  3274  	})
  3275  	require.Equal(t, &AllocatedMemoryResources{
  3276  		MemoryMB:    100,
  3277  		MemoryMaxMB: 100,
  3278  	}, r)
  3279  
  3280  	// adding with max
  3281  	r.Add(&AllocatedMemoryResources{
  3282  		MemoryMB:    100,
  3283  		MemoryMaxMB: 200,
  3284  	})
  3285  	require.Equal(t, &AllocatedMemoryResources{
  3286  		MemoryMB:    200,
  3287  		MemoryMaxMB: 300,
  3288  	}, r)
  3289  }
  3290  
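        // TestNodeNetworkResource_Copy asserts that Copy returns a deep, equal
        // copy and that mutating the copy does not affect the original.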
  3291  func TestNodeNetworkResource_Copy(t *testing.T) {
  3292  	ci.Parallel(t)
  3293  
  3294  	netResource := &NodeNetworkResource{
  3295  		Mode:       "host",
  3296  		Device:     "eth0",
  3297  		MacAddress: "00:00:00:00:00:00",
  3298  		Speed:      1000,
  3299  		Addresses: []NodeNetworkAddress{
  3300  			{
  3301  				Family:        NodeNetworkAF_IPv4,
  3302  				Alias:         "default",
  3303  				Address:       "192.168.0.2",
  3304  				ReservedPorts: "22",
  3305  				Gateway:       "192.168.0.1",
  3306  			},
  3307  		},
  3308  	}
  3309  
  3310  	// Copy must be equal.
  3311  	netResourceCopy := netResource.Copy()
  3312  	require.Equal(t, netResource, netResourceCopy)
  3313  
  3314  	// Modifying copy should not modify original value.
  3315  	netResourceCopy.Mode = "alloc"
  3316  	netResourceCopy.Device = "eth1"
  3317  	netResourceCopy.MacAddress = "11:11:11:11:11:11"
  3318  	netResourceCopy.Speed = 500
  3319  	netResourceCopy.Addresses[0].Alias = "copy"
  3320  	require.NotEqual(t, netResource, netResourceCopy)
  3321  }
  3322  
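        // TestEncodeDecode round-trips a struct through Encode and Decode,
        // skipping the leading message-type byte that Encode prepends.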
  3323  func TestEncodeDecode(t *testing.T) {
  3324  	ci.Parallel(t)
  3325  
  3326  	type FooRequest struct {
  3327  		Foo string
  3328  		Bar int
  3329  		Baz bool
  3330  	}
  3331  	arg := &FooRequest{
  3332  		Foo: "test",
  3333  		Bar: 42,
  3334  		Baz: true,
  3335  	}
  3336  	buf, err := Encode(1, arg)
  3337  	if err != nil {
  3338  		t.Fatalf("err: %v", err)
  3339  	}
  3340  
  3341  	var out FooRequest
  3342  	err = Decode(buf[1:], &out)
  3343  	if err != nil {
  3344  		t.Fatalf("err: %v", err)
  3345  	}
  3346  
  3347  	if !reflect.DeepEqual(arg, &out) {
  3348  		t.Fatalf("bad: %#v %#v", arg, out)
  3349  	}
  3350  }
  3351  
  3352  func BenchmarkEncodeDecode(b *testing.B) {
  3353  	job := testJob()
  3354  
  3355  	for i := 0; i < b.N; i++ {
  3356  		buf, err := Encode(1, job)
  3357  		if err != nil {
  3358  			b.Fatalf("err: %v", err)
  3359  		}
  3360  
  3361  		var out Job
  3362  		err = Decode(buf[1:], &out)
  3363  		if err != nil {
  3364  			b.Fatalf("err: %v", err)
  3365  		}
  3366  	}
  3367  }
  3368  
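        // TestInvalidServiceCheck covers service and check validation: unknown
        // check types, malformed service names, tcp/http checks without a port,
        // and an invalid Connect sidecar configuration.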
  3369  func TestInvalidServiceCheck(t *testing.T) {
  3370  	ci.Parallel(t)
  3371  
  3372  	s := Service{
  3373  		Name:      "service-name",
  3374  		Provider:  "consul",
  3375  		PortLabel: "bar",
  3376  		Checks: []*ServiceCheck{
  3377  			{
  3378  				Name: "check-name",
  3379  				Type: "lol",
  3380  			},
  3381  		},
  3382  	}
  3383  	if err := s.Validate(); err == nil {
  3384  		t.Fatalf("Service should be invalid (invalid type)")
  3385  	}
  3386  
  3387  	s = Service{
  3388  		Name:      "service.name",
  3389  		Provider:  "consul",
  3390  		PortLabel: "bar",
  3391  	}
  3392  	if err := s.ValidateName(s.Name); err == nil {
  3393  		t.Fatalf("Service should be invalid (contains a dot): %v", err)
  3394  	}
  3395  
  3396  	s = Service{
  3397  		Name:      "-my-service",
  3398  		Provider:  "consul",
  3399  		PortLabel: "bar",
  3400  	}
  3401  	if err := s.Validate(); err == nil {
  3402  		t.Fatalf("Service should be invalid (begins with a hyphen): %v", err)
  3403  	}
  3404  
  3405  	s = Service{
  3406  		Name:      "my-service-${NOMAD_META_FOO}",
  3407  		Provider:  "consul",
  3408  		PortLabel: "bar",
  3409  	}
  3410  	if err := s.Validate(); err != nil {
  3411  		t.Fatalf("Service should be valid: %v", err)
  3412  	}
  3413  
  3414  	s = Service{
  3415  		Name:      "my_service-${NOMAD_META_FOO}",
  3416  		Provider:  "consul",
  3417  		PortLabel: "bar",
  3418  	}
  3419  	if err := s.Validate(); err == nil {
  3420  		t.Fatalf("Service should be invalid (contains underscore but not in a variable name): %v", err)
  3421  	}
  3422  
  3423  	s = Service{
  3424  		Name:      "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456",
  3425  		Provider:  "consul",
  3426  		PortLabel: "bar",
  3427  	}
  3428  	if err := s.ValidateName(s.Name); err == nil {
  3429  		t.Fatalf("Service should be invalid (too long): %v", err)
  3430  	}
  3431  
  3432  	s = Service{
  3433  		Name:     "service-name",
  3434  		Provider: "consul",
  3435  		Checks: []*ServiceCheck{
  3436  			{
  3437  				Name:     "check-tcp",
  3438  				Type:     ServiceCheckTCP,
  3439  				Interval: 5 * time.Second,
  3440  				Timeout:  2 * time.Second,
  3441  			},
  3442  			{
  3443  				Name:     "check-http",
  3444  				Type:     ServiceCheckHTTP,
  3445  				Path:     "/foo",
  3446  				Interval: 5 * time.Second,
  3447  				Timeout:  2 * time.Second,
  3448  			},
  3449  		},
  3450  	}
  3451  	if err := s.Validate(); err == nil {
  3452  		t.Fatalf("service should be invalid (tcp/http checks with no port): %v", err)
  3453  	}
  3454  
  3455  	s = Service{
  3456  		Name:     "service-name",
  3457  		Provider: "consul",
  3458  		Checks: []*ServiceCheck{
  3459  			{
  3460  				Name:     "check-script",
  3461  				Type:     ServiceCheckScript,
  3462  				Command:  "/bin/date",
  3463  				Interval: 5 * time.Second,
  3464  				Timeout:  2 * time.Second,
  3465  			},
  3466  		},
  3467  	}
  3468  	if err := s.Validate(); err != nil {
  3469  		t.Fatalf("unexpected error: %v", err)
  3470  	}
  3471  
  3472  	s = Service{
  3473  		Name:     "service-name",
  3474  		Provider: "consul",
  3475  		Checks: []*ServiceCheck{
  3476  			{
  3477  				Name:     "tcp-check",
  3478  				Type:     ServiceCheckTCP,
  3479  				Interval: 5 * time.Second,
  3480  				Timeout:  2 * time.Second,
  3481  			},
  3482  		},
  3483  		Connect: &ConsulConnect{
  3484  			SidecarService: &ConsulSidecarService{},
  3485  		},
  3486  	}
  3487  	require.Error(t, s.Validate())
  3488  }
  3489  
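        // TestDistinctCheckID asserts that checks sharing a name but differing in
        // path or interval hash to distinct IDs.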
  3490  func TestDistinctCheckID(t *testing.T) {
  3491  	ci.Parallel(t)
  3492  
  3493  	c1 := ServiceCheck{
  3494  		Name:     "web-health",
  3495  		Type:     "http",
  3496  		Path:     "/health",
  3497  		Interval: 2 * time.Second,
  3498  		Timeout:  3 * time.Second,
  3499  	}
  3500  	c2 := ServiceCheck{
  3501  		Name:     "web-health",
  3502  		Type:     "http",
  3503  		Path:     "/health1",
  3504  		Interval: 2 * time.Second,
  3505  		Timeout:  3 * time.Second,
  3506  	}
  3507  
  3508  	c3 := ServiceCheck{
  3509  		Name:     "web-health",
  3510  		Type:     "http",
  3511  		Path:     "/health",
  3512  		Interval: 4 * time.Second,
  3513  		Timeout:  3 * time.Second,
  3514  	}
  3515  	serviceID := "123"
  3516  	c1Hash := c1.Hash(serviceID)
  3517  	c2Hash := c2.Hash(serviceID)
  3518  	c3Hash := c3.Hash(serviceID)
  3519  
  3520  	if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash {
  3521  		t.Fatalf("Checks need to be unique; c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash)
  3522  	}
  3523  }
  3525  
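        // TestService_Canonicalize asserts that Canonicalize interpolates ${JOB},
        // ${TASKGROUP}, ${TASK}, and ${BASE} in service names and derives the
        // namespace from the service provider.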
  3526  func TestService_Canonicalize(t *testing.T) {
  3527  	ci.Parallel(t)
  3528  
  3529  	testCases := []struct {
  3530  		inputService          *Service
  3531  		inputJob              string
  3532  		inputTaskGroup        string
  3533  		inputTask             string
  3534  		inputJobNamespace     string
  3535  		expectedOutputService *Service
  3536  		name                  string
  3537  	}{
  3538  		{
  3539  			inputService: &Service{
  3540  				Name: "${TASK}-db",
  3541  			},
  3542  			inputJob:          "example",
  3543  			inputTaskGroup:    "cache",
  3544  			inputTask:         "redis",
  3545  			inputJobNamespace: "platform",
  3546  			expectedOutputService: &Service{
  3547  				Name:      "redis-db",
  3548  				Provider:  "consul",
  3549  				Namespace: "default",
  3550  				TaskName:  "redis",
  3551  			},
  3552  			name: "interpolate task in name",
  3553  		},
  3554  		{
  3555  			inputService: &Service{
  3556  				Name: "db",
  3557  			},
  3558  			inputJob:          "example",
  3559  			inputTaskGroup:    "cache",
  3560  			inputTask:         "redis",
  3561  			inputJobNamespace: "platform",
  3562  			expectedOutputService: &Service{
  3563  				Name:      "db",
  3564  				Provider:  "consul",
  3565  				Namespace: "default",
  3566  				TaskName:  "redis",
  3567  			},
  3568  			name: "no interpolation in name",
  3569  		},
  3570  		{
  3571  			inputService: &Service{
  3572  				Name: "${JOB}-${TASKGROUP}-${TASK}-db",
  3573  			},
  3574  			inputJob:          "example",
  3575  			inputTaskGroup:    "cache",
  3576  			inputTask:         "redis",
  3577  			inputJobNamespace: "platform",
  3578  			expectedOutputService: &Service{
  3579  				Name:      "example-cache-redis-db",
  3580  				Provider:  "consul",
  3581  				Namespace: "default",
  3582  				TaskName:  "redis",
  3583  			},
  3584  			name: "interpolate job, taskgroup and task in name",
  3585  		},
  3586  		{
  3587  			inputService: &Service{
  3588  				Name: "${BASE}-db",
  3589  			},
  3590  			inputJob:          "example",
  3591  			inputTaskGroup:    "cache",
  3592  			inputTask:         "redis",
  3593  			inputJobNamespace: "platform",
  3594  			expectedOutputService: &Service{
  3595  				Name:      "example-cache-redis-db",
  3596  				Provider:  "consul",
  3597  				Namespace: "default",
  3598  				TaskName:  "redis",
  3599  			},
  3600  			name: "interpolate base in name",
  3601  		},
  3602  		{
  3603  			inputService: &Service{
  3604  				Name:     "db",
  3605  				Provider: "nomad",
  3606  			},
  3607  			inputJob:          "example",
  3608  			inputTaskGroup:    "cache",
  3609  			inputTask:         "redis",
  3610  			inputJobNamespace: "platform",
  3611  			expectedOutputService: &Service{
  3612  				Name:      "db",
  3613  				Provider:  "nomad",
  3614  				Namespace: "platform",
  3615  				TaskName:  "redis",
  3616  			},
  3617  			name: "nomad provider",
  3618  		},
  3619  	}
  3620  
  3621  	for _, tc := range testCases {
  3622  		t.Run(tc.name, func(t *testing.T) {
  3623  			tc.inputService.Canonicalize(tc.inputJob, tc.inputTaskGroup, tc.inputTask, tc.inputJobNamespace)
  3624  			assert.Equal(t, tc.expectedOutputService, tc.inputService)
  3625  		})
  3626  	}
  3627  }
  3628  
  3629  func TestJob_ExpandServiceNames(t *testing.T) {
  3630  	ci.Parallel(t)
  3631  
  3632  	j := &Job{
  3633  		Name: "my-job",
  3634  		TaskGroups: []*TaskGroup{
  3635  			{
  3636  				Name: "web",
  3637  				Tasks: []*Task{
  3638  					{
  3639  						Name: "frontend",
  3640  						Services: []*Service{
  3641  							{
  3642  								Name: "${BASE}-default",
  3643  							},
  3644  							{
  3645  								Name: "jmx",
  3646  							},
  3647  						},
  3648  					},
  3649  				},
  3650  			},
  3651  			{
  3652  				Name: "admin",
  3653  				Tasks: []*Task{
  3654  					{
  3655  						Name: "admin-web",
  3656  					},
  3657  				},
  3658  			},
  3659  		},
  3660  	}
  3661  
  3662  	j.Canonicalize()
  3663  
  3664  	service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name
  3665  	if service1Name != "my-job-web-frontend-default" {
  3666  		t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name)
  3667  	}
  3668  
  3669  	service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name
  3670  	if service2Name != "jmx" {
  3671  		t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name)
  3672  	}
  3673  }
  3675  
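        // TestJob_CombinedTaskMeta asserts that task meta overrides group meta,
        // which in turn overrides job meta.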
  3676  func TestJob_CombinedTaskMeta(t *testing.T) {
  3677  	ci.Parallel(t)
  3678  
  3679  	j := &Job{
  3680  		Meta: map[string]string{
  3681  			"job_test":   "job",
  3682  			"group_test": "job",
  3683  			"task_test":  "job",
  3684  		},
  3685  		TaskGroups: []*TaskGroup{
  3686  			{
  3687  				Name: "group",
  3688  				Meta: map[string]string{
  3689  					"group_test": "group",
  3690  					"task_test":  "group",
  3691  				},
  3692  				Tasks: []*Task{
  3693  					{
  3694  						Name: "task",
  3695  						Meta: map[string]string{
  3696  							"task_test": "task",
  3697  						},
  3698  					},
  3699  				},
  3700  			},
  3701  		},
  3702  	}
  3703  
  3704  	require := require.New(t)
  3705  	require.EqualValues(map[string]string{
  3706  		"job_test":   "job",
  3707  		"group_test": "group",
  3708  		"task_test":  "task",
  3709  	}, j.CombinedTaskMeta("group", "task"))
  3710  	require.EqualValues(map[string]string{
  3711  		"job_test":   "job",
  3712  		"group_test": "group",
  3713  		"task_test":  "group",
  3714  	}, j.CombinedTaskMeta("group", ""))
  3715  	require.EqualValues(map[string]string{
  3716  		"job_test":   "job",
  3717  		"group_test": "job",
  3718  		"task_test":  "job",
  3719  	}, j.CombinedTaskMeta("", "task"))
  3720  }
  3722  
  3723  func TestPeriodicConfig_EnabledInvalid(t *testing.T) {
  3724  	ci.Parallel(t)
  3725  
  3726  	// Create a config that is enabled but with no interval specified.
  3727  	p := &PeriodicConfig{Enabled: true}
  3728  	if err := p.Validate(); err == nil {
  3729  		t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid")
  3730  	}
  3731  
  3732  	// Create a config that is enabled, with a spec but no type specified.
  3733  	p = &PeriodicConfig{Enabled: true, Spec: "foo"}
  3734  	if err := p.Validate(); err == nil {
  3735  		t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid")
  3736  	}
  3737  
  3738  	// Create a config that is enabled, with a spec type but no spec specified.
  3739  	p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron}
  3740  	if err := p.Validate(); err == nil {
  3741  		t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid")
  3742  	}
  3743  
  3744  	// Create a config that is enabled, with a bad time zone.
  3745  	p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"}
  3746  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") {
  3747  		t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
  3748  	}
  3749  }
  3750  
  3751  func TestPeriodicConfig_InvalidCron(t *testing.T) {
  3752  	ci.Parallel(t)
  3753  
  3754  	specs := []string{"foo", "* *", "@foo"}
  3755  	for _, spec := range specs {
  3756  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  3757  		p.Canonicalize()
  3758  		if err := p.Validate(); err == nil {
  3759  			t.Fatalf("Invalid cron spec %q should not have validated", spec)
  3760  		}
  3761  	}
  3762  }
  3763  
  3764  func TestPeriodicConfig_ValidCron(t *testing.T) {
  3765  	ci.Parallel(t)
  3766  
  3767  	specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"}
  3768  	for _, spec := range specs {
  3769  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  3770  		p.Canonicalize()
  3771  		if err := p.Validate(); err != nil {
  3772  			t.Fatalf("Valid cron spec %q failed validation: %v", spec, err)
  3773  		}
  3774  	}
  3775  }
  3776  
  3777  func TestPeriodicConfig_NextCron(t *testing.T) {
  3778  	ci.Parallel(t)
  3779  
  3780  	from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC)
  3781  
  3782  	cases := []struct {
  3783  		spec     string
  3784  		nextTime time.Time
  3785  		errorMsg string
  3786  	}{
  3787  		{
  3788  			spec:     "0 0 29 2 * 1980",
  3789  			nextTime: time.Time{},
  3790  		},
  3791  		{
  3792  			spec:     "*/5 * * * *",
  3793  			nextTime: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC),
  3794  		},
  3795  		{
  3796  			spec:     "1 15-0 *",
  3797  			nextTime: time.Time{},
  3798  			errorMsg: "failed parsing cron expression",
  3799  		},
  3800  	}
  3801  
  3802  	for i, c := range cases {
  3803  		t.Run(fmt.Sprintf("case %d: %s", i, c.spec), func(t *testing.T) {
  3804  			p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: c.spec}
  3805  			p.Canonicalize()
  3806  			n, err := p.Next(from)
  3807  
  3808  			require.Equal(t, c.nextTime, n)
  3809  			if c.errorMsg == "" {
  3810  				require.NoError(t, err)
  3811  			} else {
  3812  				require.Error(t, err)
  3813  				require.Contains(t, err.Error(), c.errorMsg)
  3814  			}
  3815  		})
  3816  	}
  3817  }
  3818  
  3819  func TestPeriodicConfig_ValidTimeZone(t *testing.T) {
  3820  	ci.Parallel(t)
  3821  
  3822  	zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"}
  3823  	for _, zone := range zones {
  3824  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone}
  3825  		p.Canonicalize()
  3826  		if err := p.Validate(); err != nil {
  3827  			t.Fatalf("Valid tz errored: %v", err)
  3828  		}
  3829  	}
  3830  }
  3831  
  3832  func TestPeriodicConfig_DST(t *testing.T) {
  3833  	ci.Parallel(t)
  3834  
  3835  	require := require.New(t)
  3836  
  3837  	// On Sun, Mar 12, 2017 at 2:00 am, US DST begins and local clocks jump ahead one hour.
  3838  	p := &PeriodicConfig{
  3839  		Enabled:  true,
  3840  		SpecType: PeriodicSpecCron,
  3841  		Spec:     "0 2 11-13 3 * 2017",
  3842  		TimeZone: "America/Los_Angeles",
  3843  	}
  3844  	p.Canonicalize()
  3845  
  3846  	t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
  3847  	t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)
  3848  
  3849  	// E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
  3850  	e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
  3851  	e2 := time.Date(2017, time.March, 13, 9, 0, 0, 0, time.UTC)
  3852  
  3853  	n1, err := p.Next(t1)
  3854  	require.Nil(err)
  3855  
  3856  	n2, err := p.Next(t2)
  3857  	require.Nil(err)
  3858  
  3859  	require.Equal(e1, n1.UTC())
  3860  	require.Equal(e2, n2.UTC())
  3861  }
  3862  
  3863  func TestTaskLifecycleConfig_Validate(t *testing.T) {
  3864  	ci.Parallel(t)
  3865  
  3866  	testCases := []struct {
  3867  		name string
  3868  		tlc  *TaskLifecycleConfig
  3869  		err  error
  3870  	}{
  3871  		{
  3872  			name: "prestart completed",
  3873  			tlc: &TaskLifecycleConfig{
  3874  				Hook:    "prestart",
  3875  				Sidecar: false,
  3876  			},
  3877  			err: nil,
  3878  		},
  3879  		{
  3880  			name: "prestart running",
  3881  			tlc: &TaskLifecycleConfig{
  3882  				Hook:    "prestart",
  3883  				Sidecar: true,
  3884  			},
  3885  			err: nil,
  3886  		},
  3887  		{
  3888  			name: "no hook",
  3889  			tlc: &TaskLifecycleConfig{
  3890  				Sidecar: true,
  3891  			},
  3892  			err: fmt.Errorf("no lifecycle hook provided"),
  3893  		},
  3894  	}
  3895  
  3896  	for _, tc := range testCases {
  3897  		t.Run(tc.name, func(t *testing.T) {
  3898  			err := tc.tlc.Validate()
  3899  			if tc.err != nil {
  3900  				require.Error(t, err)
  3901  				require.Contains(t, err.Error(), tc.err.Error())
  3902  			} else {
  3903  				require.Nil(t, err)
  3904  			}
  3905  		})
  3906  	}
  3908  }
  3909  
  3910  func TestRestartPolicy_Validate(t *testing.T) {
  3911  	ci.Parallel(t)
  3912  
  3913  	// Policy with acceptable restart options passes
  3914  	p := &RestartPolicy{
  3915  		Mode:     RestartPolicyModeFail,
  3916  		Attempts: 0,
  3917  		Interval: 5 * time.Second,
  3918  	}
  3919  	if err := p.Validate(); err != nil {
  3920  		t.Fatalf("err: %v", err)
  3921  	}
  3922  
  3923  	// Policy with ambiguous restart options fails
  3924  	p = &RestartPolicy{
  3925  		Mode:     RestartPolicyModeDelay,
  3926  		Attempts: 0,
  3927  		Interval: 5 * time.Second,
  3928  	}
  3929  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") {
  3930  		t.Fatalf("expect ambiguity error, got: %v", err)
  3931  	}
  3932  
  3933  	// Bad policy mode fails
  3934  	p = &RestartPolicy{
  3935  		Mode:     "nope",
  3936  		Attempts: 1,
  3937  		Interval: 5 * time.Second,
  3938  	}
  3939  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") {
  3940  		t.Fatalf("expect mode error, got: %v", err)
  3941  	}
  3942  
  3943  	// Fails when attempts*delay does not fit inside interval
  3944  	p = &RestartPolicy{
  3945  		Mode:     RestartPolicyModeDelay,
  3946  		Attempts: 3,
  3947  		Delay:    5 * time.Second,
  3948  		Interval: 5 * time.Second,
  3949  	}
  3950  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") {
  3951  		t.Fatalf("expect restart interval error, got: %v", err)
  3952  	}
  3953  
  3954  	// Fails when interval is too small
  3955  	p = &RestartPolicy{
  3956  		Mode:     RestartPolicyModeDelay,
  3957  		Attempts: 3,
  3958  		Delay:    5 * time.Second,
  3959  		Interval: 2 * time.Second,
  3960  	}
  3961  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") {
  3962  		t.Fatalf("expect interval too small error, got: %v", err)
  3963  	}
  3964  }
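
// sketchMinRestartInterval restates the bound the delay-mode cases above
// exercise; it is an illustrative assumption, not the validator itself: in
// delay mode all attempts must fit inside the interval, so the interval must
// be at least attempts * delay (3 * 5s = 15s does not fit in 5s above), and
// an interval below the policy's minimum is rejected outright.
func sketchMinRestartInterval(attempts int, delay time.Duration) time.Duration {
	return time.Duration(attempts) * delay
}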
  3965  
  3966  func TestReschedulePolicy_Validate(t *testing.T) {
  3967  	ci.Parallel(t)
  3968  	type testCase struct {
  3969  		desc             string
  3970  		ReschedulePolicy *ReschedulePolicy
  3971  		errors           []error
  3972  	}
  3973  	testCases := []testCase{
  3974  		{
  3975  			desc: "Nil",
  3976  		},
  3977  		{
  3978  			desc: "Disabled",
  3979  			ReschedulePolicy: &ReschedulePolicy{
  3980  				Attempts: 0,
  3981  				Interval: 0 * time.Second},
  3982  		},
  3983  		{
  3984  			desc: "Disabled",
  3985  			ReschedulePolicy: &ReschedulePolicy{
  3986  				Attempts: -1,
  3987  				Interval: 5 * time.Minute},
  3988  		},
  3989  		{
  3990  			desc: "Valid Linear Delay",
  3991  			ReschedulePolicy: &ReschedulePolicy{
  3992  				Attempts:      1,
  3993  				Interval:      5 * time.Minute,
  3994  				Delay:         10 * time.Second,
  3995  				DelayFunction: "constant"},
  3996  		},
  3997  		{
  3998  			desc: "Valid Exponential Delay",
  3999  			ReschedulePolicy: &ReschedulePolicy{
  4000  				Attempts:      5,
  4001  				Interval:      1 * time.Hour,
  4002  				Delay:         30 * time.Second,
  4003  				MaxDelay:      5 * time.Minute,
  4004  				DelayFunction: "exponential"},
  4005  		},
  4006  		{
  4007  			desc: "Valid Fibonacci Delay",
  4008  			ReschedulePolicy: &ReschedulePolicy{
  4009  				Attempts:      5,
  4010  				Interval:      15 * time.Minute,
  4011  				Delay:         10 * time.Second,
  4012  				MaxDelay:      5 * time.Minute,
  4013  				DelayFunction: "fibonacci"},
  4014  		},
  4015  		{
  4016  			desc: "Invalid delay function",
  4017  			ReschedulePolicy: &ReschedulePolicy{
  4018  				Attempts:      1,
  4019  				Interval:      1 * time.Second,
  4020  				DelayFunction: "blah"},
  4021  			errors: []error{
  4022  				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
  4023  				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  4024  				fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions),
  4025  			},
  4026  		},
  4027  		{
  4028  			desc: "Invalid delay ceiling",
  4029  			ReschedulePolicy: &ReschedulePolicy{
  4030  				Attempts:      1,
  4031  				Interval:      8 * time.Second,
  4032  				DelayFunction: "exponential",
  4033  				Delay:         15 * time.Second,
  4034  				MaxDelay:      5 * time.Second},
  4035  			errors: []error{
  4036  				fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)",
  4037  					15*time.Second, 5*time.Second),
  4038  			},
  4039  		},
  4040  		{
  4041  			desc: "Invalid delay and interval",
  4042  			ReschedulePolicy: &ReschedulePolicy{
  4043  				Attempts:      1,
  4044  				Interval:      1 * time.Second,
  4045  				DelayFunction: "constant"},
  4046  			errors: []error{
  4047  				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
  4048  				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  4049  			},
  4050  		}, {
  4051  			// Should suggest 2h40m as the interval
  4052  			desc: "Invalid Attempts - linear delay",
  4053  			ReschedulePolicy: &ReschedulePolicy{
  4054  				Attempts:      10,
  4055  				Interval:      1 * time.Hour,
  4056  				Delay:         20 * time.Minute,
  4057  				DelayFunction: "constant",
  4058  			},
  4059  			errors: []error{
  4060  				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+
  4061  					" delay function %q", 3, time.Hour, 20*time.Minute, "constant"),
  4062  				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
  4063  					200*time.Minute, 10),
  4064  			},
  4065  		},
  4066  		{
  4067  			// Should suggest 4h40m as the interval
  4068  			// Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
  4069  			desc: "Invalid Attempts - exponential delay",
  4070  			ReschedulePolicy: &ReschedulePolicy{
  4071  				Attempts:      10,
  4072  				Interval:      30 * time.Minute,
  4073  				Delay:         5 * time.Minute,
  4074  				MaxDelay:      40 * time.Minute,
  4075  				DelayFunction: "exponential",
  4076  			},
  4077  			errors: []error{
  4078  				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
  4079  					"delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute,
  4080  					"exponential", 40*time.Minute),
  4081  				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
  4082  					280*time.Minute, 10),
  4083  			},
  4084  		},
  4085  		{
  4086  			// Should suggest 8h as the interval
  4087  			// Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
  4088  			desc: "Invalid Attempts - fibonacci delay",
  4089  			ReschedulePolicy: &ReschedulePolicy{
  4090  				Attempts:      10,
  4091  				Interval:      1 * time.Hour,
  4092  				Delay:         20 * time.Minute,
  4093  				MaxDelay:      80 * time.Minute,
  4094  				DelayFunction: "fibonacci",
  4095  			},
  4096  			errors: []error{
  4097  				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
  4098  					"delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute,
  4099  					"fibonacci", 80*time.Minute),
  4100  				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
  4101  					480*time.Minute, 10),
  4102  			},
  4103  		},
  4104  		{
  4105  			desc: "Ambiguous Unlimited config, has both attempts and unlimited set",
  4106  			ReschedulePolicy: &ReschedulePolicy{
  4107  				Attempts:      1,
  4108  				Unlimited:     true,
  4109  				DelayFunction: "exponential",
  4110  				Delay:         5 * time.Minute,
  4111  				MaxDelay:      1 * time.Hour,
  4112  			},
  4113  			errors: []error{
  4114  				fmt.Errorf("Interval must be a non zero value if Attempts > 0"),
  4115  				fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true),
  4116  			},
  4117  		},
  4118  		{
  4119  			desc: "Invalid Unlimited config",
  4120  			ReschedulePolicy: &ReschedulePolicy{
  4121  				Attempts:      1,
  4122  				Interval:      1 * time.Second,
  4123  				Unlimited:     true,
  4124  				DelayFunction: "exponential",
  4125  			},
  4126  			errors: []error{
  4127  				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  4128  				fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  4129  			},
  4130  		},
  4131  		{
  4132  			desc: "Valid Unlimited config",
  4133  			ReschedulePolicy: &ReschedulePolicy{
  4134  				Unlimited:     true,
  4135  				DelayFunction: "exponential",
  4136  				Delay:         5 * time.Second,
  4137  				MaxDelay:      1 * time.Hour,
  4138  			},
  4139  		},
  4140  	}
  4141  
  4142  	for _, tc := range testCases {
  4143  		t.Run(tc.desc, func(t *testing.T) {
  4144  			require := require.New(t)
  4145  			gotErr := tc.ReschedulePolicy.Validate()
  4146  			if tc.errors != nil {
  4147  				// Validate all errors
  4148  				for _, err := range tc.errors {
  4149  					require.Contains(gotErr.Error(), err.Error())
  4150  				}
  4151  			} else {
  4152  				require.Nil(gotErr)
  4153  			}
  4154  		})
  4155  	}
  4156  }
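
// sketchDelaySchedule reproduces the "Delay progression" sequences quoted in
// the comments above: constant repeats the initial delay, exponential doubles
// it, and fibonacci sums the previous two, all capped at maxDelay. It is an
// illustrative assumption, not the scheduler's implementation; the suggested
// interval in the errors above is derived from these delays inside Validate.
func sketchDelaySchedule(fn string, delay, maxDelay time.Duration, attempts int) []time.Duration {
	out := make([]time.Duration, attempts)
	for i := range out {
		switch {
		case i == 0 || fn == "constant":
			out[i] = delay
		case fn == "exponential":
			out[i] = out[i-1] * 2
		case fn == "fibonacci" && i == 1:
			out[i] = delay
		case fn == "fibonacci":
			out[i] = out[i-1] + out[i-2]
		}
		if maxDelay > 0 && out[i] > maxDelay {
			out[i] = maxDelay
		}
	}
	return out
}

// For example, sketchDelaySchedule("exponential", 5*time.Minute, 40*time.Minute, 10)
// yields {5, 10, 20, 40, 40, 40, 40, 40, 40, 40} minutes, matching the
// progression noted in the exponential test case above.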
  4157  
  4158  func TestAllocation_Index(t *testing.T) {
  4159  	ci.Parallel(t)
  4160  
  4161  	a1 := Allocation{
  4162  		Name:      "example.cache[1]",
  4163  		TaskGroup: "cache",
  4164  		JobID:     "example",
  4165  		Job: &Job{
  4166  			ID:         "example",
  4167  			TaskGroups: []*TaskGroup{{Name: "cache"}}},
  4168  	}
  4169  	e1 := uint(1)
  4170  	a2 := a1.Copy()
  4171  	a2.Name = "example.cache[713127]"
  4172  	e2 := uint(713127)
  4173  
  4174  	if a1.Index() != e1 || a2.Index() != e2 {
  4175  		t.Fatalf("Got %d and %d", a1.Index(), a2.Index())
  4176  	}
  4177  }
  4178  
  4179  func TestTaskArtifact_Validate_Source(t *testing.T) {
  4180  	ci.Parallel(t)
  4181  
  4182  	valid := &TaskArtifact{GetterSource: "google.com"}
  4183  	if err := valid.Validate(); err != nil {
  4184  		t.Fatalf("unexpected error: %v", err)
  4185  	}
  4186  }
  4187  
  4188  func TestTaskArtifact_Validate_Dest(t *testing.T) {
  4189  	ci.Parallel(t)
  4190  
  4191  	valid := &TaskArtifact{GetterSource: "google.com"}
  4192  	if err := valid.Validate(); err != nil {
  4193  		t.Fatalf("unexpected error: %v", err)
  4194  	}
  4195  
  4196  	valid.RelativeDest = "local/"
  4197  	if err := valid.Validate(); err != nil {
  4198  		t.Fatalf("unexpected error: %v", err)
  4199  	}
  4200  
  4201  	valid.RelativeDest = "local/.."
  4202  	if err := valid.Validate(); err != nil {
  4203  		t.Fatalf("unexpected error: %v", err)
  4204  	}
  4205  
  4206  	valid.RelativeDest = "local/../../.."
  4207  	if err := valid.Validate(); err == nil {
  4208  		t.Fatalf("expected error: %v", err)
  4209  	}
  4210  }
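
// sketchEscapesSandbox restates the property the destination cases above pin
// down; it is an illustrative assumption, not the validator's code: a
// relative destination is rejected once its ".." segments climb above the
// allocation directory, while "local/" and "local/.." stay inside it.
func sketchEscapesSandbox(relativeDest string) bool {
	depth := 0
	for _, seg := range strings.Split(relativeDest, "/") {
		switch seg {
		case "", ".":
			// empty and current-dir segments do not change depth
		case "..":
			if depth--; depth < 0 {
				return true // climbed above the sandbox root
			}
		default:
			depth++
		}
	}
	return false
}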
  4211  
  4212  // TestTaskArtifact_Hash asserts an artifact's hash changes when any of the
  4213  // fields change.
  4214  func TestTaskArtifact_Hash(t *testing.T) {
  4215  	ci.Parallel(t)
  4216  
  4217  	cases := []TaskArtifact{
  4218  		{},
  4219  		{
  4220  			GetterSource: "a",
  4221  		},
  4222  		{
  4223  			GetterSource: "b",
  4224  		},
  4225  		{
  4226  			GetterSource:  "b",
  4227  			GetterOptions: map[string]string{"c": "c"},
  4228  		},
  4229  		{
  4230  			GetterSource: "b",
  4231  			GetterOptions: map[string]string{
  4232  				"c": "c",
  4233  				"d": "d",
  4234  			},
  4235  		},
  4236  		{
  4237  			GetterSource: "b",
  4238  			GetterOptions: map[string]string{
  4239  				"c": "c",
  4240  				"d": "e",
  4241  			},
  4242  		},
  4243  		{
  4244  			GetterSource: "b",
  4245  			GetterOptions: map[string]string{
  4246  				"c": "c",
  4247  				"d": "e",
  4248  			},
  4249  			GetterMode: "f",
  4250  		},
  4251  		{
  4252  			GetterSource: "b",
  4253  			GetterOptions: map[string]string{
  4254  				"c": "c",
  4255  				"d": "e",
  4256  			},
  4257  			GetterMode: "g",
  4258  		},
  4259  		{
  4260  			GetterSource: "b",
  4261  			GetterOptions: map[string]string{
  4262  				"c": "c",
  4263  				"d": "e",
  4264  			},
  4265  			GetterMode:   "g",
  4266  			RelativeDest: "h",
  4267  		},
  4268  		{
  4269  			GetterSource: "b",
  4270  			GetterOptions: map[string]string{
  4271  				"c": "c",
  4272  				"d": "e",
  4273  			},
  4274  			GetterMode:   "g",
  4275  			RelativeDest: "i",
  4276  		},
  4277  	}
  4278  
  4279  	// Map of hash to source
  4280  	hashes := make(map[string]TaskArtifact, len(cases))
  4281  	for _, tc := range cases {
  4282  		h := tc.Hash()
  4283  
  4284  		// Hash should be deterministic
  4285  		require.Equal(t, h, tc.Hash())
  4286  
  4287  		// Hash should be unique
  4288  		if orig, ok := hashes[h]; ok {
  4289  			require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n",
  4290  				pretty.Sprint(tc), pretty.Sprint(orig),
  4291  			)
  4292  		}
  4293  		hashes[h] = tc
  4294  	}
  4295  
  4296  	require.Len(t, hashes, len(cases))
  4297  }
  4298  
  4299  func TestAllocation_ShouldMigrate(t *testing.T) {
  4300  	ci.Parallel(t)
  4301  
  4302  	alloc := Allocation{
  4303  		PreviousAllocation: "123",
  4304  		TaskGroup:          "foo",
  4305  		Job: &Job{
  4306  			TaskGroups: []*TaskGroup{
  4307  				{
  4308  					Name: "foo",
  4309  					EphemeralDisk: &EphemeralDisk{
  4310  						Migrate: true,
  4311  						Sticky:  true,
  4312  					},
  4313  				},
  4314  			},
  4315  		},
  4316  	}
  4317  
  4318  	if !alloc.ShouldMigrate() {
  4319  		t.Fatalf("bad: %v", alloc)
  4320  	}
  4321  
  4322  	alloc1 := Allocation{
  4323  		PreviousAllocation: "123",
  4324  		TaskGroup:          "foo",
  4325  		Job: &Job{
  4326  			TaskGroups: []*TaskGroup{
  4327  				{
  4328  					Name:          "foo",
  4329  					EphemeralDisk: &EphemeralDisk{},
  4330  				},
  4331  			},
  4332  		},
  4333  	}
  4334  
  4335  	if alloc1.ShouldMigrate() {
  4336  		t.Fatalf("bad: %v", alloc)
  4337  	}
  4338  
  4339  	alloc2 := Allocation{
  4340  		PreviousAllocation: "123",
  4341  		TaskGroup:          "foo",
  4342  		Job: &Job{
  4343  			TaskGroups: []*TaskGroup{
  4344  				{
  4345  					Name: "foo",
  4346  					EphemeralDisk: &EphemeralDisk{
  4347  						Sticky:  false,
  4348  						Migrate: true,
  4349  					},
  4350  				},
  4351  			},
  4352  		},
  4353  	}
  4354  
  4355  	if alloc2.ShouldMigrate() {
  4356  		t.Fatalf("bad: %v", alloc)
  4357  	}
  4358  
  4359  	alloc3 := Allocation{
  4360  		PreviousAllocation: "123",
  4361  		TaskGroup:          "foo",
  4362  		Job: &Job{
  4363  			TaskGroups: []*TaskGroup{
  4364  				{
  4365  					Name: "foo",
  4366  				},
  4367  			},
  4368  		},
  4369  	}
  4370  
  4371  	if alloc3.ShouldMigrate() {
  4372  		t.Fatalf("bad: %v", alloc)
  4373  	}
  4374  
  4375  	// No previous
  4376  	alloc4 := Allocation{
  4377  		TaskGroup: "foo",
  4378  		Job: &Job{
  4379  			TaskGroups: []*TaskGroup{
  4380  				{
  4381  					Name: "foo",
  4382  					EphemeralDisk: &EphemeralDisk{
  4383  						Migrate: true,
  4384  						Sticky:  true,
  4385  					},
  4386  				},
  4387  			},
  4388  		},
  4389  	}
  4390  
  4391  	if alloc4.ShouldMigrate() {
  4392  		t.Fatalf("bad: %v", alloc4)
  4393  	}
  4394  }
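
// sketchShouldMigrate restates the rule the five allocations above pin down;
// an illustrative assumption, not the method under test: disk data follows an
// allocation only when there is a previous allocation to migrate from and the
// group's ephemeral disk is both sticky and marked for migration.
func sketchShouldMigrate(previousAllocID string, disk *EphemeralDisk) bool {
	return previousAllocID != "" && disk != nil && disk.Sticky && disk.Migrate
}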
  4395  
  4396  func TestTaskArtifact_Validate_Checksum(t *testing.T) {
  4397  	ci.Parallel(t)
  4398  
  4399  	cases := []struct {
  4400  		Input *TaskArtifact
  4401  		Err   bool
  4402  	}{
  4403  		{
  4404  			&TaskArtifact{
  4405  				GetterSource: "foo.com",
  4406  				GetterOptions: map[string]string{
  4407  					"checksum": "no-type",
  4408  				},
  4409  			},
  4410  			true,
  4411  		},
  4412  		{
  4413  			&TaskArtifact{
  4414  				GetterSource: "foo.com",
  4415  				GetterOptions: map[string]string{
  4416  					"checksum": "md5:toosmall",
  4417  				},
  4418  			},
  4419  			true,
  4420  		},
  4421  		{
  4422  			&TaskArtifact{
  4423  				GetterSource: "foo.com",
  4424  				GetterOptions: map[string]string{
  4425  					"checksum": "invalid:type",
  4426  				},
  4427  			},
  4428  			true,
  4429  		},
  4430  		{
  4431  			&TaskArtifact{
  4432  				GetterSource: "foo.com",
  4433  				GetterOptions: map[string]string{
  4434  					"checksum": "md5:${ARTIFACT_CHECKSUM}",
  4435  				},
  4436  			},
  4437  			false,
  4438  		},
  4439  	}
  4440  
  4441  	for i, tc := range cases {
  4442  		err := tc.Input.Validate()
  4443  		if (err != nil) != tc.Err {
  4444  			t.Fatalf("case %d: %v", i, err)
  4445  		}
  4446  	}
  4447  }
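
// sketchChecksumOption restates the four checksum cases above as one check;
// an illustrative assumption, not the validator: the option must be
// "<type>:<value>" with a known digest type and matching hex length, except
// that unexpanded ${...} variables are deferred to interpolation time.
func sketchChecksumOption(cs string) bool {
	parts := strings.SplitN(cs, ":", 2)
	if len(parts) != 2 {
		return false // "no-type": the digest type prefix is missing
	}
	typ, val := parts[0], parts[1]
	if strings.Contains(val, "${") {
		return true // value is interpolated later, so it passes validation now
	}
	hexLen := map[string]int{"md5": 32, "sha1": 40, "sha256": 64, "sha512": 128}[typ]
	return hexLen != 0 && len(val) == hexLen
}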
  4448  
  4449  func TestPlan_NormalizeAllocations(t *testing.T) {
  4450  	ci.Parallel(t)
  4451  	plan := &Plan{
  4452  		NodeUpdate:      make(map[string][]*Allocation),
  4453  		NodePreemptions: make(map[string][]*Allocation),
  4454  	}
  4455  	stoppedAlloc := MockAlloc()
  4456  	desiredDesc := "Desired desc"
  4457  	plan.AppendStoppedAlloc(stoppedAlloc, desiredDesc, AllocClientStatusLost, "followup-eval-id")
  4458  	preemptedAlloc := MockAlloc()
  4459  	preemptingAllocID := uuid.Generate()
  4460  	plan.AppendPreemptedAlloc(preemptedAlloc, preemptingAllocID)
  4461  
  4462  	plan.NormalizeAllocations()
  4463  
  4464  	actualStoppedAlloc := plan.NodeUpdate[stoppedAlloc.NodeID][0]
  4465  	expectedStoppedAlloc := &Allocation{
  4466  		ID:                 stoppedAlloc.ID,
  4467  		DesiredDescription: desiredDesc,
  4468  		ClientStatus:       AllocClientStatusLost,
  4469  		FollowupEvalID:     "followup-eval-id",
  4470  	}
  4471  	assert.Equal(t, expectedStoppedAlloc, actualStoppedAlloc)
  4472  	actualPreemptedAlloc := plan.NodePreemptions[preemptedAlloc.NodeID][0]
  4473  	expectedPreemptedAlloc := &Allocation{
  4474  		ID:                    preemptedAlloc.ID,
  4475  		PreemptedByAllocation: preemptingAllocID,
  4476  	}
  4477  	assert.Equal(t, expectedPreemptedAlloc, actualPreemptedAlloc)
  4478  }
  4479  
  4480  func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) {
  4481  	ci.Parallel(t)
  4482  	plan := &Plan{
  4483  		NodeUpdate: make(map[string][]*Allocation),
  4484  	}
  4485  	alloc := MockAlloc()
  4486  	desiredDesc := "Desired desc"
  4487  
  4488  	plan.AppendStoppedAlloc(alloc, desiredDesc, AllocClientStatusLost, "")
  4489  
  4490  	expectedAlloc := new(Allocation)
  4491  	*expectedAlloc = *alloc
  4492  	expectedAlloc.DesiredDescription = desiredDesc
  4493  	expectedAlloc.DesiredStatus = AllocDesiredStatusStop
  4494  	expectedAlloc.ClientStatus = AllocClientStatusLost
  4495  	expectedAlloc.Job = nil
  4496  	expectedAlloc.AllocStates = []*AllocState{{
  4497  		Field: AllocStateFieldClientStatus,
  4498  		Value: "lost",
  4499  	}}
  4500  
  4501  	// This value is set to time.Now() in AppendStoppedAlloc, so clear it
  4502  	appendedAlloc := plan.NodeUpdate[alloc.NodeID][0]
  4503  	appendedAlloc.AllocStates[0].Time = time.Time{}
  4504  
  4505  	assert.Equal(t, expectedAlloc, appendedAlloc)
  4506  	assert.Equal(t, alloc.Job, plan.Job)
  4507  }
  4508  
  4509  func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) {
  4510  	ci.Parallel(t)
  4511  	plan := &Plan{
  4512  		NodePreemptions: make(map[string][]*Allocation),
  4513  	}
  4514  	alloc := MockAlloc()
  4515  	preemptingAllocID := uuid.Generate()
  4516  
  4517  	plan.AppendPreemptedAlloc(alloc, preemptingAllocID)
  4518  
  4519  	appendedAlloc := plan.NodePreemptions[alloc.NodeID][0]
  4520  	expectedAlloc := &Allocation{
  4521  		ID:                    alloc.ID,
  4522  		PreemptedByAllocation: preemptingAllocID,
  4523  		JobID:                 alloc.JobID,
  4524  		Namespace:             alloc.Namespace,
  4525  		DesiredStatus:         AllocDesiredStatusEvict,
  4526  		DesiredDescription:    fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID),
  4527  		AllocatedResources:    alloc.AllocatedResources,
  4528  		TaskResources:         alloc.TaskResources,
  4529  		SharedResources:       alloc.SharedResources,
  4530  	}
  4531  	assert.Equal(t, expectedAlloc, appendedAlloc)
  4532  }
  4533  
  4534  func TestAllocation_MsgPackTags(t *testing.T) {
  4535  	ci.Parallel(t)
  4536  	planType := reflect.TypeOf(Allocation{})
  4537  
  4538  	msgPackTags, _ := planType.FieldByName("_struct")
  4539  
  4540  	assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`))
  4541  }
  4542  
  4543  func TestEvaluation_MsgPackTags(t *testing.T) {
  4544  	ci.Parallel(t)
  4545  	planType := reflect.TypeOf(Evaluation{})
  4546  
  4547  	msgPackTags, _ := planType.FieldByName("_struct")
  4548  
  4549  	assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`))
  4550  }
  4551  
  4552  func TestAllocation_Terminated(t *testing.T) {
  4553  	ci.Parallel(t)
  4554  	type desiredState struct {
  4555  		ClientStatus  string
  4556  		DesiredStatus string
  4557  		Terminated    bool
  4558  	}
  4559  	harness := []desiredState{
  4560  		{
  4561  			ClientStatus:  AllocClientStatusPending,
  4562  			DesiredStatus: AllocDesiredStatusStop,
  4563  			Terminated:    false,
  4564  		},
  4565  		{
  4566  			ClientStatus:  AllocClientStatusRunning,
  4567  			DesiredStatus: AllocDesiredStatusStop,
  4568  			Terminated:    false,
  4569  		},
  4570  		{
  4571  			ClientStatus:  AllocClientStatusFailed,
  4572  			DesiredStatus: AllocDesiredStatusStop,
  4573  			Terminated:    true,
  4574  		},
  4575  		{
  4576  			ClientStatus:  AllocClientStatusFailed,
  4577  			DesiredStatus: AllocDesiredStatusRun,
  4578  			Terminated:    true,
  4579  		},
  4580  	}
  4581  
  4582  	for _, state := range harness {
  4583  		alloc := Allocation{}
  4584  		alloc.DesiredStatus = state.DesiredStatus
  4585  		alloc.ClientStatus = state.ClientStatus
  4586  		if alloc.Terminated() != state.Terminated {
  4587  			t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated())
  4588  		}
  4589  	}
  4590  }
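
// sketchTerminated generalizes the truth table above; an illustrative
// assumption, not the method under test: an allocation is terminal once the
// client reports failed, complete, or lost, regardless of desired status.
func sketchTerminated(clientStatus string) bool {
	switch clientStatus {
	case AllocClientStatusFailed, AllocClientStatusComplete, AllocClientStatusLost:
		return true
	default:
		return false
	}
}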
  4591  
  4592  func TestAllocation_ShouldReschedule(t *testing.T) {
  4593  	ci.Parallel(t)
  4594  	type testCase struct {
  4595  		Desc               string
  4596  		FailTime           time.Time
  4597  		ClientStatus       string
  4598  		DesiredStatus      string
  4599  		ReschedulePolicy   *ReschedulePolicy
  4600  		RescheduleTrackers []*RescheduleEvent
  4601  		ShouldReschedule   bool
  4602  	}
  4603  	fail := time.Now()
  4604  
  4605  	harness := []testCase{
  4606  		{
  4607  			Desc:             "Reschedule when desired state is stop",
  4608  			ClientStatus:     AllocClientStatusPending,
  4609  			DesiredStatus:    AllocDesiredStatusStop,
  4610  			FailTime:         fail,
  4611  			ReschedulePolicy: nil,
  4612  			ShouldReschedule: false,
  4613  		},
  4614  		{
  4615  			Desc:             "Disabled rescheduling",
  4616  			ClientStatus:     AllocClientStatusFailed,
  4617  			DesiredStatus:    AllocDesiredStatusRun,
  4618  			FailTime:         fail,
  4619  			ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute},
  4620  			ShouldReschedule: false,
  4621  		},
  4622  		{
  4623  			Desc:             "Reschedule when client status is complete",
  4624  			ClientStatus:     AllocClientStatusComplete,
  4625  			DesiredStatus:    AllocDesiredStatusRun,
  4626  			FailTime:         fail,
  4627  			ReschedulePolicy: nil,
  4628  			ShouldReschedule: false,
  4629  		},
  4630  		{
  4631  			Desc:             "Reschedule with nil reschedule policy",
  4632  			ClientStatus:     AllocClientStatusFailed,
  4633  			DesiredStatus:    AllocDesiredStatusRun,
  4634  			FailTime:         fail,
  4635  			ReschedulePolicy: nil,
  4636  			ShouldReschedule: false,
  4637  		},
  4638  		{
  4639  			Desc:             "Reschedule with unlimited and attempts >0",
  4640  			ClientStatus:     AllocClientStatusFailed,
  4641  			DesiredStatus:    AllocDesiredStatusRun,
  4642  			FailTime:         fail,
  4643  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true},
  4644  			ShouldReschedule: true,
  4645  		},
  4654  		{
  4655  			Desc:             "Reschedule with policy when client status complete",
  4656  			ClientStatus:     AllocClientStatusComplete,
  4657  			DesiredStatus:    AllocDesiredStatusRun,
  4658  			FailTime:         fail,
  4659  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
  4660  			ShouldReschedule: false,
  4661  		},
  4662  		{
  4663  			Desc:             "Reschedule with no previous attempts",
  4664  			ClientStatus:     AllocClientStatusFailed,
  4665  			DesiredStatus:    AllocDesiredStatusRun,
  4666  			FailTime:         fail,
  4667  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
  4668  			ShouldReschedule: true,
  4669  		},
  4670  		{
  4671  			Desc:             "Reschedule with leftover attempts",
  4672  			ClientStatus:     AllocClientStatusFailed,
  4673  			DesiredStatus:    AllocDesiredStatusRun,
  4674  			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
  4675  			FailTime:         fail,
  4676  			RescheduleTrackers: []*RescheduleEvent{
  4677  				{
  4678  					RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(),
  4679  				},
  4680  			},
  4681  			ShouldReschedule: true,
  4682  		},
  4683  		{
  4684  			Desc:             "Reschedule with too old previous attempts",
  4685  			ClientStatus:     AllocClientStatusFailed,
  4686  			DesiredStatus:    AllocDesiredStatusRun,
  4687  			FailTime:         fail,
  4688  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute},
  4689  			RescheduleTrackers: []*RescheduleEvent{
  4690  				{
  4691  					RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(),
  4692  				},
  4693  			},
  4694  			ShouldReschedule: true,
  4695  		},
  4696  		{
  4697  			Desc:             "Reschedule with no leftover attempts",
  4698  			ClientStatus:     AllocClientStatusFailed,
  4699  			DesiredStatus:    AllocDesiredStatusRun,
  4700  			FailTime:         fail,
  4701  			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
  4702  			RescheduleTrackers: []*RescheduleEvent{
  4703  				{
  4704  					RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
  4705  				},
  4706  				{
  4707  					RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(),
  4708  				},
  4709  			},
  4710  			ShouldReschedule: false,
  4711  		},
  4712  	}
  4713  
  4714  	for _, state := range harness {
  4715  		alloc := Allocation{}
  4716  		alloc.DesiredStatus = state.DesiredStatus
  4717  		alloc.ClientStatus = state.ClientStatus
  4718  		alloc.RescheduleTracker = &RescheduleTracker{state.RescheduleTrackers}
  4719  
  4720  		t.Run(state.Desc, func(t *testing.T) {
  4721  			if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule {
  4722  				t.Fatalf("expected %v but got %v", state.ShouldReschedule, got)
  4723  			}
  4724  		})
  4725  
  4726  	}
  4727  }
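
// sketchShouldReschedule restates the policy the tracker cases above pin
// down; an illustrative assumption, not the method under test: only failed
// allocations reschedule, unlimited policies always may, and a limited policy
// permits another attempt while fewer than Attempts reschedule events fall
// inside the trailing Interval window that ends at the failure time.
func sketchShouldReschedule(clientStatus string, p *ReschedulePolicy, events []*RescheduleEvent, failTime time.Time) bool {
	if clientStatus != AllocClientStatusFailed || p == nil {
		return false
	}
	if p.Unlimited {
		return true
	}
	if p.Attempts == 0 {
		return false
	}
	attempted := 0
	windowStart := failTime.Add(-p.Interval).UnixNano()
	for _, event := range events {
		if event.RescheduleTime >= windowStart {
			attempted++
		}
	}
	return attempted < p.Attempts
}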
  4728  
  4729  func TestAllocation_LastEventTime(t *testing.T) {
  4730  	ci.Parallel(t)
  4731  	type testCase struct {
  4732  		desc                  string
  4733  		taskState             map[string]*TaskState
  4734  		expectedLastEventTime time.Time
  4735  	}
  4736  	t1 := time.Now().UTC()
  4737  
  4738  	testCases := []testCase{
  4739  		{
  4740  			desc:                  "nil task state",
  4741  			expectedLastEventTime: t1,
  4742  		},
  4743  		{
  4744  			desc:                  "empty task state",
  4745  			taskState:             make(map[string]*TaskState),
  4746  			expectedLastEventTime: t1,
  4747  		},
  4748  		{
  4749  			desc: "Finished At not set",
  4750  			taskState: map[string]*TaskState{"foo": {State: "start",
  4751  				StartedAt: t1.Add(-2 * time.Hour)}},
  4752  			expectedLastEventTime: t1,
  4753  		},
  4754  		{
  4755  			desc: "One finished ",
  4756  			taskState: map[string]*TaskState{"foo": {State: "start",
  4757  				StartedAt:  t1.Add(-2 * time.Hour),
  4758  				FinishedAt: t1.Add(-1 * time.Hour)}},
  4759  			expectedLastEventTime: t1.Add(-1 * time.Hour),
  4760  		},
  4761  		{
  4762  			desc: "Multiple task groups",
  4763  			taskState: map[string]*TaskState{"foo": {State: "start",
  4764  				StartedAt:  t1.Add(-2 * time.Hour),
  4765  				FinishedAt: t1.Add(-1 * time.Hour)},
  4766  				"bar": {State: "start",
  4767  					StartedAt:  t1.Add(-2 * time.Hour),
  4768  					FinishedAt: t1.Add(-40 * time.Minute)}},
  4769  			expectedLastEventTime: t1.Add(-40 * time.Minute),
  4770  		},
  4771  		{
  4772  			desc: "No finishedAt set, one task event, should use modify time",
  4773  			taskState: map[string]*TaskState{"foo": {
  4774  				State:     "run",
  4775  				StartedAt: t1.Add(-2 * time.Hour),
  4776  				Events: []*TaskEvent{
  4777  					{Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()},
  4778  				}},
  4779  			},
  4780  			expectedLastEventTime: t1,
  4781  		},
  4782  	}
  4783  	for _, tc := range testCases {
  4784  		t.Run(tc.desc, func(t *testing.T) {
  4785  			alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()}
  4786  			alloc.TaskStates = tc.taskState
  4787  			require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime())
  4788  		})
  4789  	}
  4790  }
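
// sketchLastEventTime restates the cases above; an illustrative assumption,
// not the method under test: the last event time is the newest FinishedAt
// across task states, falling back to the allocation's modify time when no
// task has finished (including when only task events are present).
func sketchLastEventTime(modifyTime int64, states map[string]*TaskState) time.Time {
	var last time.Time
	for _, ts := range states {
		if ts.FinishedAt.After(last) {
			last = ts.FinishedAt
		}
	}
	if last.IsZero() {
		return time.Unix(0, modifyTime).UTC()
	}
	return last
}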
  4791  
  4792  func TestAllocation_NextDelay(t *testing.T) {
  4793  	ci.Parallel(t)
  4794  	type testCase struct {
  4795  		desc                       string
  4796  		reschedulePolicy           *ReschedulePolicy
  4797  		alloc                      *Allocation
  4798  		expectedRescheduleTime     time.Time
  4799  		expectedRescheduleEligible bool
  4800  	}
  4801  	now := time.Now()
  4802  	testCases := []testCase{
  4803  		{
  4804  			desc: "Allocation hasn't failed yet",
  4805  			reschedulePolicy: &ReschedulePolicy{
  4806  				DelayFunction: "constant",
  4807  				Delay:         5 * time.Second,
  4808  			},
  4809  			alloc:                      &Allocation{},
  4810  			expectedRescheduleTime:     time.Time{},
  4811  			expectedRescheduleEligible: false,
  4812  		},
  4813  		{
  4814  			desc:                       "Allocation has no reschedule policy",
  4815  			alloc:                      &Allocation{},
  4816  			expectedRescheduleTime:     time.Time{},
  4817  			expectedRescheduleEligible: false,
  4818  		},
  4819  		{
  4820  			desc: "Allocation lacks task state",
  4821  			reschedulePolicy: &ReschedulePolicy{
  4822  				DelayFunction: "constant",
  4823  				Delay:         5 * time.Second,
  4824  				Unlimited:     true,
  4825  			},
  4826  			alloc:                      &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()},
  4827  			expectedRescheduleTime:     now.UTC().Add(5 * time.Second),
  4828  			expectedRescheduleEligible: true,
  4829  		},
  4830  		{
  4831  			desc: "linear delay, unlimited restarts, no reschedule tracker",
  4832  			reschedulePolicy: &ReschedulePolicy{
  4833  				DelayFunction: "constant",
  4834  				Delay:         5 * time.Second,
  4835  				Unlimited:     true,
  4836  			},
  4837  			alloc: &Allocation{
  4838  				ClientStatus: AllocClientStatusFailed,
  4839  				TaskStates: map[string]*TaskState{"foo": {State: "dead",
  4840  					StartedAt:  now.Add(-1 * time.Hour),
  4841  					FinishedAt: now.Add(-2 * time.Second)}},
  4842  			},
  4843  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  4844  			expectedRescheduleEligible: true,
  4845  		},
  4846  		{
  4847  			desc: "linear delay with reschedule tracker",
  4848  			reschedulePolicy: &ReschedulePolicy{
  4849  				DelayFunction: "constant",
  4850  				Delay:         5 * time.Second,
  4851  				Interval:      10 * time.Minute,
  4852  				Attempts:      2,
  4853  			},
  4854  			alloc: &Allocation{
  4855  				ClientStatus: AllocClientStatusFailed,
  4856  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  4857  					StartedAt:  now.Add(-1 * time.Hour),
  4858  					FinishedAt: now.Add(-2 * time.Second)}},
  4859  				RescheduleTracker: &RescheduleTracker{
  4860  					Events: []*RescheduleEvent{{
  4861  						RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
  4862  						Delay:          5 * time.Second,
  4863  					}},
  4864  				}},
  4865  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  4866  			expectedRescheduleEligible: true,
  4867  		},
  4868  		{
  4869  			desc: "linear delay with reschedule tracker, attempts exhausted",
  4870  			reschedulePolicy: &ReschedulePolicy{
  4871  				DelayFunction: "constant",
  4872  				Delay:         5 * time.Second,
  4873  				Interval:      10 * time.Minute,
  4874  				Attempts:      2,
  4875  			},
  4876  			alloc: &Allocation{
  4877  				ClientStatus: AllocClientStatusFailed,
  4878  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  4879  					StartedAt:  now.Add(-1 * time.Hour),
  4880  					FinishedAt: now.Add(-2 * time.Second)}},
  4881  				RescheduleTracker: &RescheduleTracker{
  4882  					Events: []*RescheduleEvent{
  4883  						{
  4884  							RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(),
  4885  							Delay:          5 * time.Second,
  4886  						},
  4887  						{
  4888  							RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
  4889  							Delay:          5 * time.Second,
  4890  						},
  4891  					},
  4892  				}},
  4893  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  4894  			expectedRescheduleEligible: false,
  4895  		},
  4896  		{
  4897  			desc: "exponential delay - no reschedule tracker",
  4898  			reschedulePolicy: &ReschedulePolicy{
  4899  				DelayFunction: "exponential",
  4900  				Delay:         5 * time.Second,
  4901  				MaxDelay:      90 * time.Second,
  4902  				Unlimited:     true,
  4903  			},
  4904  			alloc: &Allocation{
  4905  				ClientStatus: AllocClientStatusFailed,
  4906  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  4907  					StartedAt:  now.Add(-1 * time.Hour),
  4908  					FinishedAt: now.Add(-2 * time.Second)}},
  4909  			},
  4910  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  4911  			expectedRescheduleEligible: true,
  4912  		},
  4913  		{
  4914  			desc: "exponential delay with reschedule tracker",
  4915  			reschedulePolicy: &ReschedulePolicy{
  4916  				DelayFunction: "exponential",
  4917  				Delay:         5 * time.Second,
  4918  				MaxDelay:      90 * time.Second,
  4919  				Unlimited:     true,
  4920  			},
  4921  			alloc: &Allocation{
  4922  				ClientStatus: AllocClientStatusFailed,
  4923  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  4924  					StartedAt:  now.Add(-1 * time.Hour),
  4925  					FinishedAt: now.Add(-2 * time.Second)}},
  4926  				RescheduleTracker: &RescheduleTracker{
  4927  					Events: []*RescheduleEvent{
  4928  						{
  4929  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  4930  							Delay:          5 * time.Second,
  4931  						},
  4932  						{
  4933  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  4934  							Delay:          10 * time.Second,
  4935  						},
  4936  						{
  4937  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  4938  							Delay:          20 * time.Second,
  4939  						},
  4940  					},
  4941  				}},
  4942  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
  4943  			expectedRescheduleEligible: true,
  4944  		},
  4945  		{
  4946  			desc: "exponential delay with delay ceiling reached",
  4947  			reschedulePolicy: &ReschedulePolicy{
  4948  				DelayFunction: "exponential",
  4949  				Delay:         5 * time.Second,
  4950  				MaxDelay:      90 * time.Second,
  4951  				Unlimited:     true,
  4952  			},
  4953  			alloc: &Allocation{
  4954  				ClientStatus: AllocClientStatusFailed,
  4955  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  4956  					StartedAt:  now.Add(-1 * time.Hour),
  4957  					FinishedAt: now.Add(-15 * time.Second)}},
  4958  				RescheduleTracker: &RescheduleTracker{
  4959  					Events: []*RescheduleEvent{
  4960  						{
  4961  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  4962  							Delay:          5 * time.Second,
  4963  						},
  4964  						{
  4965  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  4966  							Delay:          10 * time.Second,
  4967  						},
  4968  						{
  4969  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  4970  							Delay:          20 * time.Second,
  4971  						},
  4972  						{
  4973  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  4974  							Delay:          40 * time.Second,
  4975  						},
  4976  						{
  4977  							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
  4978  							Delay:          80 * time.Second,
  4979  						},
  4980  					},
  4981  				}},
  4982  			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(90 * time.Second),
  4983  			expectedRescheduleEligible: true,
  4984  		},
  4985  		{
  4986  			// Test case where most recent reschedule ran longer than delay ceiling
  4987  			desc: "exponential delay, delay ceiling reset condition met",
  4988  			reschedulePolicy: &ReschedulePolicy{
  4989  				DelayFunction: "exponential",
  4990  				Delay:         5 * time.Second,
  4991  				MaxDelay:      90 * time.Second,
  4992  				Unlimited:     true,
  4993  			},
  4994  			alloc: &Allocation{
  4995  				ClientStatus: AllocClientStatusFailed,
  4996  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  4997  					StartedAt:  now.Add(-1 * time.Hour),
  4998  					FinishedAt: now.Add(-15 * time.Minute)}},
  4999  				RescheduleTracker: &RescheduleTracker{
  5000  					Events: []*RescheduleEvent{
  5001  						{
  5002  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5003  							Delay:          5 * time.Second,
  5004  						},
  5005  						{
  5006  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5007  							Delay:          10 * time.Second,
  5008  						},
  5009  						{
  5010  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5011  							Delay:          20 * time.Second,
  5012  						},
  5013  						{
  5014  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5015  							Delay:          40 * time.Second,
  5016  						},
  5017  						{
  5018  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5019  							Delay:          80 * time.Second,
  5020  						},
  5021  						{
  5022  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5023  							Delay:          90 * time.Second,
  5024  						},
  5025  						{
  5026  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5027  							Delay:          90 * time.Second,
  5028  						},
  5029  					},
  5030  				}},
  5031  			expectedRescheduleTime:     now.Add(-15 * time.Minute).Add(5 * time.Second),
  5032  			expectedRescheduleEligible: true,
  5033  		},
  5034  		{
  5035  			desc: "fibonacci delay - no reschedule tracker",
  5036  			reschedulePolicy: &ReschedulePolicy{
  5037  				DelayFunction: "fibonacci",
  5038  				Delay:         5 * time.Second,
  5039  				MaxDelay:      90 * time.Second,
  5040  				Unlimited:     true,
  5041  			},
  5042  			alloc: &Allocation{
  5043  				ClientStatus: AllocClientStatusFailed,
  5044  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5045  					StartedAt:  now.Add(-1 * time.Hour),
  5046  					FinishedAt: now.Add(-2 * time.Second)}}},
  5047  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  5048  			expectedRescheduleEligible: true,
  5049  		},
  5050  		{
  5051  			desc: "fibonacci delay with reschedule tracker",
  5052  			reschedulePolicy: &ReschedulePolicy{
  5053  				DelayFunction: "fibonacci",
  5054  				Delay:         5 * time.Second,
  5055  				MaxDelay:      90 * time.Second,
  5056  				Unlimited:     true,
  5057  			},
  5058  			alloc: &Allocation{
  5059  				ClientStatus: AllocClientStatusFailed,
  5060  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5061  					StartedAt:  now.Add(-1 * time.Hour),
  5062  					FinishedAt: now.Add(-2 * time.Second)}},
  5063  				RescheduleTracker: &RescheduleTracker{
  5064  					Events: []*RescheduleEvent{
  5065  						{
  5066  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5067  							Delay:          5 * time.Second,
  5068  						},
  5069  						{
  5070  							RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(),
  5071  							Delay:          5 * time.Second,
  5072  						},
  5073  					},
  5074  				}},
  5075  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(10 * time.Second),
  5076  			expectedRescheduleEligible: true,
  5077  		},
  5078  		{
  5079  			desc: "fibonacci delay with more events",
  5080  			reschedulePolicy: &ReschedulePolicy{
  5081  				DelayFunction: "fibonacci",
  5082  				Delay:         5 * time.Second,
  5083  				MaxDelay:      90 * time.Second,
  5084  				Unlimited:     true,
  5085  			},
  5086  			alloc: &Allocation{
  5087  				ClientStatus: AllocClientStatusFailed,
  5088  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5089  					StartedAt:  now.Add(-1 * time.Hour),
  5090  					FinishedAt: now.Add(-2 * time.Second)}},
  5091  				RescheduleTracker: &RescheduleTracker{
  5092  					Events: []*RescheduleEvent{
  5093  						{
  5094  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5095  							Delay:          5 * time.Second,
  5096  						},
  5097  						{
  5098  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5099  							Delay:          5 * time.Second,
  5100  						},
  5101  						{
  5102  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5103  							Delay:          10 * time.Second,
  5104  						},
  5105  						{
  5106  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5107  							Delay:          15 * time.Second,
  5108  						},
  5109  						{
  5110  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5111  							Delay:          25 * time.Second,
  5112  						},
  5113  					},
  5114  				}},
  5115  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
  5116  			expectedRescheduleEligible: true,
  5117  		},
  5118  		{
  5119  			desc: "fibonacci delay with delay ceiling reached",
  5120  			reschedulePolicy: &ReschedulePolicy{
  5121  				DelayFunction: "fibonacci",
  5122  				Delay:         5 * time.Second,
  5123  				MaxDelay:      50 * time.Second,
  5124  				Unlimited:     true,
  5125  			},
  5126  			alloc: &Allocation{
  5127  				ClientStatus: AllocClientStatusFailed,
  5128  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5129  					StartedAt:  now.Add(-1 * time.Hour),
  5130  					FinishedAt: now.Add(-15 * time.Second)}},
  5131  				RescheduleTracker: &RescheduleTracker{
  5132  					Events: []*RescheduleEvent{
  5133  						{
  5134  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5135  							Delay:          5 * time.Second,
  5136  						},
  5137  						{
  5138  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5139  							Delay:          5 * time.Second,
  5140  						},
  5141  						{
  5142  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5143  							Delay:          10 * time.Second,
  5144  						},
  5145  						{
  5146  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5147  							Delay:          15 * time.Second,
  5148  						},
  5149  						{
  5150  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5151  							Delay:          25 * time.Second,
  5152  						},
  5153  						{
  5154  							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
  5155  							Delay:          40 * time.Second,
  5156  						},
  5157  					},
  5158  				}},
  5159  			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(50 * time.Second),
  5160  			expectedRescheduleEligible: true,
  5161  		},
  5162  		{
  5163  			desc: "fibonacci delay with delay reset condition met",
  5164  			reschedulePolicy: &ReschedulePolicy{
  5165  				DelayFunction: "fibonacci",
  5166  				Delay:         5 * time.Second,
  5167  				MaxDelay:      50 * time.Second,
  5168  				Unlimited:     true,
  5169  			},
  5170  			alloc: &Allocation{
  5171  				ClientStatus: AllocClientStatusFailed,
  5172  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5173  					StartedAt:  now.Add(-1 * time.Hour),
  5174  					FinishedAt: now.Add(-5 * time.Minute)}},
  5175  				RescheduleTracker: &RescheduleTracker{
  5176  					Events: []*RescheduleEvent{
  5177  						{
  5178  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5179  							Delay:          5 * time.Second,
  5180  						},
  5181  						{
  5182  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5183  							Delay:          5 * time.Second,
  5184  						},
  5185  						{
  5186  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5187  							Delay:          10 * time.Second,
  5188  						},
  5189  						{
  5190  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5191  							Delay:          15 * time.Second,
  5192  						},
  5193  						{
  5194  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5195  							Delay:          25 * time.Second,
  5196  						},
  5197  						{
  5198  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5199  							Delay:          40 * time.Second,
  5200  						},
  5201  					},
  5202  				}},
  5203  			expectedRescheduleTime:     now.Add(-5 * time.Minute).Add(5 * time.Second),
  5204  			expectedRescheduleEligible: true,
  5205  		},
  5206  		{
  5207  			desc: "fibonacci delay with the most recent event that reset delay value",
  5208  			reschedulePolicy: &ReschedulePolicy{
  5209  				DelayFunction: "fibonacci",
  5210  				Delay:         5 * time.Second,
  5211  				MaxDelay:      50 * time.Second,
  5212  				Unlimited:     true,
  5213  			},
  5214  			alloc: &Allocation{
  5215  				ClientStatus: AllocClientStatusFailed,
  5216  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5217  					StartedAt:  now.Add(-1 * time.Hour),
  5218  					FinishedAt: now.Add(-5 * time.Second)}},
  5219  				RescheduleTracker: &RescheduleTracker{
  5220  					Events: []*RescheduleEvent{
  5221  						{
  5222  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5223  							Delay:          5 * time.Second,
  5224  						},
  5225  						{
  5226  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5227  							Delay:          5 * time.Second,
  5228  						},
  5229  						{
  5230  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5231  							Delay:          10 * time.Second,
  5232  						},
  5233  						{
  5234  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5235  							Delay:          15 * time.Second,
  5236  						},
  5237  						{
  5238  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5239  							Delay:          25 * time.Second,
  5240  						},
  5241  						{
  5242  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5243  							Delay:          40 * time.Second,
  5244  						},
  5245  						{
  5246  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5247  							Delay:          50 * time.Second,
  5248  						},
  5249  						{
  5250  							RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(),
  5251  							Delay:          5 * time.Second,
  5252  						},
  5253  					},
  5254  				}},
  5255  			expectedRescheduleTime:     now.Add(-5 * time.Second).Add(5 * time.Second),
  5256  			expectedRescheduleEligible: true,
  5257  		},
  5258  	}
  5259  	for _, tc := range testCases {
  5260  		t.Run(tc.desc, func(t *testing.T) {
  5261  			require := require.New(t)
  5262  			j := testJob()
  5263  			if tc.reschedulePolicy != nil {
  5264  				j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy
  5265  			}
  5266  			tc.alloc.Job = j
  5267  			tc.alloc.TaskGroup = j.TaskGroups[0].Name
  5268  			reschedTime, allowed := tc.alloc.NextRescheduleTime()
  5269  			require.Equal(tc.expectedRescheduleEligible, allowed)
  5270  			require.Equal(tc.expectedRescheduleTime, reschedTime)
  5271  		})
  5272  	}
  5273  
  5274  }
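
// sketchNextExponentialDelay covers the straightforward exponential cases
// above; an illustrative assumption, not the method under test: the next
// delay doubles the most recent tracked delay, capped at MaxDelay. The
// ceiling-reset behavior (the delay returning to its initial value after a
// long healthy run, also exercised above) is deliberately omitted here.
func sketchNextExponentialDelay(p *ReschedulePolicy, events []*RescheduleEvent) time.Duration {
	if len(events) == 0 {
		return p.Delay
	}
	next := events[len(events)-1].Delay * 2
	if next > p.MaxDelay {
		next = p.MaxDelay
	}
	return next
}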
  5275  
  5276  func TestAllocation_WaitClientStop(t *testing.T) {
  5277  	ci.Parallel(t)
  5278  	type testCase struct {
  5279  		desc                   string
  5280  		stop                   time.Duration
  5281  		status                 string
  5282  		expectedShould         bool
  5283  		expectedRescheduleTime time.Time
  5284  	}
  5285  	now := time.Now().UTC()
  5286  	testCases := []testCase{
  5287  		{
  5288  			desc:           "running",
  5289  			stop:           2 * time.Second,
  5290  			status:         AllocClientStatusRunning,
  5291  			expectedShould: true,
  5292  		},
  5293  		{
  5294  			desc:           "no stop_after_client_disconnect",
  5295  			status:         AllocClientStatusLost,
  5296  			expectedShould: false,
  5297  		},
  5298  		{
  5299  			desc:                   "stop",
  5300  			status:                 AllocClientStatusLost,
  5301  			stop:                   2 * time.Second,
  5302  			expectedShould:         true,
  5303  			expectedRescheduleTime: now.Add((2 + 5) * time.Second),
  5304  		},
  5305  	}
  5306  	for _, tc := range testCases {
  5307  		t.Run(tc.desc, func(t *testing.T) {
  5308  			j := testJob()
  5309  			a := &Allocation{
  5310  				ClientStatus: tc.status,
  5311  				Job:          j,
  5312  				TaskStates:   map[string]*TaskState{},
  5313  			}
  5314  
  5315  			if tc.status == AllocClientStatusLost {
  5316  				a.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost)
  5317  			}
  5318  
  5319  			j.TaskGroups[0].StopAfterClientDisconnect = &tc.stop
  5320  			a.TaskGroup = j.TaskGroups[0].Name
  5321  
  5322  			require.Equal(t, tc.expectedShould, a.ShouldClientStop())
  5323  
  5324  			if !tc.expectedShould || tc.status != AllocClientStatusLost {
  5325  				return
  5326  			}
  5327  
  5328  			// reschedTime should land within a couple of seconds of expectedRescheduleTime
  5329  			reschedTime := a.WaitClientStop()
  5330  			e := reschedTime.Unix() - tc.expectedRescheduleTime.Unix()
  5331  			require.Less(t, e, int64(2))
  5332  		})
  5333  	}
  5334  }
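
// sketchWaitClientStop restates the "stop" expectation above; an illustrative
// assumption, not the method under test: a lost allocation may be replaced
// only after the group's stop_after_client_disconnect window, measured from
// when the client was marked lost, plus a small grace period (the +5s in the
// expected reschedule time above).
func sketchWaitClientStop(lostAt time.Time, stopAfter time.Duration) time.Time {
	const grace = 5 * time.Second // padding observed in the expectation above
	return lostAt.Add(stopAfter + grace)
}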
  5335  
  5336  func TestAllocation_DisconnectTimeout(t *testing.T) {
	ci.Parallel(t)

  5337  	type testCase struct {
  5338  		desc          string
  5339  		maxDisconnect *time.Duration
  5340  	}
  5341  
  5342  	testCases := []testCase{
  5343  		{
  5344  			desc:          "no max_client_disconnect",
  5345  			maxDisconnect: nil,
  5346  		},
  5347  		{
  5348  			desc:          "has max_client_disconnect",
  5349  			maxDisconnect: pointer.Of(30 * time.Second),
  5350  		},
  5351  		{
  5352  			desc:          "zero max_client_disconnect",
  5353  			maxDisconnect: pointer.Of(0 * time.Second),
  5354  		},
  5355  	}
  5356  	for _, tc := range testCases {
  5357  		t.Run(tc.desc, func(t *testing.T) {
  5358  			j := testJob()
  5359  			a := &Allocation{
  5360  				Job: j,
  5361  			}
  5362  
  5363  			j.TaskGroups[0].MaxClientDisconnect = tc.maxDisconnect
  5364  			a.TaskGroup = j.TaskGroups[0].Name
  5365  
  5366  			now := time.Now()
  5367  
  5368  			reschedTime := a.DisconnectTimeout(now)
  5369  
  5370  			if tc.maxDisconnect == nil {
  5371  				require.Equal(t, now, reschedTime, "expected to be now")
  5372  			} else {
  5373  				difference := reschedTime.Sub(now)
  5374  				require.Equal(t, *tc.maxDisconnect, difference, "expected durations to be equal")
  5375  			}
  5376  
  5377  		})
  5378  	}
  5379  }
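
// sketchDisconnectTimeout restates the three cases above; an illustrative
// assumption, not the method under test: without max_client_disconnect the
// timeout is now (the allocation may be rescheduled immediately), otherwise
// it is now plus the configured duration, including a zero duration.
func sketchDisconnectTimeout(now time.Time, maxDisconnect *time.Duration) time.Time {
	if maxDisconnect == nil {
		return now
	}
	return now.Add(*maxDisconnect)
}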
  5380  
  5381  func TestAllocation_Expired(t *testing.T) {
	ci.Parallel(t)

  5382  	type testCase struct {
  5383  		name             string
  5384  		maxDisconnect    string
  5385  		elapsed          int
  5386  		expected         bool
  5387  		nilJob           bool
  5388  		badTaskGroup     bool
  5389  		mixedUTC         bool
  5390  		noReconnectEvent bool
  5391  		status           string
  5392  	}
  5393  
  5394  	testCases := []testCase{
  5395  		{
  5396  			name:          "has-expired",
  5397  			maxDisconnect: "5s",
  5398  			elapsed:       10,
  5399  			expected:      true,
  5400  		},
  5401  		{
  5402  			name:          "has-not-expired",
  5403  			maxDisconnect: "5s",
  5404  			elapsed:       3,
  5405  			expected:      false,
  5406  		},
  5407  		{
  5408  			name:          "are-equal",
  5409  			maxDisconnect: "5s",
  5410  			elapsed:       5,
  5411  			expected:      true,
  5412  		},
  5413  		{
  5414  			name:          "nil-job",
  5415  			maxDisconnect: "5s",
  5416  			elapsed:       10,
  5417  			expected:      false,
  5418  			nilJob:        true,
  5419  		},
  5420  		{
  5421  			name:          "wrong-status",
  5422  			maxDisconnect: "5s",
  5423  			elapsed:       10,
  5424  			expected:      false,
  5425  			status:        AllocClientStatusRunning,
  5426  		},
  5427  		{
  5428  			name:          "bad-task-group",
  5429  			maxDisconnect: "",
  5430  			badTaskGroup:  true,
  5431  			elapsed:       10,
  5432  			expected:      false,
  5433  		},
  5434  		{
  5435  			name:          "no-max-disconnect",
  5436  			maxDisconnect: "",
  5437  			elapsed:       10,
  5438  			expected:      false,
  5439  		},
  5440  		{
  5441  			name:          "mixed-utc-has-expired",
  5442  			maxDisconnect: "5s",
  5443  			elapsed:       10,
  5444  			mixedUTC:      true,
  5445  			expected:      true,
  5446  		},
  5447  		{
  5448  			name:          "mixed-utc-has-not-expired",
  5449  			maxDisconnect: "5s",
  5450  			elapsed:       3,
  5451  			mixedUTC:      true,
  5452  			expected:      false,
  5453  		},
  5454  		{
  5455  			name:             "no-reconnect-event",
  5456  			maxDisconnect:    "5s",
  5457  			elapsed:          2,
  5458  			expected:         false,
  5459  			noReconnectEvent: true,
  5460  		},
  5461  	}
  5462  	for _, tc := range testCases {
  5463  		t.Run(tc.name, func(t *testing.T) {
  5464  			alloc := MockAlloc()
  5465  			var err error
  5466  			var maxDisconnect time.Duration
  5467  
  5468  			if tc.maxDisconnect != "" {
  5469  				maxDisconnect, err = time.ParseDuration(tc.maxDisconnect)
  5470  				require.NoError(t, err)
  5471  				alloc.Job.TaskGroups[0].MaxClientDisconnect = &maxDisconnect
  5472  			}
  5473  
  5474  			if tc.nilJob {
  5475  				alloc.Job = nil
  5476  			}
  5477  
  5478  			if tc.badTaskGroup {
  5479  				alloc.TaskGroup = "bad"
  5480  			}
  5481  
  5482  			alloc.ClientStatus = AllocClientStatusUnknown
  5483  			if tc.status != "" {
  5484  				alloc.ClientStatus = tc.status
  5485  			}
  5486  
  5487  			alloc.AllocStates = []*AllocState{{
  5488  				Field: AllocStateFieldClientStatus,
  5489  				Value: AllocClientStatusUnknown,
  5490  				Time:  time.Now(),
  5491  			}}
  5492  
  5493  			require.NoError(t, err)
  5494  			now := time.Now().UTC()
  5495  			if tc.mixedUTC {
  5496  				now = time.Now()
  5497  			}
  5498  
  5499  			if !tc.noReconnectEvent {
  5500  				event := NewTaskEvent(TaskClientReconnected)
  5501  				event.Time = now.UnixNano()
  5502  
  5503  				alloc.TaskStates = map[string]*TaskState{
  5504  					"web": {
  5505  						Events: []*TaskEvent{event},
  5506  					},
  5507  				}
  5508  			}
  5509  
  5510  			elapsedDuration := time.Duration(tc.elapsed) * time.Second
  5511  			now = now.Add(elapsedDuration)
  5512  
  5513  			require.Equal(t, tc.expected, alloc.Expired(now))
  5514  		})
  5515  	}
  5516  }
  5517  
  5518  func TestAllocation_NeedsToReconnect(t *testing.T) {
  5519  	ci.Parallel(t)
  5520  
  5521  	testCases := []struct {
  5522  		name     string
  5523  		states   []*AllocState
  5524  		expected bool
  5525  	}{
  5526  		{
  5527  			name:     "no state",
  5528  			expected: false,
  5529  		},
  5530  		{
  5531  			name:     "never disconnected",
  5532  			states:   []*AllocState{},
  5533  			expected: false,
  5534  		},
  5535  		{
  5536  			name: "disconnected once",
  5537  			states: []*AllocState{
  5538  				{
  5539  					Field: AllocStateFieldClientStatus,
  5540  					Value: AllocClientStatusUnknown,
  5541  					Time:  time.Now(),
  5542  				},
  5543  			},
  5544  			expected: true,
  5545  		},
  5546  		{
  5547  			name: "disconnect reconnect disconnect",
  5548  			states: []*AllocState{
  5549  				{
  5550  					Field: AllocStateFieldClientStatus,
  5551  					Value: AllocClientStatusUnknown,
  5552  					Time:  time.Now().Add(-2 * time.Minute),
  5553  				},
  5554  				{
  5555  					Field: AllocStateFieldClientStatus,
  5556  					Value: AllocClientStatusRunning,
  5557  					Time:  time.Now().Add(-1 * time.Minute),
  5558  				},
  5559  				{
  5560  					Field: AllocStateFieldClientStatus,
  5561  					Value: AllocClientStatusUnknown,
  5562  					Time:  time.Now(),
  5563  				},
  5564  			},
  5565  			expected: true,
  5566  		},
  5567  		{
  5568  			name: "disconnect multiple times before reconnect",
  5569  			states: []*AllocState{
  5570  				{
  5571  					Field: AllocStateFieldClientStatus,
  5572  					Value: AllocClientStatusUnknown,
  5573  					Time:  time.Now().Add(-2 * time.Minute),
  5574  				},
  5575  				{
  5576  					Field: AllocStateFieldClientStatus,
  5577  					Value: AllocClientStatusUnknown,
  5578  					Time:  time.Now().Add(-1 * time.Minute),
  5579  				},
  5580  				{
  5581  					Field: AllocStateFieldClientStatus,
  5582  					Value: AllocClientStatusRunning,
  5583  					Time:  time.Now(),
  5584  				},
  5585  			},
  5586  			expected: false,
  5587  		},
  5588  		{
  5589  			name: "disconnect after multiple updates",
  5590  			states: []*AllocState{
  5591  				{
  5592  					Field: AllocStateFieldClientStatus,
  5593  					Value: AllocClientStatusPending,
  5594  					Time:  time.Now().Add(-2 * time.Minute),
  5595  				},
  5596  				{
  5597  					Field: AllocStateFieldClientStatus,
  5598  					Value: AllocClientStatusRunning,
  5599  					Time:  time.Now().Add(-1 * time.Minute),
  5600  				},
  5601  				{
  5602  					Field: AllocStateFieldClientStatus,
  5603  					Value: AllocClientStatusUnknown,
  5604  					Time:  time.Now(),
  5605  				},
  5606  			},
  5607  			expected: true,
  5608  		},
  5609  	}
  5610  
  5611  	for _, tc := range testCases {
  5612  		t.Run(tc.name, func(t *testing.T) {
  5613  			alloc := MockAlloc()
  5614  			alloc.AllocStates = tc.states
  5615  
  5616  			got := alloc.NeedsToReconnect()
  5617  			require.Equal(t, tc.expected, got)
  5618  		})
  5619  	}
  5620  }
  5621  
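        // TestAllocation_Canonicalize_Old asserts that Canonicalize upgrades an
        // alloc still using the legacy TaskResources and SharedResources fields
        // into the AllocatedResources schema.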
  5622  func TestAllocation_Canonicalize_Old(t *testing.T) {
  5623  	ci.Parallel(t)
  5624  
  5625  	alloc := MockAlloc()
  5626  	alloc.AllocatedResources = nil
  5627  	alloc.TaskResources = map[string]*Resources{
  5628  		"web": {
  5629  			CPU:      500,
  5630  			MemoryMB: 256,
  5631  			Networks: []*NetworkResource{
  5632  				{
  5633  					Device:        "eth0",
  5634  					IP:            "192.168.0.100",
  5635  					ReservedPorts: []Port{{Label: "admin", Value: 5000}},
  5636  					MBits:         50,
  5637  					DynamicPorts:  []Port{{Label: "http", Value: 9876}},
  5638  				},
  5639  			},
  5640  		},
  5641  	}
  5642  	alloc.SharedResources = &Resources{
  5643  		DiskMB: 150,
  5644  	}
  5645  	alloc.Canonicalize()
  5646  
  5647  	expected := &AllocatedResources{
  5648  		Tasks: map[string]*AllocatedTaskResources{
  5649  			"web": {
  5650  				Cpu: AllocatedCpuResources{
  5651  					CpuShares: 500,
  5652  				},
  5653  				Memory: AllocatedMemoryResources{
  5654  					MemoryMB: 256,
  5655  				},
  5656  				Networks: []*NetworkResource{
  5657  					{
  5658  						Device:        "eth0",
  5659  						IP:            "192.168.0.100",
  5660  						ReservedPorts: []Port{{Label: "admin", Value: 5000}},
  5661  						MBits:         50,
  5662  						DynamicPorts:  []Port{{Label: "http", Value: 9876}},
  5663  					},
  5664  				},
  5665  			},
  5666  		},
  5667  		Shared: AllocatedSharedResources{
  5668  			DiskMB: 150,
  5669  		},
  5670  	}
  5671  
  5672  	require.Equal(t, expected, alloc.AllocatedResources)
  5673  }
  5674  
  5675  // TestAllocation_Canonicalize_New asserts that an alloc that already uses
  5676  // the latest resources schema is not modified by Canonicalize.
  5677  func TestAllocation_Canonicalize_New(t *testing.T) {
  5678  	ci.Parallel(t)
  5679  
  5680  	alloc := MockAlloc()
  5681  	allocCopy := alloc.Copy()
  5682  
  5683  	alloc.Canonicalize()
  5684  	require.Equal(t, allocCopy, alloc)
  5685  }
  5686  
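        // TestRescheduleTracker_Copy verifies that Copy returns an equivalent
        // tracker, including the nil case.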
  5687  func TestRescheduleTracker_Copy(t *testing.T) {
  5688  	ci.Parallel(t)
  5689  	type testCase struct {
  5690  		original *RescheduleTracker
  5691  		expected *RescheduleTracker
  5692  	}
  5693  	cases := []testCase{
  5694  		{nil, nil},
  5695  		{&RescheduleTracker{Events: []*RescheduleEvent{
  5696  			{RescheduleTime: 2,
  5697  				PrevAllocID: "12",
  5698  				PrevNodeID:  "12",
  5699  				Delay:       30 * time.Second},
  5700  		}}, &RescheduleTracker{Events: []*RescheduleEvent{
  5701  			{RescheduleTime: 2,
  5702  				PrevAllocID: "12",
  5703  				PrevNodeID:  "12",
  5704  				Delay:       30 * time.Second},
  5705  		}}},
  5706  	}
  5707  
  5708  	for _, tc := range cases {
  5709  		if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) {
  5710  			t.Fatalf("expected %v but got %v", tc.expected, got)
  5711  		}
  5712  	}
  5713  }
  5714  
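        // TestVault_Validate exercises Vault block validation: an empty policy
        // list, a signal change mode without a signal, and use of the reserved
        // "root" policy are all rejected.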
  5715  func TestVault_Validate(t *testing.T) {
  5716  	ci.Parallel(t)
  5717  
  5718  	v := &Vault{
  5719  		Env:        true,
  5720  		ChangeMode: VaultChangeModeNoop,
  5721  	}
  5722  
  5723  	if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") {
  5724  		t.Fatalf("Expected policy list empty error")
  5725  	}
  5726  
  5727  	v.Policies = []string{"foo", "root"}
  5728  	v.ChangeMode = VaultChangeModeSignal
  5729  
  5730  	err := v.Validate()
  5731  	if err == nil {
  5732  		t.Fatalf("Expected validation errors")
  5733  	}
  5734  
  5735  	if !strings.Contains(err.Error(), "Signal must") {
  5736  		t.Fatalf("Expected signal empty error")
  5737  	}
  5738  	if !strings.Contains(err.Error(), "root") {
  5739  		t.Fatalf("Expected root error")
  5740  	}
  5741  }
  5742  
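        // TestVault_Copy verifies that mutating a copied Vault block leaves the
        // copy distinct from the original.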
  5743  func TestVault_Copy(t *testing.T) {
  5744  	v := &Vault{
  5745  		Policies:     []string{"policy1", "policy2"},
  5746  		Namespace:    "ns1",
  5747  		Env:          false,
  5748  		ChangeMode:   "noop",
  5749  		ChangeSignal: "SIGKILL",
  5750  	}
  5751  
  5752  	// Copy and modify.
  5753  	vc := v.Copy()
  5754  	vc.Policies[0] = "policy0"
  5755  	vc.Namespace = "ns2"
  5756  	vc.Env = true
  5757  	vc.ChangeMode = "signal"
  5758  	vc.ChangeSignal = "SIGHUP"
  5759  
  5760  	require.NotEqual(t, v, vc)
  5761  }
  5762  
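        // TestVault_Canonicalize verifies that Canonicalize upper-cases the
        // change signal and defaults the change mode to restart.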
  5763  func TestVault_Canonicalize(t *testing.T) {
  5764  	v := &Vault{
  5765  		ChangeSignal: "sighup",
  5766  	}
  5767  	v.Canonicalize()
  5768  	require.Equal(t, "SIGHUP", v.ChangeSignal)
  5769  	require.Equal(t, VaultChangeModeRestart, v.ChangeMode)
  5770  }
  5771  
  5772  func TestParameterizedJobConfig_Validate(t *testing.T) {
  5773  	ci.Parallel(t)
  5774  
  5775  	d := &ParameterizedJobConfig{
  5776  		Payload: "foo",
  5777  	}
  5778  
  5779  	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") {
  5780  		t.Fatalf("Expected unknown payload requirement: %v", err)
  5781  	}
  5782  
  5783  	d.Payload = DispatchPayloadOptional
  5784  	d.MetaOptional = []string{"foo", "bar"}
  5785  	d.MetaRequired = []string{"bar", "baz"}
  5786  
  5787  	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") {
  5788  		t.Fatalf("Expected meta not being disjoint error: %v", err)
  5789  	}
  5790  }
  5791  
  5792  func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) {
  5793  	ci.Parallel(t)
  5794  
  5795  	job := testJob()
  5796  	job.ParameterizedJob = &ParameterizedJobConfig{
  5797  		Payload: DispatchPayloadOptional,
  5798  	}
  5799  	job.Type = JobTypeSystem
  5800  
  5801  	if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") {
  5802  		t.Fatalf("Expected bad scheduler type: %v", err)
  5803  	}
  5804  }
  5805  
  5806  func TestJobConfig_Validate_StopAfterClientDisconnect(t *testing.T) {
  5807  	ci.Parallel(t)
  5808  	// Set up a system job with stop_after_client_disconnect set, which is invalid
  5809  	job := testJob()
  5810  	job.Type = JobTypeSystem
  5811  	stop := 1 * time.Minute
  5812  	job.TaskGroups[0].StopAfterClientDisconnect = &stop
  5813  
  5814  	err := job.Validate()
  5815  	require.Error(t, err)
  5816  	require.Contains(t, err.Error(), "stop_after_client_disconnect can only be set in batch and service jobs")
  5817  
  5818  	// Modify the job to a batch job with an invalid stop_after_client_disconnect value
  5819  	job.Type = JobTypeBatch
  5820  	invalid := -1 * time.Minute
  5821  	job.TaskGroups[0].StopAfterClientDisconnect = &invalid
  5822  
  5823  	err = job.Validate()
  5824  	require.Error(t, err)
  5825  	require.Contains(t, err.Error(), "stop_after_client_disconnect must be a positive value")
  5826  
  5827  	// Modify the job to a batch job with a valid stop_after_client_disconnect value
  5828  	job.Type = JobTypeBatch
  5829  	job.TaskGroups[0].StopAfterClientDisconnect = &stop
  5830  	err = job.Validate()
  5831  	require.NoError(t, err)
  5832  }
  5833  
  5834  func TestJobConfig_Validate_MaxClientDisconnect(t *testing.T) {
  5835  	// Set up a job with an invalid max_client_disconnect value
  5836  	job := testJob()
  5837  	timeout := -1 * time.Minute
  5838  	job.TaskGroups[0].MaxClientDisconnect = &timeout
  5839  	job.TaskGroups[0].StopAfterClientDisconnect = &timeout
  5840  
  5841  	err := job.Validate()
  5842  	require.Error(t, err)
  5843  	require.Contains(t, err.Error(), "max_client_disconnect cannot be negative")
  5844  	require.Contains(t, err.Error(), "Task group cannot be configured with both max_client_disconnect and stop_after_client_disconnect")
  5845  
  5846  	// Modify the job with a valid max_client_disconnect value
  5847  	timeout = 1 * time.Minute
  5848  	job.TaskGroups[0].MaxClientDisconnect = &timeout
  5849  	job.TaskGroups[0].StopAfterClientDisconnect = nil
  5850  	err = job.Validate()
  5851  	require.NoError(t, err)
  5852  }
  5853  
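        // TestParameterizedJobConfig_Canonicalize verifies that Canonicalize
        // defaults the payload mode to DispatchPayloadOptional.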
  5854  func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
  5855  	ci.Parallel(t)
  5856  
  5857  	d := &ParameterizedJobConfig{}
  5858  	d.Canonicalize()
  5859  	if d.Payload != DispatchPayloadOptional {
  5860  		t.Fatalf("expected Payload to default to %q, got %q", DispatchPayloadOptional, d.Payload)
  5861  	}
  5862  }
  5863  
  5864  func TestDispatchPayloadConfig_Validate(t *testing.T) {
  5865  	ci.Parallel(t)
  5866  
  5867  	d := &DispatchPayloadConfig{
  5868  		File: "foo",
  5869  	}
  5870  
  5871  	// joins to task/local/foo, inside the allocation: valid
  5872  	if err := d.Validate(); err != nil {
  5873  		t.Fatalf("bad: %v", err)
  5874  	}
  5875  
  5876  	// joins to task/haha, still inside the allocation: valid
  5877  	d.File = "../haha"
  5878  	if err := d.Validate(); err != nil {
  5879  		t.Fatalf("bad: %v", err)
  5880  	}
  5881  
  5882  	// joins to ../haha, escaping the allocation directory: invalid
  5883  	d.File = "../../../haha"
  5884  	if err := d.Validate(); err == nil {
  5885  		t.Fatalf("expected validation error for path escaping the allocation directory")
  5886  	}
  5887  }
  5888  
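        // TestScalingPolicy_Canonicalize verifies that an empty policy type
        // defaults to horizontal while explicit types are left untouched.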
  5889  func TestScalingPolicy_Canonicalize(t *testing.T) {
  5890  	ci.Parallel(t)
  5891  
  5892  	cases := []struct {
  5893  		name     string
  5894  		input    *ScalingPolicy
  5895  		expected *ScalingPolicy
  5896  	}{
  5897  		{
  5898  			name:     "empty policy",
  5899  			input:    &ScalingPolicy{},
  5900  			expected: &ScalingPolicy{Type: ScalingPolicyTypeHorizontal},
  5901  		},
  5902  		{
  5903  			name:     "policy with type",
  5904  			input:    &ScalingPolicy{Type: "other-type"},
  5905  			expected: &ScalingPolicy{Type: "other-type"},
  5906  		},
  5907  	}
  5908  
  5909  	for _, c := range cases {
  5910  		t.Run(c.name, func(t *testing.T) {
  5911  			require := require.New(t)
  5912  
  5913  			c.input.Canonicalize()
  5914  			require.Equal(c.expected, c.input)
  5915  		})
  5916  	}
  5917  }
  5918  
  5919  func TestScalingPolicy_Validate(t *testing.T) {
  5920  	ci.Parallel(t)
  5921  	type testCase struct {
  5922  		name        string
  5923  		input       *ScalingPolicy
  5924  		expectedErr string
  5925  	}
  5926  	cases := []testCase{
  5927  		{
  5928  			name: "full horizontal policy",
  5929  			input: &ScalingPolicy{
  5930  				Policy: map[string]interface{}{
  5931  					"key": "value",
  5932  				},
  5933  				Type:    ScalingPolicyTypeHorizontal,
  5934  				Min:     5,
  5935  				Max:     5,
  5936  				Enabled: true,
  5937  				Target: map[string]string{
  5938  					ScalingTargetNamespace: "my-namespace",
  5939  					ScalingTargetJob:       "my-job",
  5940  					ScalingTargetGroup:     "my-task-group",
  5941  				},
  5942  			},
  5943  		},
  5944  		{
  5945  			name:        "missing type",
  5946  			input:       &ScalingPolicy{},
  5947  			expectedErr: "missing scaling policy type",
  5948  		},
  5949  		{
  5950  			name: "invalid type",
  5951  			input: &ScalingPolicy{
  5952  				Type: "not valid",
  5953  			},
  5954  			expectedErr: `scaling policy type "not valid" is not valid`,
  5955  		},
  5956  		{
  5957  			name: "min < 0",
  5958  			input: &ScalingPolicy{
  5959  				Type: ScalingPolicyTypeHorizontal,
  5960  				Min:  -1,
  5961  				Max:  5,
  5962  			},
  5963  			expectedErr: "minimum count must be specified and non-negative",
  5964  		},
  5965  		{
  5966  			name: "max < 0",
  5967  			input: &ScalingPolicy{
  5968  				Type: ScalingPolicyTypeHorizontal,
  5969  				Min:  5,
  5970  				Max:  -1,
  5971  			},
  5972  			expectedErr: "maximum count must be specified and non-negative",
  5973  		},
  5974  		{
  5975  			name: "min > max",
  5976  			input: &ScalingPolicy{
  5977  				Type: ScalingPolicyTypeHorizontal,
  5978  				Min:  10,
  5979  				Max:  0,
  5980  			},
  5981  			expectedErr: "maximum count must not be less than minimum count",
  5982  		},
  5983  		{
  5984  			name: "min == max",
  5985  			input: &ScalingPolicy{
  5986  				Type: ScalingPolicyTypeHorizontal,
  5987  				Min:  10,
  5988  				Max:  10,
  5989  			},
  5990  		},
  5991  		{
  5992  			name: "min == 0",
  5993  			input: &ScalingPolicy{
  5994  				Type: ScalingPolicyTypeHorizontal,
  5995  				Min:  0,
  5996  				Max:  10,
  5997  			},
  5998  		},
  5999  		{
  6000  			name: "max == 0",
  6001  			input: &ScalingPolicy{
  6002  				Type: ScalingPolicyTypeHorizontal,
  6003  				Min:  0,
  6004  				Max:  0,
  6005  			},
  6006  		},
  6007  		{
  6008  			name: "horizontal missing namespace",
  6009  			input: &ScalingPolicy{
  6010  				Type: ScalingPolicyTypeHorizontal,
  6011  				Target: map[string]string{
  6012  					ScalingTargetJob:   "my-job",
  6013  					ScalingTargetGroup: "my-group",
  6014  				},
  6015  			},
  6016  			expectedErr: "missing target namespace",
  6017  		},
  6018  		{
  6019  			name: "horizontal missing job",
  6020  			input: &ScalingPolicy{
  6021  				Type: ScalingPolicyTypeHorizontal,
  6022  				Target: map[string]string{
  6023  					ScalingTargetNamespace: "my-namespace",
  6024  					ScalingTargetGroup:     "my-group",
  6025  				},
  6026  			},
  6027  			expectedErr: "missing target job",
  6028  		},
  6029  		{
  6030  			name: "horizontal missing group",
  6031  			input: &ScalingPolicy{
  6032  				Type: ScalingPolicyTypeHorizontal,
  6033  				Target: map[string]string{
  6034  					ScalingTargetNamespace: "my-namespace",
  6035  					ScalingTargetJob:       "my-job",
  6036  				},
  6037  			},
  6038  			expectedErr: "missing target group",
  6039  		},
  6040  	}
  6041  
  6042  	for _, c := range cases {
  6043  		t.Run(c.name, func(t *testing.T) {
  6044  			require := require.New(t)
  6045  
  6046  			err := c.input.Validate()
  6047  
  6048  			if len(c.expectedErr) > 0 {
  6049  				require.ErrorContains(err, c.expectedErr)
  6050  			} else {
  6051  				require.NoError(err)
  6052  			}
  6053  		})
  6054  	}
  6055  }
  6056  
  6057  func TestIsRecoverable(t *testing.T) {
  6058  	ci.Parallel(t)
  6059  
  6060  	if IsRecoverable(nil) {
  6061  		t.Errorf("nil should not be recoverable")
  6062  	}
  6063  	if IsRecoverable(NewRecoverableError(nil, true)) {
  6064  		t.Errorf("NewRecoverableError(nil, true) should not be recoverable")
  6065  	}
  6066  	if IsRecoverable(fmt.Errorf("i promise im recoverable")) {
  6067  		t.Errorf("Custom errors should not be recoverable")
  6068  	}
  6069  	if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) {
  6070  		t.Errorf("Explicitly unrecoverable errors should not be recoverable")
  6071  	}
  6072  	if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) {
  6073  		t.Errorf("Explicitly recoverable errors *should* be recoverable")
  6074  	}
  6075  }
  6076  
  6077  func TestACLTokenSetHash(t *testing.T) {
  6078  	ci.Parallel(t)
  6079  
  6080  	tk := &ACLToken{
  6081  		Name:     "foo",
  6082  		Type:     ACLClientToken,
  6083  		Policies: []string{"foo", "bar"},
  6084  		Global:   false,
  6085  	}
  6086  	out1 := tk.SetHash()
  6087  	assert.NotNil(t, out1)
  6088  	assert.NotNil(t, tk.Hash)
  6089  	assert.Equal(t, out1, tk.Hash)
  6090  
  6091  	tk.Policies = []string{"foo"}
  6092  	out2 := tk.SetHash()
  6093  	assert.NotNil(t, out2)
  6094  	assert.NotNil(t, tk.Hash)
  6095  	assert.Equal(t, out2, tk.Hash)
  6096  	assert.NotEqual(t, out1, out2)
  6097  }
  6098  
  6099  func TestACLPolicySetHash(t *testing.T) {
  6100  	ci.Parallel(t)
  6101  
  6102  	ap := &ACLPolicy{
  6103  		Name:        "foo",
  6104  		Description: "great policy",
  6105  		Rules:       "node { policy = \"read\" }",
  6106  	}
  6107  	out1 := ap.SetHash()
  6108  	assert.NotNil(t, out1)
  6109  	assert.NotNil(t, ap.Hash)
  6110  	assert.Equal(t, out1, ap.Hash)
  6111  
  6112  	ap.Rules = "node { policy = \"write\" }"
  6113  	out2 := ap.SetHash()
  6114  	assert.NotNil(t, out2)
  6115  	assert.NotNil(t, ap.Hash)
  6116  	assert.Equal(t, out2, ap.Hash)
  6117  	assert.NotEqual(t, out1, out2)
  6118  }
  6119  
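        // TestTaskEventPopulate verifies that PopulateEventDisplayMessage fills
        // in a human-readable DisplayMessage for each event type and leaves
        // nil or already-populated events alone.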
  6120  func TestTaskEventPopulate(t *testing.T) {
  6121  	ci.Parallel(t)
  6122  
  6123  	prepopulatedEvent := NewTaskEvent(TaskSetup)
  6124  	prepopulatedEvent.DisplayMessage = "Hola"
  6125  	testcases := []struct {
  6126  		event       *TaskEvent
  6127  		expectedMsg string
  6128  	}{
  6129  		{nil, ""},
  6130  		{prepopulatedEvent, "Hola"},
  6131  		{NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"},
  6132  		{NewTaskEvent(TaskStarted), "Task started by client"},
  6133  		{NewTaskEvent(TaskReceived), "Task received by client"},
  6134  		{NewTaskEvent(TaskFailedValidation), "Validation of task failed"},
  6135  		{NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"},
  6136  		{NewTaskEvent(TaskSetupFailure), "Task setup failed"},
  6137  		{NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"},
  6138  		{NewTaskEvent(TaskDriverFailure), "Failed to start task"},
  6139  		{NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"},
  6140  		{NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"},
  6141  		{NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"},
  6142  		{NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"},
  6143  		{NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"},
  6144  		{NewTaskEvent(TaskKilling), "Sent interrupt"},
  6145  		{NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"},
  6146  		{NewTaskEvent(TaskKilling).SetKillTimeout(1*time.Second, 5*time.Second), "Sent interrupt. Waiting 1s before force killing"},
  6147  		{NewTaskEvent(TaskKilling).SetKillTimeout(10*time.Second, 5*time.Second), "Sent interrupt. Waiting 5s before force killing"},
  6148  		{NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"},
  6149  		{NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""},
  6150  		{NewTaskEvent(TaskKilled), "Task successfully killed"},
  6151  		{NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"},
  6152  		{NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"},
  6153  		{NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"},
  6154  		{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
  6155  		{NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"},
  6156  		{NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"},
  6157  		{NewTaskEvent(TaskSignaling), "Task being sent a signal"},
  6158  		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"},
  6159  		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"},
  6160  		{NewTaskEvent(TaskRestartSignal), "Task signaled to restart"},
  6161  		{NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"},
  6162  		{NewTaskEvent(TaskClientReconnected), "Client reconnected"},
  6163  		{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
  6164  		{NewTaskEvent("Unknown Type, No message"), ""},
  6165  		{NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"},
  6166  	}
  6167  
  6168  	for _, tc := range testcases {
  6169  		tc.event.PopulateEventDisplayMessage()
  6170  		if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg {
  6171  			t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage)
  6172  		}
  6173  	}
  6174  }
  6175  
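        // TestNetworkResourcesEquals exercises NetworkResource.Equal across
        // differences in IP, MBits, ReservedPorts, and DynamicPorts.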
  6176  func TestNetworkResourcesEquals(t *testing.T) {
  6177  	ci.Parallel(t)
  6178  
  6179  	require := require.New(t)
  6180  	var networkResourcesTest = []struct {
  6181  		input    []*NetworkResource
  6182  		expected bool
  6183  		errorMsg string
  6184  	}{
  6185  		{
  6186  			[]*NetworkResource{
  6187  				{
  6188  					IP:            "10.0.0.1",
  6189  					MBits:         50,
  6190  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6191  				},
  6192  				{
  6193  					IP:            "10.0.0.1",
  6194  					MBits:         50,
  6195  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6196  				},
  6197  			},
  6198  			true,
  6199  			"Equal network resources should return true",
  6200  		},
  6201  		{
  6202  			[]*NetworkResource{
  6203  				{
  6204  					IP:            "10.0.0.0",
  6205  					MBits:         50,
  6206  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6207  				},
  6208  				{
  6209  					IP:            "10.0.0.1",
  6210  					MBits:         50,
  6211  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6212  				},
  6213  			},
  6214  			false,
  6215  			"Different IP addresses should return false",
  6216  		},
  6217  		{
  6218  			[]*NetworkResource{
  6219  				{
  6220  					IP:            "10.0.0.1",
  6221  					MBits:         40,
  6222  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6223  				},
  6224  				{
  6225  					IP:            "10.0.0.1",
  6226  					MBits:         50,
  6227  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6228  				},
  6229  			},
  6230  			false,
  6231  			"Different MBits values should return false",
  6232  		},
  6233  		{
  6234  			[]*NetworkResource{
  6235  				{
  6236  					IP:            "10.0.0.1",
  6237  					MBits:         50,
  6238  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6239  				},
  6240  				{
  6241  					IP:            "10.0.0.1",
  6242  					MBits:         50,
  6243  					ReservedPorts: []Port{{"web", 80, 0, ""}, {"web", 80, 0, ""}},
  6244  				},
  6245  			},
  6246  			false,
  6247  			"Different ReservedPorts lengths should return false",
  6248  		},
  6249  		{
  6250  			[]*NetworkResource{
  6251  				{
  6252  					IP:            "10.0.0.1",
  6253  					MBits:         50,
  6254  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6255  				},
  6256  				{
  6257  					IP:            "10.0.0.1",
  6258  					MBits:         50,
  6259  					ReservedPorts: []Port{},
  6260  				},
  6261  			},
  6262  			false,
  6263  			"Empty and non-empty ReservedPorts values should return false",
  6264  		},
  6265  		{
  6266  			[]*NetworkResource{
  6267  				{
  6268  					IP:            "10.0.0.1",
  6269  					MBits:         50,
  6270  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6271  				},
  6272  				{
  6273  					IP:            "10.0.0.1",
  6274  					MBits:         50,
  6275  					ReservedPorts: []Port{{"notweb", 80, 0, ""}},
  6276  				},
  6277  			},
  6278  			false,
  6279  			"ReservedPorts with different values should return false",
  6280  		},
  6281  		{
  6282  			[]*NetworkResource{
  6283  				{
  6284  					IP:           "10.0.0.1",
  6285  					MBits:        50,
  6286  					DynamicPorts: []Port{{"web", 80, 0, ""}},
  6287  				},
  6288  				{
  6289  					IP:           "10.0.0.1",
  6290  					MBits:        50,
  6291  					DynamicPorts: []Port{{"web", 80, 0, ""}, {"web", 80, 0, ""}},
  6292  				},
  6293  			},
  6294  			false,
  6295  			"Different DynamicPorts lengths should return false",
  6296  		},
  6297  		{
  6298  			[]*NetworkResource{
  6299  				{
  6300  					IP:           "10.0.0.1",
  6301  					MBits:        50,
  6302  					DynamicPorts: []Port{{"web", 80, 0, ""}},
  6303  				},
  6304  				{
  6305  					IP:           "10.0.0.1",
  6306  					MBits:        50,
  6307  					DynamicPorts: []Port{},
  6308  				},
  6309  			},
  6310  			false,
  6311  			"Empty and non-empty DynamicPorts values should return false",
  6312  		},
  6313  		{
  6314  			[]*NetworkResource{
  6315  				{
  6316  					IP:           "10.0.0.1",
  6317  					MBits:        50,
  6318  					DynamicPorts: []Port{{"web", 80, 0, ""}},
  6319  				},
  6320  				{
  6321  					IP:           "10.0.0.1",
  6322  					MBits:        50,
  6323  					DynamicPorts: []Port{{"notweb", 80, 0, ""}},
  6324  				},
  6325  			},
  6326  			false,
  6327  			"DynamicPorts with different values should return false",
  6328  		},
  6329  	}
  6330  	for _, testCase := range networkResourcesTest {
  6331  		first := testCase.input[0]
  6332  		second := testCase.input[1]
  6333  		require.Equal(testCase.expected, first.Equal(second), testCase.errorMsg)
  6334  	}
  6335  }
  6336  
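        // TestNode_Canonicalize verifies that a node defaults to scheduling
        // eligible and that a node with a drain strategy is marked ineligible.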
  6337  func TestNode_Canonicalize(t *testing.T) {
  6338  	ci.Parallel(t)
  6339  	require := require.New(t)
  6340  
  6341  	// Make sure the eligibility is set properly
  6342  	node := &Node{}
  6343  	node.Canonicalize()
  6344  	require.Equal(NodeSchedulingEligible, node.SchedulingEligibility)
  6345  
  6346  	node = &Node{
  6347  		DrainStrategy: &DrainStrategy{
  6348  			DrainSpec: DrainSpec{
  6349  				Deadline: 30000,
  6350  			},
  6351  		},
  6352  	}
  6353  	node.Canonicalize()
  6354  	require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility)
  6355  }
  6356  
  6357  func TestNode_Copy(t *testing.T) {
  6358  	ci.Parallel(t)
  6359  	require := require.New(t)
  6360  
  6361  	node := &Node{
  6362  		ID:         uuid.Generate(),
  6363  		SecretID:   uuid.Generate(),
  6364  		Datacenter: "dc1",
  6365  		Name:       "foobar",
  6366  		Attributes: map[string]string{
  6367  			"kernel.name":        "linux",
  6368  			"arch":               "x86",
  6369  			"nomad.version":      "0.5.0",
  6370  			"driver.exec":        "1",
  6371  			"driver.mock_driver": "1",
  6372  		},
  6373  		Resources: &Resources{
  6374  			CPU:      4000,
  6375  			MemoryMB: 8192,
  6376  			DiskMB:   100 * 1024,
  6377  			Networks: []*NetworkResource{
  6378  				{
  6379  					Device: "eth0",
  6380  					CIDR:   "192.168.0.100/32",
  6381  					MBits:  1000,
  6382  				},
  6383  			},
  6384  		},
  6385  		Reserved: &Resources{
  6386  			CPU:      100,
  6387  			MemoryMB: 256,
  6388  			DiskMB:   4 * 1024,
  6389  			Networks: []*NetworkResource{
  6390  				{
  6391  					Device:        "eth0",
  6392  					IP:            "192.168.0.100",
  6393  					ReservedPorts: []Port{{Label: "ssh", Value: 22}},
  6394  					MBits:         1,
  6395  				},
  6396  			},
  6397  		},
  6398  		NodeResources: &NodeResources{
  6399  			Cpu: NodeCpuResources{
  6400  				CpuShares:          4000,
  6401  				TotalCpuCores:      4,
  6402  				ReservableCpuCores: []uint16{0, 1, 2, 3},
  6403  			},
  6404  			Memory: NodeMemoryResources{
  6405  				MemoryMB: 8192,
  6406  			},
  6407  			Disk: NodeDiskResources{
  6408  				DiskMB: 100 * 1024,
  6409  			},
  6410  			Networks: []*NetworkResource{
  6411  				{
  6412  					Device: "eth0",
  6413  					CIDR:   "192.168.0.100/32",
  6414  					MBits:  1000,
  6415  				},
  6416  			},
  6417  		},
  6418  		ReservedResources: &NodeReservedResources{
  6419  			Cpu: NodeReservedCpuResources{
  6420  				CpuShares:        100,
  6421  				ReservedCpuCores: []uint16{0},
  6422  			},
  6423  			Memory: NodeReservedMemoryResources{
  6424  				MemoryMB: 256,
  6425  			},
  6426  			Disk: NodeReservedDiskResources{
  6427  				DiskMB: 4 * 1024,
  6428  			},
  6429  			Networks: NodeReservedNetworkResources{
  6430  				ReservedHostPorts: "22",
  6431  			},
  6432  		},
  6433  		Links: map[string]string{
  6434  			"consul": "foobar.dc1",
  6435  		},
  6436  		Meta: map[string]string{
  6437  			"pci-dss":  "true",
  6438  			"database": "mysql",
  6439  			"version":  "5.6",
  6440  		},
  6441  		NodeClass:             "linux-medium-pci",
  6442  		Status:                NodeStatusReady,
  6443  		SchedulingEligibility: NodeSchedulingEligible,
  6444  		Drivers: map[string]*DriverInfo{
  6445  			"mock_driver": {
  6446  				Attributes:        map[string]string{"running": "1"},
  6447  				Detected:          true,
  6448  				Healthy:           true,
  6449  				HealthDescription: "Currently active",
  6450  				UpdateTime:        time.Now(),
  6451  			},
  6452  		},
  6453  	}
  6454  	node.ComputeClass()
  6455  
  6456  	node2 := node.Copy()
  6457  
  6458  	require.Equal(node.Attributes, node2.Attributes)
  6459  	require.Equal(node.Resources, node2.Resources)
  6460  	require.Equal(node.Reserved, node2.Reserved)
  6461  	require.Equal(node.Links, node2.Links)
  6462  	require.Equal(node.Meta, node2.Meta)
  6463  	require.Equal(node.Events, node2.Events)
  6464  	require.Equal(node.DrainStrategy, node2.DrainStrategy)
  6465  	require.Equal(node.Drivers, node2.Drivers)
  6466  }
  6467  
  6468  func TestNode_GetID(t *testing.T) {
  6469  	ci.Parallel(t)
  6470  
  6471  	testCases := []struct {
  6472  		inputNode      *Node
  6473  		expectedOutput string
  6474  		name           string
  6475  	}{
  6476  		{
  6477  			inputNode:      nil,
  6478  			expectedOutput: "",
  6479  			name:           "nil input node",
  6480  		},
  6481  		{
  6482  			inputNode:      &Node{ID: "someid"},
  6483  			expectedOutput: "someid",
  6484  			name:           "non-nil input node",
  6485  		},
  6486  	}
  6487  
  6488  	for _, tc := range testCases {
  6489  		actualOutput := tc.inputNode.GetID()
  6490  		require.Equal(t, tc.expectedOutput, actualOutput)
  6491  	}
  6492  }
  6493  
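        // TestNode_Sanitize verifies that Sanitize clears the node's SecretID
        // and passes a nil node through unchanged.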
  6494  func TestNode_Sanitize(t *testing.T) {
  6495  	ci.Parallel(t)
  6496  
  6497  	require := require.New(t)
  6498  
  6499  	testCases := []*Node{
  6500  		nil,
  6501  		{
  6502  			ID:       uuid.Generate(),
  6503  			SecretID: "",
  6504  		},
  6505  		{
  6506  			ID:       uuid.Generate(),
  6507  			SecretID: uuid.Generate(),
  6508  		},
  6509  	}
  6510  	for _, tc := range testCases {
  6511  		sanitized := tc.Sanitize()
  6512  		if tc == nil {
  6513  			require.Nil(sanitized)
  6514  		} else {
  6515  			require.NotNil(sanitized)
  6516  			require.Empty(sanitized.SecretID)
  6517  		}
  6518  	}
  6519  }
  6520  
  6521  func TestSpread_Validate(t *testing.T) {
  6522  	ci.Parallel(t)
  6523  	type tc struct {
  6524  		spread *Spread
  6525  		err    error
  6526  		name   string
  6527  	}
  6528  	testCases := []tc{
  6529  		{
  6530  			spread: &Spread{},
  6531  			err:    fmt.Errorf("Missing spread attribute"),
  6532  			name:   "empty spread",
  6533  		},
  6534  		{
  6535  			spread: &Spread{
  6536  				Attribute: "${node.datacenter}",
  6537  				Weight:    -1,
  6538  			},
  6539  			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
  6540  			name: "Invalid negative weight",
  6541  		},
  6542  		{
  6543  			spread: &Spread{
  6544  				Attribute: "${node.datacenter}",
  6545  				Weight:    110,
  6546  			},
  6547  			err:  fmt.Errorf("Spread stanza must have a positive weight from 0 to 100"),
  6548  			name: "Invalid weight above 100",
  6549  		},
  6550  		{
  6551  			spread: &Spread{
  6552  				Attribute: "${node.datacenter}",
  6553  				Weight:    50,
  6554  				SpreadTarget: []*SpreadTarget{
  6555  					{
  6556  						Value:   "dc1",
  6557  						Percent: 25,
  6558  					},
  6559  					{
  6560  						Value:   "dc2",
  6561  						Percent: 150,
  6562  					},
  6563  				},
  6564  			},
  6565  			err:  fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"),
  6566  			name: "Invalid target percentage",
  6567  		},
  6568  		{
  6569  			spread: &Spread{
  6570  				Attribute: "${node.datacenter}",
  6571  				Weight:    50,
  6572  				SpreadTarget: []*SpreadTarget{
  6573  					{
  6574  						Value:   "dc1",
  6575  						Percent: 75,
  6576  					},
  6577  					{
  6578  						Value:   "dc2",
  6579  						Percent: 75,
  6580  					},
  6581  				},
  6582  			},
  6583  			err:  fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150),
  6584  			name: "Target percentages sum above 100",
  6585  		},
  6586  		{
  6587  			spread: &Spread{
  6588  				Attribute: "${node.datacenter}",
  6589  				Weight:    50,
  6590  				SpreadTarget: []*SpreadTarget{
  6591  					{
  6592  						Value:   "dc1",
  6593  						Percent: 25,
  6594  					},
  6595  					{
  6596  						Value:   "dc1",
  6597  						Percent: 50,
  6598  					},
  6599  				},
  6600  			},
  6601  			err:  fmt.Errorf("Spread target value \"dc1\" already defined"),
  6602  			name: "Duplicate spread target",
  6603  		},
  6604  		{
  6605  			spread: &Spread{
  6606  				Attribute: "${node.datacenter}",
  6607  				Weight:    50,
  6608  				SpreadTarget: []*SpreadTarget{
  6609  					{
  6610  						Value:   "dc1",
  6611  						Percent: 25,
  6612  					},
  6613  					{
  6614  						Value:   "dc2",
  6615  						Percent: 50,
  6616  					},
  6617  				},
  6618  			},
  6619  			err:  nil,
  6620  			name: "Valid spread",
  6621  		},
  6622  	}
  6623  
  6624  	for _, tc := range testCases {
  6625  		t.Run(tc.name, func(t *testing.T) {
  6626  			err := tc.spread.Validate()
  6627  			if tc.err != nil {
  6628  				require.NotNil(t, err)
  6629  				require.Contains(t, err.Error(), tc.err.Error())
  6630  			} else {
  6631  				require.Nil(t, err)
  6632  			}
  6633  		})
  6634  	}
  6635  }
  6636  
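        // TestNodeReservedNetworkResources_ParseReserved verifies the reserved
        // host port grammar: comma-separated values and inclusive ranges such
        // as "1-3", with duplicates collapsed and inverted ranges rejected.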
  6637  func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) {
  6638  	ci.Parallel(t)
  6639  
  6640  	require := require.New(t)
  6641  	cases := []struct {
  6642  		Input  string
  6643  		Parsed []uint64
  6644  		Err    bool
  6645  	}{
  6646  		{
  6647  			"1,2,3",
  6648  			[]uint64{1, 2, 3},
  6649  			false,
  6650  		},
  6651  		{
  6652  			"3,1,2,1,2,3,1-3",
  6653  			[]uint64{1, 2, 3},
  6654  			false,
  6655  		},
  6656  		{
  6657  			"3-1",
  6658  			nil,
  6659  			true,
  6660  		},
  6661  		{
  6662  			"1-3,2-4",
  6663  			[]uint64{1, 2, 3, 4},
  6664  			false,
  6665  		},
  6666  		{
  6667  			"1-3,4,5-5,6,7,8-10",
  6668  			[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
  6669  			false,
  6670  		},
  6671  	}
  6672  
  6673  	for i, tc := range cases {
  6674  		r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input}
  6675  		out, err := r.ParseReservedHostPorts()
  6676  		if (err != nil) != tc.Err {
  6677  			t.Fatalf("test case %d: %v", i, err)
  6678  		}
  6679  
  6680  		require.Equal(tc.Parsed, out)
  6681  	}
  6682  }
  6683  
  6684  func TestMultiregion_CopyCanonicalize(t *testing.T) {
  6685  	ci.Parallel(t)
  6686  
  6687  	require := require.New(t)
  6688  
  6689  	emptyOld := &Multiregion{}
  6690  	expected := &Multiregion{
  6691  		Strategy: &MultiregionStrategy{},
  6692  		Regions:  []*MultiregionRegion{},
  6693  	}
  6694  
  6695  	old := emptyOld.Copy()
  6696  	old.Canonicalize()
  6697  	require.Equal(expected, old)
  6698  	require.False(old.Diff(expected))
  6699  
  6700  	nonEmptyOld := &Multiregion{
  6701  		Strategy: &MultiregionStrategy{
  6702  			MaxParallel: 2,
  6703  			OnFailure:   "fail_all",
  6704  		},
  6705  		Regions: []*MultiregionRegion{
  6706  			{
  6707  				Name:        "west",
  6708  				Count:       2,
  6709  				Datacenters: []string{"west-1", "west-2"},
  6710  				Meta:        map[string]string{},
  6711  			},
  6712  			{
  6713  				Name:        "east",
  6714  				Count:       1,
  6715  				Datacenters: []string{"east-1"},
  6716  				Meta:        map[string]string{},
  6717  			},
  6718  		},
  6719  	}
  6720  
  6721  	old = nonEmptyOld.Copy()
  6722  	old.Canonicalize()
  6723  	require.Equal(nonEmptyOld, old)
  6724  	require.False(old.Diff(nonEmptyOld))
  6725  }
  6726  
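        // TestNodeResources_Copy verifies that Copy deep-copies nested slices,
        // so mutating the copy does not alias the original.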
  6727  func TestNodeResources_Copy(t *testing.T) {
  6728  	ci.Parallel(t)
  6729  
  6730  	orig := &NodeResources{
  6731  		Cpu: NodeCpuResources{
  6732  			CpuShares:          int64(32000),
  6733  			TotalCpuCores:      32,
  6734  			ReservableCpuCores: []uint16{1, 2, 3, 9},
  6735  		},
  6736  		Memory: NodeMemoryResources{
  6737  			MemoryMB: int64(64000),
  6738  		},
  6739  		Networks: Networks{
  6740  			{
  6741  				Device: "foo",
  6742  			},
  6743  		},
  6744  		NodeNetworks: []*NodeNetworkResource{
  6745  			{
  6746  				Mode:       "host",
  6747  				Device:     "eth0",
  6748  				MacAddress: "00:00:00:00:00:00",
  6749  				Speed:      1000,
  6750  				Addresses: []NodeNetworkAddress{
  6751  					{
  6752  						Family:        NodeNetworkAF_IPv4,
  6753  						Alias:         "private",
  6754  						Address:       "192.168.0.100",
  6755  						ReservedPorts: "22,80",
  6756  						Gateway:       "192.168.0.1",
  6757  					},
  6758  				},
  6759  			},
  6760  		},
  6761  	}
  6762  
  6763  	kopy := orig.Copy()
  6764  	assert.Equal(t, orig, kopy)
  6765  
  6766  	// Make sure slices aren't shared
  6767  	kopy.Cpu.ReservableCpuCores[1] = 9000
  6768  	assert.NotEqual(t, orig.Cpu.ReservableCpuCores, kopy.Cpu.ReservableCpuCores)
  6769  
  6770  	kopy.NodeNetworks[0].MacAddress = "11:11:11:11:11:11"
  6771  	kopy.NodeNetworks[0].Addresses[0].Alias = "public"
  6772  	assert.NotEqual(t, orig.NodeNetworks[0], kopy.NodeNetworks[0])
  6773  }
  6774  
  6775  func TestNodeResources_Merge(t *testing.T) {
  6776  	ci.Parallel(t)
  6777  
  6778  	res := &NodeResources{
  6779  		Cpu: NodeCpuResources{
  6780  			CpuShares:     int64(32000),
  6781  			TotalCpuCores: 32,
  6782  		},
  6783  		Memory: NodeMemoryResources{
  6784  			MemoryMB: int64(64000),
  6785  		},
  6786  		Networks: Networks{
  6787  			{
  6788  				Device: "foo",
  6789  			},
  6790  		},
  6791  	}
  6792  
  6793  	res.Merge(&NodeResources{
  6794  		Cpu: NodeCpuResources{ReservableCpuCores: []uint16{0, 1, 2, 3}},
  6795  		Memory: NodeMemoryResources{
  6796  			MemoryMB: int64(100000),
  6797  		},
  6798  		Networks: Networks{
  6799  			{
  6800  				Mode: "foo/bar",
  6801  			},
  6802  		},
  6803  	})
  6804  
  6805  	require.Exactly(t, &NodeResources{
  6806  		Cpu: NodeCpuResources{
  6807  			CpuShares:          int64(32000),
  6808  			TotalCpuCores:      32,
  6809  			ReservableCpuCores: []uint16{0, 1, 2, 3},
  6810  		},
  6811  		Memory: NodeMemoryResources{
  6812  			MemoryMB: int64(100000),
  6813  		},
  6814  		Networks: Networks{
  6815  			{
  6816  				Device: "foo",
  6817  			},
  6818  			{
  6819  				Mode: "foo/bar",
  6820  			},
  6821  		},
  6822  	}, res)
  6823  }
  6824  
  6825  func TestAllocatedResources_Canonicalize(t *testing.T) {
  6826  	ci.Parallel(t)
  6827  
  6828  	cases := map[string]struct {
  6829  		input    *AllocatedResources
  6830  		expected *AllocatedResources
  6831  	}{
  6832  		"base": {
  6833  			input: &AllocatedResources{
  6834  				Tasks: map[string]*AllocatedTaskResources{
  6835  					"task": {
  6836  						Networks: Networks{
  6837  							{
  6838  								IP:           "127.0.0.1",
  6839  								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
  6840  							},
  6841  						},
  6842  					},
  6843  				},
  6844  			},
  6845  			expected: &AllocatedResources{
  6846  				Tasks: map[string]*AllocatedTaskResources{
  6847  					"task": {
  6848  						Networks: Networks{
  6849  							{
  6850  								IP:           "127.0.0.1",
  6851  								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
  6852  							},
  6853  						},
  6854  					},
  6855  				},
  6856  				Shared: AllocatedSharedResources{
  6857  					Ports: AllocatedPorts{
  6858  						{
  6859  							Label:  "admin",
  6860  							Value:  8080,
  6861  							To:     0,
  6862  							HostIP: "127.0.0.1",
  6863  						},
  6864  					},
  6865  				},
  6866  			},
  6867  		},
  6868  		"base with existing": {
  6869  			input: &AllocatedResources{
  6870  				Tasks: map[string]*AllocatedTaskResources{
  6871  					"task": {
  6872  						Networks: Networks{
  6873  							{
  6874  								IP:           "127.0.0.1",
  6875  								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
  6876  							},
  6877  						},
  6878  					},
  6879  				},
  6880  				Shared: AllocatedSharedResources{
  6881  					Ports: AllocatedPorts{
  6882  						{
  6883  							Label:  "http",
  6884  							Value:  80,
  6885  							To:     8080,
  6886  							HostIP: "127.0.0.1",
  6887  						},
  6888  					},
  6889  				},
  6890  			},
  6891  			expected: &AllocatedResources{
  6892  				Tasks: map[string]*AllocatedTaskResources{
  6893  					"task": {
  6894  						Networks: Networks{
  6895  							{
  6896  								IP:           "127.0.0.1",
  6897  								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
  6898  							},
  6899  						},
  6900  					},
  6901  				},
  6902  				Shared: AllocatedSharedResources{
  6903  					Ports: AllocatedPorts{
  6904  						{
  6905  							Label:  "http",
  6906  							Value:  80,
  6907  							To:     8080,
  6908  							HostIP: "127.0.0.1",
  6909  						},
  6910  						{
  6911  							Label:  "admin",
  6912  							Value:  8080,
  6913  							To:     0,
  6914  							HostIP: "127.0.0.1",
  6915  						},
  6916  					},
  6917  				},
  6918  			},
  6919  		},
  6920  	}
  6921  	for name, tc := range cases {
  6922  		tc.input.Canonicalize()
  6923  		require.Exactly(t, tc.expected, tc.input, "case %s did not match", name)
  6924  	}
  6925  }
  6926  
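        // TestAllocatedSharedResources_Canonicalize verifies that dynamic and
        // reserved ports on shared networks are flattened into the Ports list
        // along with their host IP.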
  6927  func TestAllocatedSharedResources_Canonicalize(t *testing.T) {
  6928  	ci.Parallel(t)
  6929  
  6930  	a := &AllocatedSharedResources{
  6931  		Networks: []*NetworkResource{
  6932  			{
  6933  				IP: "127.0.0.1",
  6934  				DynamicPorts: []Port{
  6935  					{
  6936  						Label: "http",
  6937  						Value: 22222,
  6938  						To:    8080,
  6939  					},
  6940  				},
  6941  				ReservedPorts: []Port{
  6942  					{
  6943  						Label: "redis",
  6944  						Value: 6783,
  6945  						To:    6783,
  6946  					},
  6947  				},
  6948  			},
  6949  		},
  6950  	}
  6951  
  6952  	a.Canonicalize()
  6953  	require.Exactly(t, AllocatedPorts{
  6954  		{
  6955  			Label:  "http",
  6956  			Value:  22222,
  6957  			To:     8080,
  6958  			HostIP: "127.0.0.1",
  6959  		},
  6960  		{
  6961  			Label:  "redis",
  6962  			Value:  6783,
  6963  			To:     6783,
  6964  			HostIP: "127.0.0.1",
  6965  		},
  6966  	}, a.Ports)
  6967  }
  6968  
  6969  func TestTaskGroup_validateScriptChecksInGroupServices(t *testing.T) {
  6970  	ci.Parallel(t)
  6971  
  6972  	t.Run("service task not set", func(t *testing.T) {
  6973  		tg := &TaskGroup{
  6974  			Name: "group1",
  6975  			Services: []*Service{{
  6976  				Name:     "service1",
  6977  				TaskName: "", // unset
  6978  				Checks: []*ServiceCheck{{
  6979  					Name:     "check1",
  6980  					Type:     "script",
  6981  					TaskName: "", // unset
  6982  				}, {
  6983  					Name: "check2",
  6984  					Type: "ttl", // not script
  6985  				}, {
  6986  					Name:     "check3",
  6987  					Type:     "script",
  6988  					TaskName: "", // unset
  6989  				}},
  6990  			}, {
  6991  				Name: "service2",
  6992  				Checks: []*ServiceCheck{{
  6993  					Type:     "script",
  6994  					TaskName: "task1", // set
  6995  				}},
  6996  			}, {
  6997  				Name:     "service3",
  6998  				TaskName: "", // unset
  6999  				Checks: []*ServiceCheck{{
  7000  					Name:     "check1",
  7001  					Type:     "script",
  7002  					TaskName: "", // unset
  7003  				}},
  7004  			}},
  7005  		}
  7006  
  7007  		errStr := tg.validateScriptChecksInGroupServices().Error()
  7008  		require.Contains(t, errStr, "Service [group1]->service1 or Check check1 must specify task parameter")
  7009  		require.Contains(t, errStr, "Service [group1]->service1 or Check check3 must specify task parameter")
  7010  		require.Contains(t, errStr, "Service [group1]->service3 or Check check1 must specify task parameter")
  7011  	})
  7012  
  7013  	t.Run("service task set", func(t *testing.T) {
  7014  		tgOK := &TaskGroup{
  7015  			Name: "group1",
  7016  			Services: []*Service{{
  7017  				Name:     "service1",
  7018  				TaskName: "task1",
  7019  				Checks: []*ServiceCheck{{
  7020  					Name: "check1",
  7021  					Type: "script",
  7022  				}, {
  7023  					Name: "check2",
  7024  					Type: "ttl",
  7025  				}, {
  7026  					Name: "check3",
  7027  					Type: "script",
  7028  				}},
  7029  			}},
  7030  		}
  7031  
  7032  		mErrOK := tgOK.validateScriptChecksInGroupServices()
  7033  		require.Nil(t, mErrOK)
  7034  	})
  7035  }
  7036  
  7037  func TestComparableResources_Superset(t *testing.T) {
  7038  	ci.Parallel(t)
  7039  
  7040  	base := &ComparableResources{
  7041  		Flattened: AllocatedTaskResources{
  7042  			Cpu: AllocatedCpuResources{
  7043  				CpuShares:     4000,
  7044  				ReservedCores: []uint16{0, 1, 2, 3},
  7045  			},
  7046  			Memory: AllocatedMemoryResources{MemoryMB: 4096},
  7047  		},
  7048  		Shared: AllocatedSharedResources{DiskMB: 10000},
  7049  	}
  7050  	cases := []struct {
  7051  		a         *ComparableResources
  7052  		b         *ComparableResources
  7053  		dimension string
  7054  	}{
  7055  		{
  7056  			a: base,
  7057  			b: &ComparableResources{
  7058  				Flattened: AllocatedTaskResources{
  7059  					Cpu: AllocatedCpuResources{CpuShares: 1000, ReservedCores: []uint16{0}},
  7060  				},
  7061  			},
  7062  		},
  7063  		{
  7064  			a: base,
  7065  			b: &ComparableResources{
  7066  				Flattened: AllocatedTaskResources{
  7067  					Cpu: AllocatedCpuResources{CpuShares: 4000, ReservedCores: []uint16{0, 1, 2, 3}},
  7068  				},
  7069  			},
  7070  		},
  7071  		{
  7072  			a: base,
  7073  			b: &ComparableResources{
  7074  				Flattened: AllocatedTaskResources{
  7075  					Cpu: AllocatedCpuResources{CpuShares: 5000},
  7076  				},
  7077  			},
  7078  			dimension: "cpu",
  7079  		},
  7080  		{
  7081  			a: base,
  7082  			b: &ComparableResources{
  7083  				Flattened: AllocatedTaskResources{
  7084  					Cpu: AllocatedCpuResources{CpuShares: 1000, ReservedCores: []uint16{3, 4}},
  7085  				},
  7086  			},
  7087  			dimension: "cores",
  7088  		},
  7089  	}
  7090  
  7091  	for _, c := range cases {
  7092  		fit, dim := c.a.Superset(c.b)
  7093  		if c.dimension == "" {
  7094  			require.True(t, fit)
  7095  		} else {
  7096  			require.False(t, fit)
  7097  			require.Equal(t, c.dimension, dim)
  7098  		}
  7099  	}
  7100  }
  7101  
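        // requireErrors asserts that err is a *multierror.Error and that each
        // expected substring matches at least one of its wrapped errors. A
        // typical call site looks like this sketch (substrings illustrative):
        //
        //	err := job.Validate()
        //	requireErrors(t, err, "missing region", "missing datacenters")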
  7102  func requireErrors(t *testing.T, err error, expected ...string) {
  7103  	t.Helper()
  7104  	require.Error(t, err)
  7105  	mErr, ok := err.(*multierror.Error)
  7106  	require.True(t, ok)
  7107  
  7108  	var found []string
  7109  	for _, e := range expected {
  7110  		for _, actual := range mErr.Errors {
  7111  			if strings.Contains(actual.Error(), e) {
  7112  				found = append(found, e)
  7113  				break
  7114  			}
  7115  		}
  7116  	}
  7117  
  7118  	require.Equal(t, expected, found)
  7119  }