github.com/hernad/nomad@v1.6.112/nomad/structs/structs_test.go

     1  // Copyright (c) HashiCorp, Inc.
     2  // SPDX-License-Identifier: MPL-2.0
     3  
     4  package structs
     5  
     6  import (
     7  	"fmt"
     8  	"net"
     9  	"os"
    10  	"reflect"
    11  	"strings"
    12  	"testing"
    13  	"time"
    14  
    15  	"github.com/hashicorp/consul/api"
    16  	"github.com/hashicorp/go-multierror"
    17  	"github.com/hernad/nomad/ci"
    18  	"github.com/hernad/nomad/helper/pointer"
    19  	"github.com/hernad/nomad/helper/uuid"
    20  	"github.com/kr/pretty"
    21  	"github.com/shoenig/test/must"
    22  	"github.com/stretchr/testify/assert"
    23  	"github.com/stretchr/testify/require"
    24  )
    25  
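        // TestNamespace_Validate exercises Namespace.Validate: names must be
        // non-empty, free of slashes, and within the length limit, and the
        // description must not exceed its maximum length.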
    26  func TestNamespace_Validate(t *testing.T) {
    27  	ci.Parallel(t)
    28  	cases := []struct {
    29  		Test      string
    30  		Namespace *Namespace
    31  		Expected  string
    32  	}{
    33  		{
    34  			Test: "empty name",
    35  			Namespace: &Namespace{
    36  				Name: "",
    37  			},
    38  			Expected: "invalid name",
    39  		},
    40  		{
    41  			Test: "slashes in name",
    42  			Namespace: &Namespace{
    43  				Name: "foo/bar",
    44  			},
    45  			Expected: "invalid name",
    46  		},
    47  		{
    48  			Test: "too long name",
    49  			Namespace: &Namespace{
    50  				Name: strings.Repeat("a", 200),
    51  			},
    52  			Expected: "invalid name",
    53  		},
    54  		{
    55  			Test: "too long description",
    56  			Namespace: &Namespace{
    57  				Name:        "foo",
    58  				Description: strings.Repeat("a", 300),
    59  			},
    60  			Expected: "description longer than",
    61  		},
    62  		{
    63  			Test: "valid",
    64  			Namespace: &Namespace{
    65  				Name:        "foo",
    66  				Description: "bar",
    67  			},
    68  		},
    69  	}
    70  
    71  	for _, c := range cases {
    72  		t.Run(c.Test, func(t *testing.T) {
    73  			err := c.Namespace.Validate()
    74  			if err == nil {
    75  				if c.Expected == "" {
    76  					return
    77  				}
    78  
    79  				t.Fatalf("Expected error %q; got nil", c.Expected)
    80  			} else if c.Expected == "" {
    81  				t.Fatalf("Unexpected error %v", err)
    82  			} else if !strings.Contains(err.Error(), c.Expected) {
    83  				t.Fatalf("Expected error %q; got %v", c.Expected, err)
    84  			}
    85  		})
    86  	}
    87  }
    88  
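        // TestNamespace_SetHash verifies that SetHash returns the stored hash and
        // that changing the description, quota, meta, capabilities, or node pool
        // configuration each yields a new hash.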
    89  func TestNamespace_SetHash(t *testing.T) {
    90  	ci.Parallel(t)
    91  
    92  	ns := &Namespace{
    93  		Name:        "foo",
    94  		Description: "bar",
    95  		Quota:       "q1",
    96  		Capabilities: &NamespaceCapabilities{
    97  			EnabledTaskDrivers:  []string{"docker"},
    98  			DisabledTaskDrivers: []string{"raw_exec"},
    99  		},
   100  		NodePoolConfiguration: &NamespaceNodePoolConfiguration{
   101  			Default: "dev",
   102  			Allowed: []string{"default"},
   103  		},
   104  		Meta: map[string]string{
   105  			"a": "b",
   106  			"c": "d",
   107  		},
   108  	}
   109  	out1 := ns.SetHash()
   110  	must.NotNil(t, out1)
   111  	must.NotNil(t, ns.Hash)
   112  	must.Eq(t, out1, ns.Hash)
   113  
   114  	ns.Description = "bam"
   115  	out2 := ns.SetHash()
   116  	must.NotNil(t, out2)
   117  	must.NotNil(t, ns.Hash)
   118  	must.Eq(t, out2, ns.Hash)
   119  	must.NotEq(t, out1, out2)
   120  
   121  	ns.Quota = "q2"
   122  	out3 := ns.SetHash()
   123  	must.NotNil(t, out3)
   124  	must.NotNil(t, ns.Hash)
   125  	must.Eq(t, out3, ns.Hash)
   126  	must.NotEq(t, out2, out3)
   127  
   128  	ns.Meta["a"] = "c"
   129  	delete(ns.Meta, "c")
   130  	ns.Meta["d"] = "e"
   131  	out4 := ns.SetHash()
   132  	must.NotNil(t, out4)
   133  	must.NotNil(t, ns.Hash)
   134  	must.Eq(t, out4, ns.Hash)
   135  	must.NotEq(t, out3, out4)
   136  
   137  	ns.Capabilities.EnabledTaskDrivers = []string{"docker", "podman"}
   138  	ns.Capabilities.DisabledTaskDrivers = []string{}
   139  	out5 := ns.SetHash()
   140  	must.NotNil(t, out5)
   141  	must.NotNil(t, ns.Hash)
   142  	must.Eq(t, out5, ns.Hash)
   143  	must.NotEq(t, out4, out5)
   144  
   145  	ns.NodePoolConfiguration.Default = "default"
   146  	ns.NodePoolConfiguration.Allowed = []string{}
   147  	ns.NodePoolConfiguration.Denied = []string{"all"}
   148  	out6 := ns.SetHash()
   149  	must.NotNil(t, out6)
   150  	must.NotNil(t, ns.Hash)
   151  	must.Eq(t, out6, ns.Hash)
   152  	must.NotEq(t, out5, out6)
   153  }
   154  
   155  func TestNamespace_Copy(t *testing.T) {
   156  	ci.Parallel(t)
   157  
   158  	ns := &Namespace{
   159  		Name:        "foo",
   160  		Description: "bar",
   161  		Quota:       "q1",
   162  		Capabilities: &NamespaceCapabilities{
   163  			EnabledTaskDrivers:  []string{"docker"},
   164  			DisabledTaskDrivers: []string{"raw_exec"},
   165  		},
   166  		NodePoolConfiguration: &NamespaceNodePoolConfiguration{
   167  			Default: "dev",
   168  			Allowed: []string{"default"},
   169  		},
   170  		Meta: map[string]string{
   171  			"a": "b",
   172  			"c": "d",
   173  		},
   174  	}
   175  	ns.SetHash()
   176  
   177  	nsCopy := ns.Copy()
   178  	nsCopy.Name = "bar"
   179  	nsCopy.Description = "foo"
   180  	nsCopy.Quota = "q2"
   181  	nsCopy.Capabilities.EnabledTaskDrivers = []string{"exec"}
   182  	nsCopy.Capabilities.DisabledTaskDrivers = []string{"java"}
   183  	nsCopy.NodePoolConfiguration.Default = "default"
   184  	nsCopy.NodePoolConfiguration.Allowed = []string{}
   185  	nsCopy.NodePoolConfiguration.Denied = []string{"dev"}
   186  	nsCopy.Meta["a"] = "z"
   187  	must.NotEq(t, ns, nsCopy)
   188  
   189  	nsCopy2 := ns.Copy()
   190  	must.Eq(t, ns, nsCopy2)
   191  }
   192  
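        // TestAuthenticatedIdentity_String verifies the string form produced for
        // each way a request can be authenticated: ACL token accessor, workload
        // identity claim, client ID, or TLS name with remote IP.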
   193  func TestAuthenticatedIdentity_String(t *testing.T) {
   194  	ci.Parallel(t)
   195  
   196  	testCases := []struct {
   197  		name                       string
   198  		inputAuthenticatedIdentity *AuthenticatedIdentity
   199  		expectedOutput             string
   200  	}{
   201  		{
   202  			name:                       "nil",
   203  			inputAuthenticatedIdentity: nil,
   204  			expectedOutput:             "unauthenticated",
   205  		},
   206  		{
   207  			name: "ACL token",
   208  			inputAuthenticatedIdentity: &AuthenticatedIdentity{
   209  				ACLToken: &ACLToken{
   210  					AccessorID: "my-testing-accessor-id",
   211  				},
   212  			},
   213  			expectedOutput: "token:my-testing-accessor-id",
   214  		},
   215  		{
   216  			name: "alloc claim",
   217  			inputAuthenticatedIdentity: &AuthenticatedIdentity{
   218  				Claims: &IdentityClaims{
   219  					AllocationID: "my-testing-alloc-id",
   220  				},
   221  			},
   222  			expectedOutput: "alloc:my-testing-alloc-id",
   223  		},
   224  		{
   225  			name: "client",
   226  			inputAuthenticatedIdentity: &AuthenticatedIdentity{
   227  				ClientID: "my-testing-client-id",
   228  			},
   229  			expectedOutput: "client:my-testing-client-id",
   230  		},
   231  		{
   232  			name: "tls remote IP",
   233  			inputAuthenticatedIdentity: &AuthenticatedIdentity{
   234  				TLSName:  "my-testing-tls-name",
   235  				RemoteIP: net.IPv4(192, 168, 135, 232),
   236  			},
   237  			expectedOutput: "my-testing-tls-name:192.168.135.232",
   238  		},
   239  	}
   240  
   241  	for _, tc := range testCases {
   242  		t.Run(tc.name, func(t *testing.T) {
   243  			actualOutput := tc.inputAuthenticatedIdentity.String()
   244  			must.Eq(t, tc.expectedOutput, actualOutput)
   245  		})
   246  	}
   247  }
   248  
   249  func TestJob_Validate(t *testing.T) {
   250  	ci.Parallel(t)
   251  
   252  	tests := []struct {
   253  		name   string
   254  		job    *Job
   255  		expErr []string
   256  	}{
   257  		{
   258  			name: "job is empty",
   259  			job:  &Job{},
   260  			expErr: []string{
   261  				"datacenters",
   262  				"job ID",
   263  				"job name",
   264  				"job region",
   265  				"job type",
   266  				"namespace",
   267  				"task groups",
   268  			},
   269  		},
   270  		{
   271  			name: "job type is invalid",
   272  			job: &Job{
   273  				Type: "invalid-job-type",
   274  			},
   275  			expErr: []string{
   276  				`Invalid job type: "invalid-job-type"`,
   277  			},
   278  		},
   279  		{
   280  			name: "job periodic specification type is missing",
   281  			job: &Job{
   282  				Type: JobTypeService,
   283  				Periodic: &PeriodicConfig{
   284  					Enabled: true,
   285  				},
   286  			},
   287  			expErr: []string{
   288  				`Unknown periodic specification type ""`,
   289  				"Must specify a spec",
   290  			},
   291  		},
   292  		{
   293  			name: "job datacenter is an empty string",
   294  			job: &Job{
   295  				Datacenters: []string{""},
   296  			},
   297  			expErr: []string{
   298  				"datacenter must be non-empty string",
   299  			},
   300  		},
   301  		{
   302  			name: "job task groups are invalid",
   303  			job: &Job{
   304  				Region:      "global",
   305  				ID:          uuid.Generate(),
   306  				Namespace:   "test",
   307  				Name:        "my-job",
   308  				Type:        JobTypeService,
   309  				Priority:    JobDefaultPriority,
   310  				Datacenters: []string{"*"},
   311  				TaskGroups: []*TaskGroup{
   312  					{
   313  						Name: "web",
   314  						RestartPolicy: &RestartPolicy{
   315  							Interval: 5 * time.Minute,
   316  							Delay:    10 * time.Second,
   317  							Attempts: 10,
   318  						},
   319  					},
   320  					{
   321  						Name: "web",
   322  						RestartPolicy: &RestartPolicy{
   323  							Interval: 5 * time.Minute,
   324  							Delay:    10 * time.Second,
   325  							Attempts: 10,
   326  						},
   327  					},
   328  					{
   329  						RestartPolicy: &RestartPolicy{
   330  							Interval: 5 * time.Minute,
   331  							Delay:    10 * time.Second,
   332  							Attempts: 10,
   333  						},
   334  					},
   335  				},
   336  			},
   337  			expErr: []string{
   338  				"2 redefines 'web' from group 1",
   339  				"group 3 missing name",
   340  				"Task group web validation failed",
   341  				"Missing tasks for task group",
   342  				"Unsupported restart mode",
   343  				"Task Group web should have a reschedule policy",
   344  				"Task Group web should have an ephemeral disk object",
   345  			},
   346  		},
   347  	}
   348  	for _, tc := range tests {
   349  		t.Run(tc.name, func(t *testing.T) {
   350  			err := tc.job.Validate()
   351  			requireErrors(t, err, tc.expErr...)
   352  		})
   353  	}
   354  
   355  }
   356  
   357  func TestJob_ValidateScaling(t *testing.T) {
   358  	ci.Parallel(t)
   359  
   360  	require := require.New(t)
   361  
   362  	p := &ScalingPolicy{
   363  		Policy:  nil, // allowed to be nil
   364  		Type:    ScalingPolicyTypeHorizontal,
   365  		Min:     5,
   366  		Max:     5,
   367  		Enabled: true,
   368  	}
   369  	job := testJob()
   370  	job.TaskGroups[0].Scaling = p
   371  	job.TaskGroups[0].Count = 5
   372  
   373  	require.NoError(job.Validate())
   374  
   375  	// min <= max
   376  	p.Max = 0
   377  	p.Min = 10
   378  	err := job.Validate()
   379  	requireErrors(t, err,
   380  		"task group count must not be less than minimum count in scaling policy",
   381  		"task group count must not be greater than maximum count in scaling policy",
   382  	)
   383  
   384  	// count <= max
   385  	p.Max = 0
   386  	p.Min = 5
   387  	job.TaskGroups[0].Count = 5
   388  	err = job.Validate()
   389  	requireErrors(t, err,
   390  		"task group count must not be greater than maximum count in scaling policy",
   391  	)
   392  
   393  	// min <= count
   394  	job.TaskGroups[0].Count = 0
   395  	p.Min = 5
   396  	p.Max = 5
   397  	err = job.Validate()
   398  	requireErrors(t, err,
   399  		"task group count must not be less than minimum count in scaling policy",
   400  	)
   401  }
   402  
   403  func TestJob_ValidateNullChar(t *testing.T) {
   404  	ci.Parallel(t)
   405  
   406  	assert := assert.New(t)
   407  
   408  	// job id should not allow null characters
   409  	job := testJob()
   410  	job.ID = "id_with\000null_character"
   411  	assert.Error(job.Validate(), "null character in job ID should not validate")
   412  
   413  	// job name should not allow null characters
   414  	job.ID = "happy_little_job_id"
   415  	job.Name = "my job name with \000 characters"
   416  	assert.Error(job.Validate(), "null character in job name should not validate")
   417  
   418  	// task group name should not allow null characters
   419  	job.Name = "my job"
   420  	job.TaskGroups[0].Name = "oh_no_another_\000_char"
   421  	assert.Error(job.Validate(), "null character in task group name should not validate")
   422  
   423  	// task name should not allow null characters
   424  	job.TaskGroups[0].Name = "so_much_better"
   425  	job.TaskGroups[0].Tasks[0].Name = "ive_had_it_with_these_\000_chars_in_these_names"
   426  	assert.Error(job.Validate(), "null character in task name should not validate")
   427  }
   428  
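        // TestJob_Warnings verifies non-fatal validation warnings, such as
        // max_parallel exceeding the group count, mixed auto_promote settings
        // across groups, and the deprecated template vault_grace field.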
   429  func TestJob_Warnings(t *testing.T) {
   430  	ci.Parallel(t)
   431  
   432  	cases := []struct {
   433  		Name     string
   434  		Job      *Job
   435  		Expected []string
   436  	}{
   437  		{
   438  			Name:     "Higher counts for update block",
   439  			Expected: []string{"max parallel count is greater"},
   440  			Job: &Job{
   441  				Type: JobTypeService,
   442  				TaskGroups: []*TaskGroup{
   443  					{
   444  						Name:  "foo",
   445  						Count: 2,
   446  						Update: &UpdateStrategy{
   447  							MaxParallel: 10,
   448  						},
   449  					},
   450  				},
   451  			},
   452  		},
   453  		{
   454  			Name:     "AutoPromote mixed TaskGroups",
   455  			Expected: []string{"auto_promote must be true for all groups"},
   456  			Job: &Job{
   457  				Type: JobTypeService,
   458  				TaskGroups: []*TaskGroup{
   459  					{
   460  						Update: &UpdateStrategy{
   461  							AutoPromote: true,
   462  						},
   463  					},
   464  					{
   465  						Update: &UpdateStrategy{
   466  							AutoPromote: false,
   467  							Canary:      1,
   468  						},
   469  					},
   470  				},
   471  			},
   472  		},
   473  		{
   474  			Name:     "no error for mixed but implied AutoPromote",
   475  			Expected: []string{},
   476  			Job: &Job{
   477  				Type: JobTypeService,
   478  				TaskGroups: []*TaskGroup{
   479  					{
   480  						Update: &UpdateStrategy{
   481  							AutoPromote: true,
   482  						},
   483  					},
   484  					{
   485  						Update: &UpdateStrategy{
   486  							AutoPromote: false,
   487  							Canary:      0,
   488  						},
   489  					},
   490  				},
   491  			},
   492  		},
   493  		{
   494  			Name:     "Template.VaultGrace Deprecated",
   495  			Expected: []string{"VaultGrace has been deprecated as of Nomad 0.11 and ignored since Vault 0.5. Please remove VaultGrace / vault_grace from template block."},
   496  			Job: &Job{
   497  				Type: JobTypeService,
   498  				TaskGroups: []*TaskGroup{
   499  					{
   500  						Tasks: []*Task{
   501  							{
   502  								Templates: []*Template{
   503  									{
   504  										VaultGrace: 1,
   505  									},
   506  								},
   507  							},
   508  						},
   509  					},
   510  				},
   511  			},
   512  		},
   513  		{
   514  			Name:     "Update.MaxParallel warning",
   515  			Expected: []string{"Update max parallel count is greater than task group count (5 > 2). A destructive change would result in the simultaneous replacement of all allocations."},
   516  			Job: &Job{
   517  				Type: JobTypeService,
   518  				TaskGroups: []*TaskGroup{
   519  					{
   520  						Count: 2,
   521  						Update: &UpdateStrategy{
   522  							MaxParallel: 5,
   523  						},
   524  					},
   525  				},
   526  			},
   527  		},
   528  		{
   529  			Name:     "Update.MaxParallel no warning",
   530  			Expected: []string{},
   531  			Job: &Job{
   532  				Type: JobTypeService,
   533  				TaskGroups: []*TaskGroup{
   534  					{
   535  						Count: 1,
   536  						Update: &UpdateStrategy{
   537  							MaxParallel: 5,
   538  						},
   539  					},
   540  				},
   541  			},
   542  		},
   543  	}
   544  
   545  	for _, c := range cases {
   546  		t.Run(c.Name, func(t *testing.T) {
   547  			warnings := c.Job.Warnings()
   548  			if warnings == nil {
   549  				if len(c.Expected) == 0 {
   550  					return
   551  				}
   552  				t.Fatal("Got no warnings when they were expected")
   553  			}
   554  
   555  			a := warnings.Error()
   556  			for _, e := range c.Expected {
   557  				if !strings.Contains(a, e) {
   558  					t.Fatalf("Got warnings %q; didn't contain %q", a, e)
   559  				}
   560  			}
   561  		})
   562  	}
   563  }
   564  
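        // TestJob_SpecChanged verifies that SpecChanged ignores mutable,
        // server-set fields such as Status and ModifyIndex while still detecting
        // genuine specification changes.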
   565  func TestJob_SpecChanged(t *testing.T) {
   566  	ci.Parallel(t)
   567  
   568  	// Get a base test job
   569  	base := testJob()
   570  
   571  	// Only modify the indexes/mutable state of the job
   572  	mutatedBase := base.Copy()
   573  	mutatedBase.Status = "foo"
   574  	mutatedBase.ModifyIndex = base.ModifyIndex + 100
   575  
   576  	// changed contains a spec change that should be detected
   577  	change := base.Copy()
   578  	change.Priority = 99
   579  
   580  	cases := []struct {
   581  		Name     string
   582  		Original *Job
   583  		New      *Job
   584  		Changed  bool
   585  	}{
   586  		{
   587  			Name:     "Same job except mutable indexes",
   588  			Changed:  false,
   589  			Original: base,
   590  			New:      mutatedBase,
   591  		},
   592  		{
   593  			Name:     "Different",
   594  			Changed:  true,
   595  			Original: base,
   596  			New:      change,
   597  		},
   598  		{
   599  			Name:     "With Constraints",
   600  			Changed:  false,
   601  			Original: &Job{Constraints: []*Constraint{{"A", "B", "="}}},
   602  			New:      &Job{Constraints: []*Constraint{{"A", "B", "="}}},
   603  		},
   604  		{
   605  			Name:     "With Affinities",
   606  			Changed:  false,
   607  			Original: &Job{Affinities: []*Affinity{{"A", "B", "=", 1}}},
   608  			New:      &Job{Affinities: []*Affinity{{"A", "B", "=", 1}}},
   609  		},
   610  	}
   611  
   612  	for _, c := range cases {
   613  		t.Run(c.Name, func(t *testing.T) {
   614  			if actual := c.Original.SpecChanged(c.New); actual != c.Changed {
   615  				t.Fatalf("SpecChanged() returned %v; want %v", actual, c.Changed)
   616  			}
   617  		})
   618  	}
   619  }
   620  
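        // testJob returns a minimal service job that passes validation; tests
        // mutate the returned job to trigger specific failures.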
   621  func testJob() *Job {
   622  	return &Job{
   623  		Region:      "global",
   624  		ID:          uuid.Generate(),
   625  		Namespace:   "test",
   626  		Name:        "my-job",
   627  		Type:        JobTypeService,
   628  		Priority:    JobDefaultPriority,
   629  		AllAtOnce:   false,
   630  		Datacenters: []string{"*"},
   631  		NodePool:    NodePoolDefault,
   632  		Constraints: []*Constraint{
   633  			{
   634  				LTarget: "$attr.kernel.name",
   635  				RTarget: "linux",
   636  				Operand: "=",
   637  			},
   638  		},
   639  		Periodic: &PeriodicConfig{
   640  			Enabled: false,
   641  		},
   642  		TaskGroups: []*TaskGroup{
   643  			{
   644  				Name:          "web",
   645  				Count:         10,
   646  				EphemeralDisk: DefaultEphemeralDisk(),
   647  				RestartPolicy: &RestartPolicy{
   648  					Mode:     RestartPolicyModeFail,
   649  					Attempts: 3,
   650  					Interval: 10 * time.Minute,
   651  					Delay:    1 * time.Minute,
   652  				},
   653  				ReschedulePolicy: &ReschedulePolicy{
   654  					Interval:      5 * time.Minute,
   655  					Attempts:      10,
   656  					Delay:         5 * time.Second,
   657  					DelayFunction: "constant",
   658  				},
   659  				Networks: []*NetworkResource{
   660  					{
   661  						DynamicPorts: []Port{
   662  							{Label: "http"},
   663  						},
   664  					},
   665  				},
   666  				Services: []*Service{
   667  					{
   668  						Name:      "${TASK}-frontend",
   669  						PortLabel: "http",
   670  						Provider:  "consul",
   671  					},
   672  				},
   673  				Tasks: []*Task{
   674  					{
   675  						Name:   "web",
   676  						Driver: "exec",
   677  						Config: map[string]interface{}{
   678  							"command": "/bin/date",
   679  						},
   680  						Env: map[string]string{
   681  							"FOO": "bar",
   682  						},
   683  						Artifacts: []*TaskArtifact{
   684  							{
   685  								GetterSource: "http://foo.com",
   686  							},
   687  						},
   688  						Identity: &WorkloadIdentity{
   689  							Env:  true,
   690  							File: true,
   691  						},
   692  						Resources: &Resources{
   693  							CPU:      500,
   694  							MemoryMB: 256,
   695  						},
   696  						LogConfig: &LogConfig{
   697  							MaxFiles:      10,
   698  							MaxFileSizeMB: 1,
   699  						},
   700  					},
   701  				},
   702  				Meta: map[string]string{
   703  					"elb_check_type":     "http",
   704  					"elb_check_interval": "30s",
   705  					"elb_check_min":      "3",
   706  				},
   707  			},
   708  		},
   709  		Meta: map[string]string{
   710  			"owner": "armon",
   711  		},
   712  	}
   713  }
   714  
   715  func TestJob_Copy(t *testing.T) {
   716  	ci.Parallel(t)
   717  
   718  	j := testJob()
   719  	c := j.Copy()
   720  	if !reflect.DeepEqual(j, c) {
   721  		t.Fatalf("Copy() returned an unequal Job; got %#v; want %#v", c, j)
   722  	}
   723  }
   724  
   725  func TestJob_IsPeriodic(t *testing.T) {
   726  	ci.Parallel(t)
   727  
   728  	j := &Job{
   729  		Type: JobTypeService,
   730  		Periodic: &PeriodicConfig{
   731  			Enabled: true,
   732  		},
   733  	}
   734  	if !j.IsPeriodic() {
   735  		t.Fatalf("IsPeriodic() returned false on periodic job")
   736  	}
   737  
   738  	j = &Job{
   739  		Type: JobTypeService,
   740  	}
   741  	if j.IsPeriodic() {
   742  		t.Fatalf("IsPeriodic() returned true on non-periodic job")
   743  	}
   744  }
   745  
   746  func TestJob_IsPeriodicActive(t *testing.T) {
   747  	ci.Parallel(t)
   748  
   749  	cases := []struct {
   750  		job    *Job
   751  		active bool
   752  	}{
   753  		{
   754  			job: &Job{
   755  				Type: JobTypeService,
   756  				Periodic: &PeriodicConfig{
   757  					Enabled: true,
   758  				},
   759  			},
   760  			active: true,
   761  		},
   762  		{
   763  			job: &Job{
   764  				Type: JobTypeService,
   765  				Periodic: &PeriodicConfig{
   766  					Enabled: false,
   767  				},
   768  			},
   769  			active: false,
   770  		},
   771  		{
   772  			job: &Job{
   773  				Type: JobTypeService,
   774  				Periodic: &PeriodicConfig{
   775  					Enabled: true,
   776  				},
   777  				Stop: true,
   778  			},
   779  			active: false,
   780  		},
   781  		{
   782  			job: &Job{
   783  				Type: JobTypeService,
   784  				Periodic: &PeriodicConfig{
   785  					Enabled: false,
   786  				},
   787  				ParameterizedJob: &ParameterizedJobConfig{},
   788  			},
   789  			active: false,
   790  		},
   791  	}
   792  
   793  	for i, c := range cases {
   794  		if act := c.job.IsPeriodicActive(); act != c.active {
   795  			t.Fatalf("case %d failed: got %v; want %v", i, act, c.active)
   796  		}
   797  	}
   798  }
   799  
   800  func TestJob_SystemJob_Validate(t *testing.T) {
        	ci.Parallel(t)
   801  	j := testJob()
   802  	j.Type = JobTypeSystem
   803  	j.TaskGroups[0].ReschedulePolicy = nil
   804  	j.Canonicalize()
   805  
   806  	err := j.Validate()
   807  	if err == nil || !strings.Contains(err.Error(), "exceed") {
   808  		t.Fatalf("expect error due to count")
   809  	}
   810  
   811  	j.TaskGroups[0].Count = 0
   812  	if err := j.Validate(); err != nil {
   813  		t.Fatalf("unexpected err: %v", err)
   814  	}
   815  
   816  	j.TaskGroups[0].Count = 1
   817  	if err := j.Validate(); err != nil {
   818  		t.Fatalf("unexpected err: %v", err)
   819  	}
   820  
   821  	// Add affinities at the job, task group, and task levels; these should fail validation.
   822  
   823  	j.Affinities = []*Affinity{{
   824  		Operand: "=",
   825  		LTarget: "${node.datacenter}",
   826  		RTarget: "dc1",
   827  	}}
   828  	j.TaskGroups[0].Affinities = []*Affinity{{
   829  		Operand: "=",
   830  		LTarget: "${meta.rack}",
   831  		RTarget: "r1",
   832  	}}
   833  	j.TaskGroups[0].Tasks[0].Affinities = []*Affinity{{
   834  		Operand: "=",
   835  		LTarget: "${meta.rack}",
   836  		RTarget: "r1",
   837  	}}
   838  	err = j.Validate()
   839  	require.NotNil(t, err)
   840  	require.Contains(t, err.Error(), "System jobs may not have an affinity block")
   841  
   842  	// Add spreads at the job and task group levels; these should fail validation.
   843  	j.Spreads = []*Spread{{
   844  		Attribute: "${node.datacenter}",
   845  		Weight:    100,
   846  	}}
   847  	j.TaskGroups[0].Spreads = []*Spread{{
   848  		Attribute: "${node.datacenter}",
   849  		Weight:    100,
   850  	}}
   851  
   852  	err = j.Validate()
   853  	require.NotNil(t, err)
   854  	require.Contains(t, err.Error(), "System jobs may not have a spread block")
   855  
   856  }
   857  
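        // TestJob_Vault verifies that Job.Vault collects the Vault blocks of all
        // tasks, keyed by task group and task name.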
   858  func TestJob_Vault(t *testing.T) {
   859  	ci.Parallel(t)
   860  
   861  	j0 := &Job{}
   862  	e0 := make(map[string]map[string]*Vault)
   863  
   864  	vj1 := &Vault{
   865  		Policies: []string{
   866  			"p1",
   867  			"p2",
   868  		},
   869  	}
   870  	vj2 := &Vault{
   871  		Policies: []string{
   872  			"p3",
   873  			"p4",
   874  		},
   875  	}
   876  	vj3 := &Vault{
   877  		Policies: []string{
   878  			"p5",
   879  		},
   880  	}
   881  	j1 := &Job{
   882  		TaskGroups: []*TaskGroup{
   883  			{
   884  				Name: "foo",
   885  				Tasks: []*Task{
   886  					{
   887  						Name: "t1",
   888  					},
   889  					{
   890  						Name:  "t2",
   891  						Vault: vj1,
   892  					},
   893  				},
   894  			},
   895  			{
   896  				Name: "bar",
   897  				Tasks: []*Task{
   898  					{
   899  						Name:  "t3",
   900  						Vault: vj2,
   901  					},
   902  					{
   903  						Name:  "t4",
   904  						Vault: vj3,
   905  					},
   906  				},
   907  			},
   908  		},
   909  	}
   910  
   911  	e1 := map[string]map[string]*Vault{
   912  		"foo": {
   913  			"t2": vj1,
   914  		},
   915  		"bar": {
   916  			"t3": vj2,
   917  			"t4": vj3,
   918  		},
   919  	}
   920  
   921  	cases := []struct {
   922  		Job      *Job
   923  		Expected map[string]map[string]*Vault
   924  	}{
   925  		{
   926  			Job:      j0,
   927  			Expected: e0,
   928  		},
   929  		{
   930  			Job:      j1,
   931  			Expected: e1,
   932  		},
   933  	}
   934  
   935  	for i, c := range cases {
   936  		got := c.Job.Vault()
   937  		if !reflect.DeepEqual(got, c.Expected) {
   938  			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
   939  		}
   940  	}
   941  }
   942  
   943  func TestJob_ConnectTasks(t *testing.T) {
   944  	ci.Parallel(t)
   945  	r := require.New(t)
   946  
   947  	j0 := &Job{
   948  		TaskGroups: []*TaskGroup{{
   949  			Name: "tg1",
   950  			Tasks: []*Task{{
   951  				Name: "connect-proxy-task1",
   952  				Kind: "connect-proxy:task1",
   953  			}, {
   954  				Name: "task2",
   955  				Kind: "task2",
   956  			}, {
   957  				Name: "connect-proxy-task3",
   958  				Kind: "connect-proxy:task3",
   959  			}},
   960  		}, {
   961  			Name: "tg2",
   962  			Tasks: []*Task{{
   963  				Name: "task1",
   964  				Kind: "task1",
   965  			}, {
   966  				Name: "connect-proxy-task2",
   967  				Kind: "connect-proxy:task2",
   968  			}},
   969  		}, {
   970  			Name: "tg3",
   971  			Tasks: []*Task{{
   972  				Name: "ingress",
   973  				Kind: "connect-ingress:ingress",
   974  			}},
   975  		}, {
   976  			Name: "tg4",
   977  			Tasks: []*Task{{
   978  				Name: "frontend",
   979  				Kind: "connect-native:uuid-fe",
   980  			}, {
   981  				Name: "generator",
   982  				Kind: "connect-native:uuid-api",
   983  			}},
   984  		}, {
   985  			Name: "tg5",
   986  			Tasks: []*Task{{
   987  				Name: "t1000",
   988  				Kind: "connect-terminating:t1000",
   989  			}},
   990  		}},
   991  	}
   992  
   993  	connectTasks := j0.ConnectTasks()
   994  
   995  	exp := []TaskKind{
   996  		NewTaskKind(ConnectProxyPrefix, "task1"),
   997  		NewTaskKind(ConnectProxyPrefix, "task3"),
   998  		NewTaskKind(ConnectProxyPrefix, "task2"),
   999  		NewTaskKind(ConnectIngressPrefix, "ingress"),
  1000  		NewTaskKind(ConnectNativePrefix, "uuid-fe"),
  1001  		NewTaskKind(ConnectNativePrefix, "uuid-api"),
  1002  		NewTaskKind(ConnectTerminatingPrefix, "t1000"),
  1003  	}
  1004  
  1005  	r.Equal(exp, connectTasks)
  1006  }
  1007  
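        // TestJob_RequiredSignals verifies that signals declared via Vault and
        // template "signal" change modes, as well as task kill signals, are
        // collected per task group and task.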
  1008  func TestJob_RequiredSignals(t *testing.T) {
  1009  	ci.Parallel(t)
  1010  
  1011  	j0 := &Job{}
  1012  	e0 := make(map[string]map[string][]string)
  1013  
  1014  	vj1 := &Vault{
  1015  		Policies:   []string{"p1"},
  1016  		ChangeMode: VaultChangeModeNoop,
  1017  	}
  1018  	vj2 := &Vault{
  1019  		Policies:     []string{"p1"},
  1020  		ChangeMode:   VaultChangeModeSignal,
  1021  		ChangeSignal: "SIGUSR1",
  1022  	}
  1023  	tj1 := &Template{
  1024  		SourcePath: "foo",
  1025  		DestPath:   "bar",
  1026  		ChangeMode: TemplateChangeModeNoop,
  1027  	}
  1028  	tj2 := &Template{
  1029  		SourcePath:   "foo",
  1030  		DestPath:     "bar",
  1031  		ChangeMode:   TemplateChangeModeSignal,
  1032  		ChangeSignal: "SIGUSR2",
  1033  	}
  1034  	j1 := &Job{
  1035  		TaskGroups: []*TaskGroup{
  1036  			{
  1037  				Name: "foo",
  1038  				Tasks: []*Task{
  1039  					{
  1040  						Name: "t1",
  1041  					},
  1042  					{
  1043  						Name:      "t2",
  1044  						Vault:     vj2,
  1045  						Templates: []*Template{tj2},
  1046  					},
  1047  				},
  1048  			},
  1049  			{
  1050  				Name: "bar",
  1051  				Tasks: []*Task{
  1052  					{
  1053  						Name:      "t3",
  1054  						Vault:     vj1,
  1055  						Templates: []*Template{tj1},
  1056  					},
  1057  					{
  1058  						Name:  "t4",
  1059  						Vault: vj2,
  1060  					},
  1061  				},
  1062  			},
  1063  		},
  1064  	}
  1065  
  1066  	e1 := map[string]map[string][]string{
  1067  		"foo": {
  1068  			"t2": {"SIGUSR1", "SIGUSR2"},
  1069  		},
  1070  		"bar": {
  1071  			"t4": {"SIGUSR1"},
  1072  		},
  1073  	}
  1074  
  1075  	j2 := &Job{
  1076  		TaskGroups: []*TaskGroup{
  1077  			{
  1078  				Name: "foo",
  1079  				Tasks: []*Task{
  1080  					{
  1081  						Name:       "t1",
  1082  						KillSignal: "SIGQUIT",
  1083  					},
  1084  				},
  1085  			},
  1086  		},
  1087  	}
  1088  
  1089  	e2 := map[string]map[string][]string{
  1090  		"foo": {
  1091  			"t1": {"SIGQUIT"},
  1092  		},
  1093  	}
  1094  
  1095  	cases := []struct {
  1096  		Job      *Job
  1097  		Expected map[string]map[string][]string
  1098  	}{
  1099  		{
  1100  			Job:      j0,
  1101  			Expected: e0,
  1102  		},
  1103  		{
  1104  			Job:      j1,
  1105  			Expected: e1,
  1106  		},
  1107  		{
  1108  			Job:      j2,
  1109  			Expected: e2,
  1110  		},
  1111  	}
  1112  
  1113  	for i, c := range cases {
  1114  		got := c.Job.RequiredSignals()
  1115  		if !reflect.DeepEqual(got, c.Expected) {
  1116  			t.Fatalf("case %d: got %#v; want %#v", i+1, got, c.Expected)
  1117  		}
  1118  	}
  1119  }
  1120  
  1121  // TestJob_PartEqual exercises the Equal methods on Networks, Constraints,
        // and Affinities, which compare elements without regard to order.
  1122  func TestJob_PartEqual(t *testing.T) {
  1123  	ci.Parallel(t)
  1124  
  1125  	ns := &Networks{}
  1126  	require.True(t, ns.Equal(&Networks{}))
  1127  
  1128  	ns = &Networks{
  1129  		&NetworkResource{Device: "eth0"},
  1130  	}
  1131  	require.True(t, ns.Equal(&Networks{
  1132  		&NetworkResource{Device: "eth0"},
  1133  	}))
  1134  
  1135  	ns = &Networks{
  1136  		&NetworkResource{Device: "eth0"},
  1137  		&NetworkResource{Device: "eth1"},
  1138  		&NetworkResource{Device: "eth2"},
  1139  	}
  1140  	require.True(t, ns.Equal(&Networks{
  1141  		&NetworkResource{Device: "eth2"},
  1142  		&NetworkResource{Device: "eth0"},
  1143  		&NetworkResource{Device: "eth1"},
  1144  	}))
  1145  
  1146  	cs := &Constraints{
  1147  		&Constraint{"left0", "right0", "="},
  1148  		&Constraint{"left1", "right1", "="},
  1149  		&Constraint{"left2", "right2", "="},
  1150  	}
  1151  	require.True(t, cs.Equal(&Constraints{
  1152  		&Constraint{"left0", "right0", "="},
  1153  		&Constraint{"left2", "right2", "="},
  1154  		&Constraint{"left1", "right1", "="},
  1155  	}))
  1156  
  1157  	as := &Affinities{
  1158  		&Affinity{"left0", "right0", "=", 0},
  1159  		&Affinity{"left1", "right1", "=", 0},
  1160  		&Affinity{"left2", "right2", "=", 0},
  1161  	}
  1162  	require.True(t, as.Equal(&Affinities{
  1163  		&Affinity{"left0", "right0", "=", 0},
  1164  		&Affinity{"left2", "right2", "=", 0},
  1165  		&Affinity{"left1", "right1", "=", 0},
  1166  	}))
  1167  }
  1168  
  1169  func TestTask_UsesConnect(t *testing.T) {
  1170  	ci.Parallel(t)
  1171  
  1172  	t.Run("normal task", func(t *testing.T) {
  1173  		task := testJob().TaskGroups[0].Tasks[0]
  1174  		usesConnect := task.UsesConnect()
  1175  		require.False(t, usesConnect)
  1176  	})
  1177  
  1178  	t.Run("sidecar proxy", func(t *testing.T) {
  1179  		task := &Task{
  1180  			Name: "connect-proxy-task1",
  1181  			Kind: NewTaskKind(ConnectProxyPrefix, "task1"),
  1182  		}
  1183  		usesConnect := task.UsesConnect()
  1184  		require.True(t, usesConnect)
  1185  	})
  1186  
  1187  	t.Run("native task", func(t *testing.T) {
  1188  		task := &Task{
  1189  			Name: "task1",
  1190  			Kind: NewTaskKind(ConnectNativePrefix, "task1"),
  1191  		}
  1192  		usesConnect := task.UsesConnect()
  1193  		require.True(t, usesConnect)
  1194  	})
  1195  
  1196  	t.Run("ingress gateway", func(t *testing.T) {
  1197  		task := &Task{
  1198  			Name: "task1",
  1199  			Kind: NewTaskKind(ConnectIngressPrefix, "task1"),
  1200  		}
  1201  		usesConnect := task.UsesConnect()
  1202  		require.True(t, usesConnect)
  1203  	})
  1204  
  1205  	t.Run("terminating gateway", func(t *testing.T) {
  1206  		task := &Task{
  1207  			Name: "task1",
  1208  			Kind: NewTaskKind(ConnectTerminatingPrefix, "task1"),
  1209  		}
  1210  		usesConnect := task.UsesConnect()
  1211  		require.True(t, usesConnect)
  1212  	})
  1213  }
  1214  
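        // TestTaskGroup_UsesConnect verifies that a task group counts as using
        // Consul Connect when any of its services is Connect-native, has a
        // sidecar service, or is a gateway.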
  1215  func TestTaskGroup_UsesConnect(t *testing.T) {
  1216  	ci.Parallel(t)
  1217  
  1218  	try := func(t *testing.T, tg *TaskGroup, exp bool) {
  1219  		result := tg.UsesConnect()
  1220  		require.Equal(t, exp, result)
  1221  	}
  1222  
  1223  	t.Run("tg uses native", func(t *testing.T) {
  1224  		try(t, &TaskGroup{
  1225  			Services: []*Service{
  1226  				{Connect: nil},
  1227  				{Connect: &ConsulConnect{Native: true}},
  1228  			},
  1229  		}, true)
  1230  	})
  1231  
  1232  	t.Run("tg uses sidecar", func(t *testing.T) {
  1233  		try(t, &TaskGroup{
  1234  			Services: []*Service{{
  1235  				Connect: &ConsulConnect{
  1236  					SidecarService: &ConsulSidecarService{
  1237  						Port: "9090",
  1238  					},
  1239  				},
  1240  			}},
  1241  		}, true)
  1242  	})
  1243  
  1244  	t.Run("tg uses gateway", func(t *testing.T) {
  1245  		try(t, &TaskGroup{
  1246  			Services: []*Service{{
  1247  				Connect: &ConsulConnect{
  1248  					Gateway: consulIngressGateway1,
  1249  				},
  1250  			}},
  1251  		}, true)
  1252  	})
  1253  
  1254  	t.Run("tg does not use connect", func(t *testing.T) {
  1255  		try(t, &TaskGroup{
  1256  			Services: []*Service{
  1257  				{Connect: nil},
  1258  			},
  1259  		}, false)
  1260  	})
  1261  }
  1262  
  1263  func TestTaskGroup_Validate(t *testing.T) {
  1264  	ci.Parallel(t)
  1265  
  1266  	tests := []struct {
  1267  		name    string
  1268  		tg      *TaskGroup
  1269  		expErr  []string
  1270  		jobType string
  1271  	}{
  1272  		{
  1273  			name: "task group is missing basic specs",
  1274  			tg: &TaskGroup{
  1275  				Count: -1,
  1276  				RestartPolicy: &RestartPolicy{
  1277  					Interval: 5 * time.Minute,
  1278  					Delay:    10 * time.Second,
  1279  					Attempts: 10,
  1280  					Mode:     RestartPolicyModeDelay,
  1281  				},
  1282  				ReschedulePolicy: &ReschedulePolicy{
  1283  					Interval: 5 * time.Minute,
  1284  					Attempts: 5,
  1285  					Delay:    5 * time.Second,
  1286  				},
  1287  			},
  1288  			expErr: []string{
  1289  				"group name",
  1290  				"count can't be negative",
  1291  				"Missing tasks",
  1292  			},
  1293  			jobType: JobTypeService,
  1294  		},
  1295  		{
  1296  			name: "two tasks using same port",
  1297  			tg: &TaskGroup{
  1298  				Tasks: []*Task{
  1299  					{
  1300  						Name: "task-a",
  1301  						Resources: &Resources{
  1302  							Networks: []*NetworkResource{
  1303  								{
  1304  									ReservedPorts: []Port{{Label: "foo", Value: 123}},
  1305  								},
  1306  							},
  1307  						},
  1308  					},
  1309  					{
  1310  						Name: "task-b",
  1311  						Resources: &Resources{
  1312  							Networks: []*NetworkResource{
  1313  								{
  1314  									ReservedPorts: []Port{{Label: "foo", Value: 123}},
  1315  								},
  1316  							},
  1317  						},
  1318  					},
  1319  				},
  1320  			},
  1321  			expErr: []string{
  1322  				"Static port 123 already reserved by task-a:foo",
  1323  			},
  1324  			jobType: JobTypeService,
  1325  		},
  1326  		{
  1327  			name: "one task using same port twice",
  1328  			tg: &TaskGroup{
  1329  				Tasks: []*Task{
  1330  					{
  1331  						Name: "task-a",
  1332  						Resources: &Resources{
  1333  							Networks: []*NetworkResource{
  1334  								{
  1335  									ReservedPorts: []Port{
  1336  										{Label: "foo", Value: 123},
  1337  										{Label: "bar", Value: 123},
  1338  									},
  1339  								},
  1340  							},
  1341  						},
  1342  					},
  1343  				},
  1344  			},
  1345  			expErr: []string{
  1346  				"Static port 123 already reserved by task-a:foo",
  1347  			},
  1348  			jobType: JobTypeService,
  1349  		},
  1350  		{
  1351  			name: "multiple leaders defined and one empty task",
  1352  			tg: &TaskGroup{
  1353  				Name:  "web",
  1354  				Count: 1,
  1355  				Tasks: []*Task{
  1356  					{Name: "web", Leader: true},
  1357  					{Name: "web", Leader: true},
  1358  					{},
  1359  				},
  1360  				RestartPolicy: &RestartPolicy{
  1361  					Interval: 5 * time.Minute,
  1362  					Delay:    10 * time.Second,
  1363  					Attempts: 10,
  1364  					Mode:     RestartPolicyModeDelay,
  1365  				},
  1366  				ReschedulePolicy: &ReschedulePolicy{
  1367  					Interval:      5 * time.Minute,
  1368  					Attempts:      10,
  1369  					Delay:         5 * time.Second,
  1370  					DelayFunction: "constant",
  1371  				},
  1372  			},
  1373  			expErr: []string{
  1374  				"should have an ephemeral disk object",
  1375  				"2 redefines 'web' from task 1",
  1376  				"Task 3 missing name",
  1377  				"Only one task may be marked as leader",
  1378  				"Task web validation failed",
  1379  			},
  1380  			jobType: JobTypeService,
  1381  		},
  1382  		{
  1383  			name: "invalid update block for batch job",
  1384  			tg: &TaskGroup{
  1385  				Name:  "web",
  1386  				Count: 1,
  1387  				Tasks: []*Task{
  1388  					{Name: "web", Leader: true},
  1389  				},
  1390  				Update: DefaultUpdateStrategy.Copy(),
  1391  			},
  1392  			expErr: []string{
  1393  				"does not allow update block",
  1394  			},
  1395  			jobType: JobTypeBatch,
  1396  		},
  1397  		{
  1398  			name: "invalid reschedule policy for system job",
  1399  			tg: &TaskGroup{
  1400  				Count: -1,
  1401  				RestartPolicy: &RestartPolicy{
  1402  					Interval: 5 * time.Minute,
  1403  					Delay:    10 * time.Second,
  1404  					Attempts: 10,
  1405  					Mode:     RestartPolicyModeDelay,
  1406  				},
  1407  				ReschedulePolicy: &ReschedulePolicy{
  1408  					Interval: 5 * time.Minute,
  1409  					Attempts: 5,
  1410  					Delay:    5 * time.Second,
  1411  				},
  1412  			},
  1413  			expErr: []string{
  1414  				"System jobs should not have a reschedule policy",
  1415  			},
  1416  			jobType: JobTypeSystem,
  1417  		},
  1418  		{
  1419  			name: "duplicated port label",
  1420  			tg: &TaskGroup{
  1421  				Networks: []*NetworkResource{
  1422  					{
  1423  						DynamicPorts: []Port{{"http", 0, 80, ""}},
  1424  					},
  1425  				},
  1426  				Tasks: []*Task{
  1427  					{
  1428  						Resources: &Resources{
  1429  							Networks: []*NetworkResource{
  1430  								{
  1431  									DynamicPorts: []Port{{"http", 0, 80, ""}},
  1432  								},
  1433  							},
  1434  						},
  1435  					},
  1436  				},
  1437  			},
  1438  			expErr: []string{
  1439  				"Port label http already in use",
  1440  			},
  1441  			jobType: JobTypeService,
  1442  		},
  1443  		{
  1444  			name: "invalid volume type",
  1445  			tg: &TaskGroup{
  1446  				Volumes: map[string]*VolumeRequest{
  1447  					"foo": {
  1448  						Type:   "nothost",
  1449  						Source: "foo",
  1450  					},
  1451  				},
  1452  				Tasks: []*Task{
  1453  					{
  1454  						Name:      "task-a",
  1455  						Resources: &Resources{},
  1456  					},
  1457  				},
  1458  			},
  1459  			expErr: []string{
  1460  				"volume has unrecognized type nothost",
  1461  			},
  1462  			jobType: JobTypeService,
  1463  		},
  1464  		{
  1465  			name: "invalid volume with wrong CSI and canary specs",
  1466  			tg: &TaskGroup{
  1467  				Name: "group-a",
  1468  				Update: &UpdateStrategy{
  1469  					Canary: 1,
  1470  				},
  1471  				Volumes: map[string]*VolumeRequest{
  1472  					"foo": {
  1473  						Type:     "csi",
  1474  						PerAlloc: true,
  1475  					},
  1476  				},
  1477  				Tasks: []*Task{
  1478  					{
  1479  						Name:      "task-a",
  1480  						Resources: &Resources{},
  1481  					},
  1482  				},
  1483  			},
  1484  			expErr: []string{
  1485  				`volume has an empty source`,
  1486  				`volume cannot be per_alloc when canaries are in use`,
  1487  				`CSI volumes must have an attachment mode`,
  1488  				`CSI volumes must have an access mode`,
  1489  			},
  1490  			jobType: JobTypeService,
  1491  		},
  1492  		{
  1493  			name: "service check references non-existent task",
  1494  			tg: &TaskGroup{
  1495  				Name: "group-a",
  1496  				Services: []*Service{
  1497  					{
  1498  						Name:     "service-a",
  1499  						Provider: "consul",
  1500  						Checks: []*ServiceCheck{
  1501  							{
  1502  								Name:      "check-a",
  1503  								Type:      "tcp",
  1504  								TaskName:  "task-b",
  1505  								PortLabel: "http",
  1506  								Interval:  time.Second,
  1507  								Timeout:   time.Second,
  1508  							},
  1509  						},
  1510  					},
  1511  				},
  1512  				Tasks: []*Task{
  1513  					{Name: "task-a"},
  1514  				},
  1515  			},
  1516  			expErr: []string{
  1517  				"Check check-a invalid: refers to non-existent task task-b",
  1518  			},
  1519  			jobType: JobTypeService,
  1520  		},
  1521  		{
  1522  			name: "invalid volume mounts in tasks",
  1523  			tg: &TaskGroup{
  1524  				Volumes: map[string]*VolumeRequest{
  1525  					"foo": {
  1526  						Type: "host",
  1527  					},
  1528  				},
  1529  				Tasks: []*Task{
  1530  					{
  1531  						Name:      "task-a",
  1532  						Resources: &Resources{},
  1533  						VolumeMounts: []*VolumeMount{
  1534  							{
  1535  								Volume: "",
  1536  							},
  1537  						},
  1538  					},
  1539  					{
  1540  						Name:      "task-b",
  1541  						Resources: &Resources{},
  1542  						VolumeMounts: []*VolumeMount{
  1543  							{
  1544  								Volume: "foob",
  1545  							},
  1546  						},
  1547  					},
  1548  				},
  1549  			},
  1550  			expErr: []string{
  1551  				`Volume Mount (0) references an empty volume`,
  1552  				`Volume Mount (0) references undefined volume foob`,
  1553  			},
  1554  			jobType: JobTypeService,
  1555  		},
  1556  		{
  1557  			name: "services inside group using different providers",
  1558  			tg: &TaskGroup{
  1559  				Name: "group-a",
  1560  				Services: []*Service{
  1561  					{
  1562  						Name:     "service-a",
  1563  						Provider: "nomad",
  1564  					},
  1565  					{
  1566  						Name:     "service-b",
  1567  						Provider: "consul",
  1568  					},
  1569  				},
  1570  				Tasks: []*Task{{Name: "task-a"}},
  1571  			},
  1572  			expErr: []string{
  1573  				"Multiple service providers used: task group services must use the same provider",
  1574  			},
  1575  			jobType: JobTypeService,
  1576  		},
  1577  		{
  1578  			name: "conflicting progress deadline and kill timeout",
  1579  			tg: &TaskGroup{
  1580  				Name:  "web",
  1581  				Count: 1,
  1582  				Tasks: []*Task{
  1583  					{
  1584  						Name:        "web",
  1585  						Leader:      true,
  1586  						KillTimeout: DefaultUpdateStrategy.ProgressDeadline + 25*time.Minute,
  1587  					},
  1588  				},
  1589  				Update: DefaultUpdateStrategy.Copy(),
  1590  			},
  1591  			expErr: []string{
  1592  				"KillTimout (35m0s) longer than the group's ProgressDeadline (10m0s)", // "KillTimout" matches the misspelled error string in the validation code
  1593  			},
  1594  			jobType: JobTypeService,
  1595  		},
  1596  		{
  1597  			name: "progress_deadline 0 does not conflict with kill_timeout",
  1598  			tg: &TaskGroup{
  1599  				Name:  "web",
  1600  				Count: 1,
  1601  				Tasks: []*Task{
  1602  					{
  1603  						Name:        "web",
  1604  						Driver:      "mock_driver",
  1605  						Leader:      true,
  1606  						KillTimeout: DefaultUpdateStrategy.ProgressDeadline + 25*time.Minute,
  1607  						Resources:   DefaultResources(),
  1608  						LogConfig:   DefaultLogConfig(),
  1609  					},
  1610  				},
  1611  				Update: &UpdateStrategy{
  1612  					Stagger:          30 * time.Second,
  1613  					MaxParallel:      1,
  1614  					HealthCheck:      UpdateStrategyHealthCheck_Checks,
  1615  					MinHealthyTime:   10 * time.Second,
  1616  					HealthyDeadline:  5 * time.Minute,
  1617  					ProgressDeadline: 0,
  1618  					AutoRevert:       false,
  1619  					AutoPromote:      false,
  1620  					Canary:           0,
  1621  				},
  1622  				RestartPolicy:    NewRestartPolicy(JobTypeService),
  1623  				ReschedulePolicy: NewReschedulePolicy(JobTypeService),
  1624  				Migrate:          DefaultMigrateStrategy(),
  1625  				EphemeralDisk:    DefaultEphemeralDisk(),
  1626  			},
  1627  			jobType: JobTypeService,
  1628  		},
  1629  		{
  1630  			name: "service and task using different providers",
  1631  			tg: &TaskGroup{
  1632  				Name: "group-a",
  1633  				Services: []*Service{
  1634  					{
  1635  						Name:     "service-a",
  1636  						Provider: "nomad",
  1637  					},
  1638  				},
  1639  				Tasks: []*Task{
  1640  					{
  1641  						Name: "task-a",
  1642  						Services: []*Service{
  1643  							{
  1644  								Name:     "service-b",
  1645  								Provider: "consul",
  1646  							},
  1647  						},
  1648  					},
  1649  				},
  1650  			},
  1651  			expErr: []string{
  1652  				"Multiple service providers used: task group services must use the same provider",
  1653  			},
  1654  			jobType: JobTypeService,
  1655  		},
  1656  	}
  1657  
  1658  	for _, tc := range tests {
  1659  		t.Run(tc.name, func(t *testing.T) {
  1660  			j := testJob()
  1661  			j.Type = tc.jobType
  1662  
  1663  			err := tc.tg.Validate(j)
  1664  			if len(tc.expErr) > 0 {
  1665  				requireErrors(t, err, tc.expErr...)
  1666  			} else {
  1667  				must.NoError(t, err)
  1668  			}
  1669  		})
  1670  	}
  1671  }
  1672  
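        // TestTaskGroupNetwork_Validate covers group network validation: port
        // values and mappings must fit within 0-65535, static ports may only
        // collide across distinct host networks, and bridge-mode hostnames must
        // be valid DNS names.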
  1673  func TestTaskGroupNetwork_Validate(t *testing.T) {
  1674  	ci.Parallel(t)
  1675  
  1676  	cases := []struct {
  1677  		TG          *TaskGroup
  1678  		ErrContains string
  1679  	}{
  1680  		{
  1681  			TG: &TaskGroup{
  1682  				Name: "group-static-value-ok",
  1683  				Networks: Networks{
  1684  					&NetworkResource{
  1685  						ReservedPorts: []Port{
  1686  							{
  1687  								Label: "ok",
  1688  								Value: 65535,
  1689  							},
  1690  						},
  1691  					},
  1692  				},
  1693  			},
  1694  		},
  1695  		{
  1696  			TG: &TaskGroup{
  1697  				Name: "group-dynamic-value-ok",
  1698  				Networks: Networks{
  1699  					&NetworkResource{
  1700  						DynamicPorts: []Port{
  1701  							{
  1702  								Label: "ok",
  1703  								Value: 65535,
  1704  							},
  1705  						},
  1706  					},
  1707  				},
  1708  			},
  1709  		},
  1710  		{
  1711  			TG: &TaskGroup{
  1712  				Name: "group-static-to-ok",
  1713  				Networks: Networks{
  1714  					&NetworkResource{
  1715  						ReservedPorts: []Port{
  1716  							{
  1717  								Label: "ok",
  1718  								To:    65535,
  1719  							},
  1720  						},
  1721  					},
  1722  				},
  1723  			},
  1724  		},
  1725  		{
  1726  			TG: &TaskGroup{
  1727  				Name: "group-dynamic-to-ok",
  1728  				Networks: Networks{
  1729  					&NetworkResource{
  1730  						DynamicPorts: []Port{
  1731  							{
  1732  								Label: "ok",
  1733  								To:    65535,
  1734  							},
  1735  						},
  1736  					},
  1737  				},
  1738  			},
  1739  		},
  1740  		{
  1741  			TG: &TaskGroup{
  1742  				Name: "group-static-value-too-high",
  1743  				Networks: Networks{
  1744  					&NetworkResource{
  1745  						ReservedPorts: []Port{
  1746  							{
  1747  								Label: "too-high",
  1748  								Value: 65536,
  1749  							},
  1750  						},
  1751  					},
  1752  				},
  1753  			},
  1754  			ErrContains: "greater than",
  1755  		},
  1756  		{
  1757  			TG: &TaskGroup{
  1758  				Name: "group-dynamic-value-too-high",
  1759  				Networks: Networks{
  1760  					&NetworkResource{
  1761  						DynamicPorts: []Port{
  1762  							{
  1763  								Label: "too-high",
  1764  								Value: 65536,
  1765  							},
  1766  						},
  1767  					},
  1768  				},
  1769  			},
  1770  			ErrContains: "greater than",
  1771  		},
  1772  		{
  1773  			TG: &TaskGroup{
  1774  				Name: "group-static-to-too-high",
  1775  				Networks: Networks{
  1776  					&NetworkResource{
  1777  						ReservedPorts: []Port{
  1778  							{
  1779  								Label: "too-high",
  1780  								To:    65536,
  1781  							},
  1782  						},
  1783  					},
  1784  				},
  1785  			},
  1786  			ErrContains: "greater than",
  1787  		},
  1788  		{
  1789  			TG: &TaskGroup{
  1790  				Name: "group-dynamic-to-too-high",
  1791  				Networks: Networks{
  1792  					&NetworkResource{
  1793  						DynamicPorts: []Port{
  1794  							{
  1795  								Label: "too-high",
  1796  								To:    65536,
  1797  							},
  1798  						},
  1799  					},
  1800  				},
  1801  			},
  1802  			ErrContains: "greater than",
  1803  		},
  1804  		{
  1805  			TG: &TaskGroup{
  1806  				Name: "group-same-static-port-different-host_network",
  1807  				Networks: Networks{
  1808  					&NetworkResource{
  1809  						ReservedPorts: []Port{
  1810  							{
  1811  								Label:       "net1_http",
  1812  								Value:       80,
  1813  								HostNetwork: "net1",
  1814  							},
  1815  							{
  1816  								Label:       "net2_http",
  1817  								Value:       80,
  1818  								HostNetwork: "net2",
  1819  							},
  1820  						},
  1821  					},
  1822  				},
  1823  			},
  1824  		},
  1825  		{
  1826  			TG: &TaskGroup{
  1827  				Name: "mixing-group-task-ports",
  1828  				Networks: Networks{
  1829  					&NetworkResource{
  1830  						ReservedPorts: []Port{
  1831  							{
  1832  								Label: "group_http",
  1833  								Value: 80,
  1834  							},
  1835  						},
  1836  					},
  1837  				},
  1838  				Tasks: []*Task{
  1839  					{
  1840  						Name: "task1",
  1841  						Resources: &Resources{
  1842  							Networks: Networks{
  1843  								&NetworkResource{
  1844  									ReservedPorts: []Port{
  1845  										{
  1846  											Label: "task_http",
  1847  											Value: 80,
  1848  										},
  1849  									},
  1850  								},
  1851  							},
  1852  						},
  1853  					},
  1854  				},
  1855  			},
  1856  			ErrContains: "already reserved by",
  1857  		},
  1858  		{
  1859  			TG: &TaskGroup{
  1860  				Name: "mixing-group-task-ports-with-host_network",
  1861  				Networks: Networks{
  1862  					&NetworkResource{
  1863  						ReservedPorts: []Port{
  1864  							{
  1865  								Label:       "group_http",
  1866  								Value:       80,
  1867  								HostNetwork: "net1",
  1868  							},
  1869  						},
  1870  					},
  1871  				},
  1872  				Tasks: []*Task{
  1873  					{
  1874  						Name: "task1",
  1875  						Resources: &Resources{
  1876  							Networks: Networks{
  1877  								&NetworkResource{
  1878  									ReservedPorts: []Port{
  1879  										{
  1880  											Label: "task_http",
  1881  											Value: 80,
  1882  										},
  1883  									},
  1884  								},
  1885  							},
  1886  						},
  1887  					},
  1888  				},
  1889  			},
  1890  		},
  1891  		{
  1892  			TG: &TaskGroup{
  1893  				Tasks: []*Task{
  1894  					{Driver: "docker"},
  1895  				},
  1896  				Networks: []*NetworkResource{
  1897  					{
  1898  						Mode:     "bridge",
  1899  						Hostname: "foobar",
  1900  					},
  1901  				},
  1902  			},
  1903  		},
  1904  		{
  1905  			TG: &TaskGroup{
  1906  				Tasks: []*Task{
  1907  					{Name: "hostname-invalid-dns-name"},
  1908  				},
  1909  				Networks: []*NetworkResource{
  1910  					{
  1911  						Mode:     "bridge",
  1912  						Hostname: "............",
  1913  					},
  1914  				},
  1915  			},
  1916  			ErrContains: "Hostname is not a valid DNS name",
  1917  		},
  1918  	}
  1919  
  1920  	for i := range cases {
  1921  		tc := cases[i]
  1922  		t.Run(tc.TG.Name, func(t *testing.T) {
  1923  			err := tc.TG.validateNetworks()
  1924  			t.Logf("%s -> %v", tc.TG.Name, err)
  1925  			if tc.ErrContains == "" {
  1926  				require.NoError(t, err)
  1927  				return
  1928  			}
  1929  
  1930  			require.Error(t, err)
  1931  			require.Contains(t, err.Error(), tc.ErrContains)
  1932  		})
  1933  	}
  1934  }
  1935  
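        // TestTask_Validate checks the basic task requirements (name, driver,
        // resources), rejects slashes in task names, and rejects distinct_hosts
        // and distinct_property constraints at the task level.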
  1936  func TestTask_Validate(t *testing.T) {
  1937  	ci.Parallel(t)
  1938  
  1939  	task := &Task{}
  1940  	tg := &TaskGroup{
  1941  		EphemeralDisk: DefaultEphemeralDisk(),
  1942  	}
  1943  	err := task.Validate(JobTypeBatch, tg)
  1944  	requireErrors(t, err,
  1945  		"task name",
  1946  		"task driver",
  1947  		"task resources",
  1948  	)
  1949  
  1950  	task = &Task{Name: "web/foo"}
  1951  	err = task.Validate(JobTypeBatch, tg)
  1952  	requireErrors(t, err, "slashes")
  1953  
  1954  	task = &Task{
  1955  		Name:   "web",
  1956  		Driver: "docker",
  1957  		Resources: &Resources{
  1958  			CPU:      100,
  1959  			MemoryMB: 100,
  1960  		},
  1961  		LogConfig: DefaultLogConfig(),
  1962  	}
  1963  	tg.EphemeralDisk.SizeMB = 200
  1964  	err = task.Validate(JobTypeBatch, tg)
  1965  	if err != nil {
  1966  		t.Fatalf("err: %s", err)
  1967  	}
  1968  
  1969  	task.Constraints = append(task.Constraints,
  1970  		&Constraint{
  1971  			Operand: ConstraintDistinctHosts,
  1972  		},
  1973  		&Constraint{
  1974  			Operand: ConstraintDistinctProperty,
  1975  			LTarget: "${meta.rack}",
  1976  		})
  1977  
  1978  	err = task.Validate(JobTypeBatch, tg)
  1979  	requireErrors(t, err,
  1980  		"task level: distinct_hosts",
  1981  		"task level: distinct_property",
  1982  	)
  1983  }
  1984  
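        // TestTask_Validate_Resources checks the resource floors (CPU and
        // memory) and that MemoryMaxMB, when set, must be larger than MemoryMB.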
  1985  func TestTask_Validate_Resources(t *testing.T) {
  1986  	ci.Parallel(t)
  1987  
  1988  	cases := []struct {
  1989  		name string
  1990  		res  *Resources
  1991  		err  string
  1992  	}{
  1993  		{
  1994  			name: "Minimum",
  1995  			res:  MinResources(),
  1996  		},
  1997  		{
  1998  			name: "Default",
  1999  			res:  DefaultResources(),
  2000  		},
  2001  		{
  2002  			name: "Full",
  2003  			res: &Resources{
  2004  				CPU:         1000,
  2005  				MemoryMB:    1000,
  2006  				MemoryMaxMB: 2000,
  2007  				IOPS:        1000,
  2008  				Networks: []*NetworkResource{
  2009  					{
  2010  						Mode:   "host",
  2011  						Device: "localhost",
  2012  						CIDR:   "127.0.0.0/8",
  2013  						IP:     "127.0.0.1",
  2014  						MBits:  1000,
  2015  						DNS: &DNSConfig{
  2016  							Servers:  []string{"localhost"},
  2017  							Searches: []string{"localdomain"},
  2018  							Options:  []string{"ndots:5"},
  2019  						},
  2020  						ReservedPorts: []Port{
  2021  							{
  2022  								Label:       "reserved",
  2023  								Value:       1234,
  2024  								To:          1234,
  2025  								HostNetwork: "loopback",
  2026  							},
  2027  						},
  2028  						DynamicPorts: []Port{
  2029  							{
  2030  								Label:       "dynamic",
  2031  								Value:       5678,
  2032  								To:          5678,
  2033  								HostNetwork: "loopback",
  2034  							},
  2035  						},
  2036  					},
  2037  				},
  2038  			},
  2039  		},
  2040  		{
  2041  			name: "too little cpu",
  2042  			res: &Resources{
  2043  				CPU:      0,
  2044  				MemoryMB: 200,
  2045  			},
  2046  			err: "minimum CPU value is 1",
  2047  		},
  2048  		{
  2049  			name: "too little memory",
  2050  			res: &Resources{
  2051  				CPU:      100,
  2052  				MemoryMB: 1,
  2053  			},
  2054  			err: "minimum MemoryMB value is 10; got 1",
  2055  		},
  2056  		{
  2057  			name: "too little memory max",
  2058  			res: &Resources{
  2059  				CPU:         100,
  2060  				MemoryMB:    200,
  2061  				MemoryMaxMB: 10,
  2062  			},
  2063  			err: "MemoryMaxMB value (10) should be larger than MemoryMB value (200",
  2064  		},
  2065  	}
  2066  
  2067  	for i := range cases {
  2068  		tc := cases[i]
  2069  		t.Run(tc.name, func(t *testing.T) {
  2070  			err := tc.res.Validate()
  2071  			if tc.err == "" {
  2072  				require.NoError(t, err)
  2073  			} else {
  2074  				require.Error(t, err)
  2075  				require.Contains(t, err.Error(), tc.err)
  2076  			}
  2077  		})
  2078  	}
  2079  }
  2080  
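        // TestNetworkResource_Copy verifies that Copy handles a nil receiver
        // and returns a deep copy: mutating the copy's DNS servers must not
        // change the original.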
  2081  func TestNetworkResource_Copy(t *testing.T) {
  2082  	ci.Parallel(t)
  2083  
  2084  	testCases := []struct {
  2085  		inputNetworkResource *NetworkResource
  2086  		name                 string
  2087  	}{
  2088  		{
  2089  			inputNetworkResource: nil,
  2090  			name:                 "nil input check",
  2091  		},
  2092  		{
  2093  			inputNetworkResource: &NetworkResource{
  2094  				Mode:     "bridge",
  2095  				Device:   "eth0",
  2096  				CIDR:     "10.0.0.1/8",
  2097  				IP:       "10.1.1.13",
  2098  				Hostname: "foobar",
  2099  				MBits:    1000,
  2100  				DNS: &DNSConfig{
  2101  					Servers:  []string{"8.8.8.8", "8.8.4.4"},
  2102  					Searches: []string{"example.com"},
  2103  					Options:  []string{"ndot:2"},
  2104  				},
  2105  				ReservedPorts: []Port{
  2106  					{
  2107  						Label:       "foo",
  2108  						Value:       1313,
  2109  						To:          1313,
  2110  						HostNetwork: "private",
  2111  					},
  2112  				},
  2113  				DynamicPorts: []Port{
  2114  					{
  2115  						Label:       "bar",
  2116  						To:          1414,
  2117  						HostNetwork: "public",
  2118  					},
  2119  				},
  2120  			},
  2121  			name: "fully populated input check",
  2122  		},
  2123  	}
  2124  
  2125  	for _, tc := range testCases {
  2126  		t.Run(tc.name, func(t *testing.T) {
  2127  			output := tc.inputNetworkResource.Copy()
  2128  			assert.Equal(t, tc.inputNetworkResource, output, tc.name)
  2129  
  2130  			if output == nil {
  2131  				return
  2132  			}
  2133  
  2134  			// Assert changes to the copy aren't propagated to the
  2135  			// original
  2136  			output.DNS.Servers[1] = "foo"
  2137  			assert.NotEqual(t, tc.inputNetworkResource, output, tc.name)
  2138  		})
  2139  	}
  2140  }
  2141  
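        // TestTask_Validate_Services asserts that duplicate service and check
        // names, a missing check interval, and a too-short interval are all
        // reported, while services that share a name but use distinct port
        // labels validate cleanly.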
  2142  func TestTask_Validate_Services(t *testing.T) {
  2143  	ci.Parallel(t)
  2144  
  2145  	s1 := &Service{
  2146  		Name:      "service-name",
  2147  		Provider:  "consul",
  2148  		PortLabel: "bar",
  2149  		Checks: []*ServiceCheck{
  2150  			{
  2151  				Name:     "check-name",
  2152  				Type:     ServiceCheckTCP,
  2153  				Interval: 0 * time.Second,
  2154  			},
  2155  			{
  2156  				Name:    "check-name",
  2157  				Type:    ServiceCheckTCP,
  2158  				Timeout: 2 * time.Second,
  2159  			},
  2160  			{
  2161  				Name:     "check-name",
  2162  				Type:     ServiceCheckTCP,
  2163  				Interval: 1 * time.Second,
  2164  			},
  2165  		},
  2166  	}
  2167  
  2168  	s2 := &Service{
  2169  		Name:      "service-name",
  2170  		Provider:  "consul",
  2171  		PortLabel: "bar",
  2172  	}
  2173  
  2174  	s3 := &Service{
  2175  		Name:      "service-A",
  2176  		Provider:  "consul",
  2177  		PortLabel: "a",
  2178  	}
  2179  	s4 := &Service{
  2180  		Name:      "service-A",
  2181  		Provider:  "consul",
  2182  		PortLabel: "b",
  2183  	}
  2184  
  2185  	ephemeralDisk := DefaultEphemeralDisk()
  2186  	ephemeralDisk.SizeMB = 200
  2187  	task := &Task{
  2188  		Name:   "web",
  2189  		Driver: "docker",
  2190  		Resources: &Resources{
  2191  			CPU:      100,
  2192  			MemoryMB: 100,
  2193  		},
  2194  		Services: []*Service{s1, s2},
  2195  	}
  2196  
  2197  	task1 := &Task{
  2198  		Name:      "web",
  2199  		Driver:    "docker",
  2200  		Resources: DefaultResources(),
  2201  		Services:  []*Service{s3, s4},
  2202  		LogConfig: DefaultLogConfig(),
  2203  	}
  2204  	tgNetworks := []*NetworkResource{
  2205  		{
  2206  			MBits: 10,
  2207  			DynamicPorts: []Port{
  2208  				{
  2209  					Label: "a",
  2210  					Value: 1000,
  2211  				},
  2212  				{
  2213  					Label: "b",
  2214  					Value: 2000,
  2215  				},
  2216  			},
  2217  		},
  2218  	}
  2219  	tg := &TaskGroup{
  2220  		Networks:      tgNetworks,
  2221  		EphemeralDisk: ephemeralDisk,
  2222  	}
  2223  
  2224  	err := task.Validate(JobTypeService, tg)
  2225  	if err == nil {
  2226  		t.Fatal("expected an error")
  2227  	}
  2228  
  2229  	if !strings.Contains(err.Error(), "service \"service-name\" is duplicate") {
  2230  		t.Fatalf("err: %v", err)
  2231  	}
  2232  
  2233  	if !strings.Contains(err.Error(), "check \"check-name\" is duplicate") {
  2234  		t.Fatalf("err: %v", err)
  2235  	}
  2236  
  2237  	if !strings.Contains(err.Error(), "missing required value interval") {
  2238  		t.Fatalf("err: %v", err)
  2239  	}
  2240  
  2241  	if !strings.Contains(err.Error(), "cannot be less than") {
  2242  		t.Fatalf("err: %v", err)
  2243  	}
  2244  
  2245  	if err = task1.Validate(JobTypeService, tg); err != nil {
  2246  		t.Fatalf("err: %v", err)
  2247  	}
  2248  }
  2249  
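        // TestTask_Validate_Service_AddressMode_Ok enumerates address_mode and
        // port label combinations that must validate, including driver mode
        // with a numeric port and host mode without a port label.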
  2250  func TestTask_Validate_Service_AddressMode_Ok(t *testing.T) {
  2251  	ci.Parallel(t)
  2252  
  2253  	getTask := func(s *Service) *Task {
  2254  		task := &Task{
  2255  			Name:      "web",
  2256  			Driver:    "docker",
  2257  			Resources: DefaultResources(),
  2258  			Services:  []*Service{s},
  2259  			LogConfig: DefaultLogConfig(),
  2260  		}
  2261  
  2262  		return task
  2263  	}
  2264  
  2265  	cases := []*Service{
  2266  		{
  2267  			// https://github.com/hernad/nomad/issues/3681#issuecomment-357274177
  2268  			Name:        "DriverModeWithLabel",
  2269  			Provider:    "consul",
  2270  			PortLabel:   "http",
  2271  			AddressMode: AddressModeDriver,
  2272  		},
  2273  		{
  2274  			Name:        "DriverModeWithPort",
  2275  			Provider:    "consul",
  2276  			PortLabel:   "80",
  2277  			AddressMode: AddressModeDriver,
  2278  		},
  2279  		{
  2280  			Name:        "HostModeWithLabel",
  2281  			Provider:    "consul",
  2282  			PortLabel:   "http",
  2283  			AddressMode: AddressModeHost,
  2284  		},
  2285  		{
  2286  			Name:        "HostModeWithoutLabel",
  2287  			Provider:    "consul",
  2288  			AddressMode: AddressModeHost,
  2289  		},
  2290  		{
  2291  			Name:        "DriverModeWithoutLabel",
  2292  			Provider:    "consul",
  2293  			AddressMode: AddressModeDriver,
  2294  		},
  2295  	}
  2296  
  2297  	for _, service := range cases {
  2298  		task := getTask(service)
  2299  		tg := &TaskGroup{
  2300  			Networks: []*NetworkResource{
  2301  				{
  2302  					DynamicPorts: []Port{
  2303  						{
  2304  							Label: "http",
  2305  							Value: 80,
  2306  						},
  2307  					},
  2308  				},
  2309  			},
  2310  			EphemeralDisk: DefaultEphemeralDisk(),
  2311  		}
  2312  
  2313  		t.Run(service.Name, func(t *testing.T) {
  2314  			if err := task.Validate(JobTypeService, tg); err != nil {
  2315  				t.Fatalf("unexpected err: %v", err)
  2316  			}
  2317  		})
  2318  	}
  2319  }
  2320  
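        // TestTask_Validate_Service_AddressMode_Bad enumerates combinations
        // that must fail: unknown port labels in either mode and a numeric
        // port with host address mode.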
  2321  func TestTask_Validate_Service_AddressMode_Bad(t *testing.T) {
  2322  	ci.Parallel(t)
  2323  
  2324  	getTask := func(s *Service) *Task {
  2325  		return &Task{
  2326  			Name:      "web",
  2327  			Driver:    "docker",
  2328  			Resources: DefaultResources(),
  2329  			Services:  []*Service{s},
  2330  			LogConfig: DefaultLogConfig(),
  2331  		}
  2332  	}
  2333  
  2334  	cases := []*Service{
  2335  		{
  2336  			// https://github.com/hernad/nomad/issues/3681#issuecomment-357274177
  2337  			Name:        "DriverModeWithLabel",
  2338  			PortLabel:   "asdf",
  2339  			AddressMode: AddressModeDriver,
  2340  		},
  2341  		{
  2342  			Name:        "HostModeWithLabel",
  2343  			PortLabel:   "asdf",
  2344  			AddressMode: AddressModeHost,
  2345  		},
  2346  		{
  2347  			Name:        "HostModeWithPort",
  2348  			PortLabel:   "80",
  2349  			AddressMode: AddressModeHost,
  2350  		},
  2351  	}
  2352  
  2353  	for _, service := range cases {
  2354  		task := getTask(service)
  2355  		tg := &TaskGroup{
  2356  			Networks: []*NetworkResource{
  2357  				{
  2358  					DynamicPorts: []Port{
  2359  						{
  2360  							Label: "http",
  2361  							Value: 80,
  2362  						},
  2363  					},
  2364  				},
  2365  			},
  2366  			EphemeralDisk: DefaultEphemeralDisk(),
  2367  		}
  2368  
  2369  		t.Run(service.Name, func(t *testing.T) {
  2370  			err := task.Validate(JobTypeService, tg)
  2371  			if err == nil {
  2372  				t.Fatalf("expected an error")
  2373  			}
  2374  			t.Logf("err: %v", err)
  2375  		})
  2376  	}
  2377  }
  2378  
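        // TestTask_Validate_Service_Check exercises ServiceCheck.validateConsul:
        // missing timeouts, initial check states, HTTP path requirements, and
        // the expose flag being limited to HTTP and gRPC checks.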
  2379  func TestTask_Validate_Service_Check(t *testing.T) {
  2380  	ci.Parallel(t)
  2381  
  2382  	invalidCheck := ServiceCheck{
  2383  		Name:     "check-name",
  2384  		Command:  "/bin/true",
  2385  		Type:     ServiceCheckScript,
  2386  		Interval: 10 * time.Second,
  2387  	}
  2388  
  2389  	err := invalidCheck.validateConsul()
  2390  	if err == nil || !strings.Contains(err.Error(), "Timeout cannot be less") {
  2391  		t.Fatalf("expected a timeout validation error but received: %q", err)
  2392  	}
  2393  
  2394  	check1 := ServiceCheck{
  2395  		Name:     "check-name",
  2396  		Type:     ServiceCheckTCP,
  2397  		Interval: 10 * time.Second,
  2398  		Timeout:  2 * time.Second,
  2399  	}
  2400  
  2401  	if err := check1.validateConsul(); err != nil {
  2402  		t.Fatalf("err: %v", err)
  2403  	}
  2404  
  2405  	check1.InitialStatus = "foo"
  2406  	err = check1.validateConsul()
  2407  	if err == nil {
  2408  		t.Fatal("Expected an error")
  2409  	}
  2410  
  2411  	if !strings.Contains(err.Error(), "invalid initial check state (foo)") {
  2412  		t.Fatalf("err: %v", err)
  2413  	}
  2414  
  2415  	check1.InitialStatus = api.HealthCritical
  2416  	err = check1.validateConsul()
  2417  	if err != nil {
  2418  		t.Fatalf("err: %v", err)
  2419  	}
  2420  
  2421  	check1.InitialStatus = api.HealthPassing
  2422  	err = check1.validateConsul()
  2423  	if err != nil {
  2424  		t.Fatalf("err: %v", err)
  2425  	}
  2426  
  2427  	check1.InitialStatus = ""
  2428  	err = check1.validateConsul()
  2429  	if err != nil {
  2430  		t.Fatalf("err: %v", err)
  2431  	}
  2432  
  2433  	check2 := ServiceCheck{
  2434  		Name:     "check-name-2",
  2435  		Type:     ServiceCheckHTTP,
  2436  		Interval: 10 * time.Second,
  2437  		Timeout:  2 * time.Second,
  2438  		Path:     "/foo/bar",
  2439  	}
  2440  
  2441  	err = check2.validateConsul()
  2442  	if err != nil {
  2443  		t.Fatalf("err: %v", err)
  2444  	}
  2445  
  2446  	check2.Path = ""
  2447  	err = check2.validateConsul()
  2448  	if err == nil {
  2449  		t.Fatal("Expected an error")
  2450  	}
  2451  	if !strings.Contains(err.Error(), "http type must have http path") {
  2452  		t.Fatalf("err: %v", err)
  2453  	}
  2454  
  2455  	check2.Path = "http://www.example.com"
  2456  	err = check2.validateConsul()
  2457  	if err == nil {
  2458  		t.Fatal("Expected an error")
  2459  	}
  2460  	if !strings.Contains(err.Error(), "relative http path") {
  2461  		t.Fatalf("err: %v", err)
  2462  	}
  2463  
  2464  	t.Run("check expose", func(t *testing.T) {
  2465  		t.Run("type http", func(t *testing.T) {
  2466  			require.NoError(t, (&ServiceCheck{
  2467  				Type:     ServiceCheckHTTP,
  2468  				Interval: 1 * time.Second,
  2469  				Timeout:  1 * time.Second,
  2470  				Path:     "/health",
  2471  				Expose:   true,
  2472  			}).validateConsul())
  2473  		})
  2474  		t.Run("type tcp", func(t *testing.T) {
  2475  			require.EqualError(t, (&ServiceCheck{
  2476  				Type:     ServiceCheckTCP,
  2477  				Interval: 1 * time.Second,
  2478  				Timeout:  1 * time.Second,
  2479  				Expose:   true,
  2480  			}).validateConsul(), "expose may only be set on HTTP or gRPC checks")
  2481  		})
  2482  	})
  2483  }
  2484  
  2485  // TestTask_Validate_Service_Check_AddressMode asserts that checks do not
  2486  // inherit address mode but do inherit ports.
  2487  func TestTask_Validate_Service_Check_AddressMode(t *testing.T) {
  2488  	ci.Parallel(t)
  2489  
  2490  	getTask := func(s *Service) (*Task, *TaskGroup) {
  2491  		return &Task{
  2492  				Services: []*Service{s},
  2493  			}, &TaskGroup{
  2494  				Networks: []*NetworkResource{
  2495  					{
  2496  						DynamicPorts: []Port{
  2497  							{
  2498  								Label: "http",
  2499  								Value: 9999,
  2500  							},
  2501  						},
  2502  					},
  2503  				},
  2504  			}
  2505  	}
  2506  
  2507  	cases := []struct {
  2508  		Service     *Service
  2509  		ErrContains string
  2510  	}{
  2511  		{
  2512  			Service: &Service{
  2513  				Name:        "invalid-driver",
  2514  				Provider:    "consul",
  2515  				PortLabel:   "80",
  2516  				AddressMode: "host",
  2517  			},
  2518  			ErrContains: `port label "80" referenced`,
  2519  		},
  2520  		{
  2521  			Service: &Service{
  2522  				Name:        "http-driver-fail-1",
  2523  				PortLabel:   "80",
  2524  				AddressMode: "driver",
  2525  				Checks: []*ServiceCheck{
  2526  					{
  2527  						Name:     "invalid-check-1",
  2528  						Type:     "tcp",
  2529  						Interval: time.Second,
  2530  						Timeout:  time.Second,
  2531  					},
  2532  				},
  2533  			},
  2534  			ErrContains: `check "invalid-check-1" cannot use a numeric port`,
  2535  		},
  2536  		{
  2537  			Service: &Service{
  2538  				Name:        "http-driver-fail-2",
  2539  				Provider:    "consul",
  2540  				PortLabel:   "80",
  2541  				AddressMode: "driver",
  2542  				Checks: []*ServiceCheck{
  2543  					{
  2544  						Name:      "invalid-check-2",
  2545  						Type:      "tcp",
  2546  						PortLabel: "80",
  2547  						Interval:  time.Second,
  2548  						Timeout:   time.Second,
  2549  					},
  2550  				},
  2551  			},
  2552  			ErrContains: `check "invalid-check-2" cannot use a numeric port`,
  2553  		},
  2554  		{
  2555  			Service: &Service{
  2556  				Name:        "http-driver-fail-3",
  2557  				Provider:    "consul",
  2558  				PortLabel:   "80",
  2559  				AddressMode: "driver",
  2560  				Checks: []*ServiceCheck{
  2561  					{
  2562  						Name:      "invalid-check-3",
  2563  						Type:      "tcp",
  2564  						PortLabel: "missing-port-label",
  2565  						Interval:  time.Second,
  2566  						Timeout:   time.Second,
  2567  					},
  2568  				},
  2569  			},
  2570  			ErrContains: `port label "missing-port-label" referenced`,
  2571  		},
  2572  		{
  2573  			Service: &Service{
  2574  				Name:        "http-driver-passes",
  2575  				Provider:    "consul",
  2576  				PortLabel:   "80",
  2577  				AddressMode: "driver",
  2578  				Checks: []*ServiceCheck{
  2579  					{
  2580  						Name:     "valid-script-check",
  2581  						Type:     "script",
  2582  						Command:  "ok",
  2583  						Interval: time.Second,
  2584  						Timeout:  time.Second,
  2585  					},
  2586  					{
  2587  						Name:      "valid-host-check",
  2588  						Type:      "tcp",
  2589  						PortLabel: "http",
  2590  						Interval:  time.Second,
  2591  						Timeout:   time.Second,
  2592  					},
  2593  					{
  2594  						Name:        "valid-driver-check",
  2595  						Type:        "tcp",
  2596  						AddressMode: "driver",
  2597  						Interval:    time.Second,
  2598  						Timeout:     time.Second,
  2599  					},
  2600  				},
  2601  			},
  2602  		},
  2603  		{
  2604  			Service: &Service{
  2605  				Name:     "empty-address-3673-passes-1",
  2606  				Provider: "consul",
  2607  				Checks: []*ServiceCheck{
  2608  					{
  2609  						Name:      "valid-port-label",
  2610  						Type:      "tcp",
  2611  						PortLabel: "http",
  2612  						Interval:  time.Second,
  2613  						Timeout:   time.Second,
  2614  					},
  2615  					{
  2616  						Name:     "empty-is-ok",
  2617  						Type:     "script",
  2618  						Command:  "ok",
  2619  						Interval: time.Second,
  2620  						Timeout:  time.Second,
  2621  					},
  2622  				},
  2623  			},
  2624  		},
  2625  		{
  2626  			Service: &Service{
  2627  				Name: "empty-address-3673-passes-2",
  2628  			},
  2629  		},
  2630  		{
  2631  			Service: &Service{
  2632  				Name:     "empty-address-3673-fails",
  2633  				Provider: "consul",
  2634  				Checks: []*ServiceCheck{
  2635  					{
  2636  						Name:     "empty-is-not-ok",
  2637  						Type:     "tcp",
  2638  						Interval: time.Second,
  2639  						Timeout:  time.Second,
  2640  					},
  2641  				},
  2642  			},
  2643  			ErrContains: `invalid: check requires a port but neither check nor service`,
  2644  		},
  2645  		{
  2646  			Service: &Service{
  2647  				Name:    "connect-block-on-task-level",
  2648  				Connect: &ConsulConnect{SidecarService: &ConsulSidecarService{}},
  2649  			},
  2650  			ErrContains: `cannot have "connect" block`,
  2651  		},
  2652  	}
  2653  
  2654  	for _, tc := range cases {
  2655  		tc := tc
  2656  		task, tg := getTask(tc.Service)
  2657  		t.Run(tc.Service.Name, func(t *testing.T) {
  2658  			err := validateServices(task, tg.Networks)
  2659  			if err == nil && tc.ErrContains == "" {
  2660  				// Ok!
  2661  				return
  2662  			}
  2663  			if err == nil {
  2664  				t.Fatalf("no error returned. expected: %s", tc.ErrContains)
  2665  			}
  2666  			if !strings.Contains(err.Error(), tc.ErrContains) {
  2667  				t.Fatalf("expected %q but found: %v", tc.ErrContains, err)
  2668  			}
  2669  		})
  2670  	}
  2671  }
  2672  
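        // TestTask_Validate_Service_Check_GRPC asserts that a gRPC check fails
        // validation without a port label and passes once one is set.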
  2673  func TestTask_Validate_Service_Check_GRPC(t *testing.T) {
  2674  	ci.Parallel(t)
  2675  	// Bad (no port)
  2676  	invalidGRPC := &ServiceCheck{
  2677  		Type:     ServiceCheckGRPC,
  2678  		Interval: time.Second,
  2679  		Timeout:  time.Second,
  2680  	}
  2681  	service := &Service{
  2682  		Name:     "test",
  2683  		Provider: "consul",
  2684  		Checks:   []*ServiceCheck{invalidGRPC},
  2685  	}
  2686  
  2687  	assert.Error(t, service.Validate())
  2688  
  2689  	// Good
  2690  	service.Checks[0] = &ServiceCheck{
  2691  		Type:      ServiceCheckGRPC,
  2692  		Interval:  time.Second,
  2693  		Timeout:   time.Second,
  2694  		PortLabel: "some-port-label",
  2695  	}
  2696  
  2697  	assert.NoError(t, service.Validate())
  2698  }
  2699  
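        // TestTask_Validate_Service_Check_CheckRestart asserts that negative
        // CheckRestart limit and grace values each produce an error, while
        // zero and positive values validate.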
  2700  func TestTask_Validate_Service_Check_CheckRestart(t *testing.T) {
  2701  	ci.Parallel(t)
  2702  	invalidCheckRestart := &CheckRestart{
  2703  		Limit: -1,
  2704  		Grace: -1,
  2705  	}
  2706  
  2707  	err := invalidCheckRestart.Validate()
  2708  	assert.NotNil(t, err, "invalidCheckRestart.Validate()")
  2709  	assert.Len(t, err.(*multierror.Error).Errors, 2)
  2710  
  2711  	validCheckRestart := &CheckRestart{}
  2712  	assert.Nil(t, validCheckRestart.Validate())
  2713  
  2714  	validCheckRestart.Limit = 1
  2715  	validCheckRestart.Grace = 1
  2716  	assert.Nil(t, validCheckRestart.Validate())
  2717  }
  2718  
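        // TestTask_Validate_ConnectProxyKind validates connect-proxy task
        // kinds: a proxy task must not define its own services or set leader,
        // and must name a Connect-enabled service defined on its task group.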
  2719  func TestTask_Validate_ConnectProxyKind(t *testing.T) {
  2720  	ci.Parallel(t)
  2721  
  2722  	getTask := func(kind TaskKind, leader bool) *Task {
  2723  		task := &Task{
  2724  			Name:      "web",
  2725  			Driver:    "docker",
  2726  			Resources: DefaultResources(),
  2727  			LogConfig: DefaultLogConfig(),
  2728  			Kind:      kind,
  2729  			Leader:    leader,
  2730  		}
  2731  		task.Resources.Networks = []*NetworkResource{
  2732  			{
  2733  				MBits: 10,
  2734  				DynamicPorts: []Port{
  2735  					{
  2736  						Label: "http",
  2737  						Value: 80,
  2738  					},
  2739  				},
  2740  			},
  2741  		}
  2742  		return task
  2743  	}
  2744  
  2745  	cases := []struct {
  2746  		Desc        string
  2747  		Kind        TaskKind
  2748  		Leader      bool
  2749  		Service     *Service
  2750  		TgService   []*Service
  2751  		ErrContains string
  2752  	}{
  2753  		{
  2754  			Desc: "Not connect",
  2755  			Kind: "test",
  2756  		},
  2757  		{
  2758  			Desc: "Invalid because of service in task definition",
  2759  			Kind: "connect-proxy:redis",
  2760  			Service: &Service{
  2761  				Name: "redis",
  2762  			},
  2763  			ErrContains: "Connect proxy task must not have a service block",
  2764  		},
  2765  		{
  2766  			Desc:   "Leader should not be set",
  2767  			Kind:   "connect-proxy:redis",
  2768  			Leader: true,
  2769  			Service: &Service{
  2770  				Name: "redis",
  2771  			},
  2772  			ErrContains: "Connect proxy task must not have leader set",
  2773  		},
  2774  		{
  2775  			Desc: "Service name invalid",
  2776  			Kind: "connect-proxy:redis:test",
  2777  			Service: &Service{
  2778  				Name: "redis",
  2779  			},
  2780  			ErrContains: `No Connect services in task group with Connect proxy ("redis:test")`,
  2781  		},
  2782  		{
  2783  			Desc:        "Service name not found in group",
  2784  			Kind:        "connect-proxy:redis",
  2785  			ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
  2786  		},
  2787  		{
  2788  			Desc: "Connect block not configured in group",
  2789  			Kind: "connect-proxy:redis",
  2790  			TgService: []*Service{{
  2791  				Name: "redis",
  2792  			}},
  2793  			ErrContains: `No Connect services in task group with Connect proxy ("redis")`,
  2794  		},
  2795  		{
  2796  			Desc: "Valid connect proxy kind",
  2797  			Kind: "connect-proxy:redis",
  2798  			TgService: []*Service{{
  2799  				Name: "redis",
  2800  				Connect: &ConsulConnect{
  2801  					SidecarService: &ConsulSidecarService{
  2802  						Port: "db",
  2803  					},
  2804  				},
  2805  			}},
  2806  		},
  2807  	}
  2808  
  2809  	for _, tc := range cases {
  2810  		tc := tc
  2811  		task := getTask(tc.Kind, tc.Leader)
  2812  		if tc.Service != nil {
  2813  			task.Services = []*Service{tc.Service}
  2814  		}
  2815  		t.Run(tc.Desc, func(t *testing.T) {
  2816  			tg := &TaskGroup{
  2817  				EphemeralDisk: DefaultEphemeralDisk(),
  2818  				Services:      tc.TgService,
  2819  			}
  2820  			err := task.Validate("service", tg)
  2821  			if err == nil && tc.ErrContains == "" {
  2822  				// Ok!
  2823  				return
  2824  			}
  2825  			require.Errorf(t, err, "no error returned. expected: %s", tc.ErrContains)
  2826  			require.Containsf(t, err.Error(), tc.ErrContains, "expected %q but found: %v", tc.ErrContains, err)
  2827  		})
  2828  	}
  2829  }
  2830  
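        // TestTask_Validate_LogConfig asserts that a task's log storage
        // requirement is checked against the group's ephemeral disk size.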
  2831  func TestTask_Validate_LogConfig(t *testing.T) {
  2832  	ci.Parallel(t)
  2833  
  2834  	task := &Task{
  2835  		LogConfig: DefaultLogConfig(),
  2836  	}
  2837  	tg := &TaskGroup{
  2838  		EphemeralDisk: &EphemeralDisk{
  2839  			SizeMB: 1,
  2840  		},
  2841  	}
  2842  
  2843  	err := task.Validate(JobTypeService, tg)
  2844  	require.Error(t, err, "log storage")
  2845  }
  2846  
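        // TestLogConfig_Equals covers LogConfig.Equal for nil receivers and
        // for each field differing individually.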
  2847  func TestLogConfig_Equals(t *testing.T) {
  2848  	ci.Parallel(t)
  2849  
  2850  	t.Run("both nil", func(t *testing.T) {
  2851  		a := (*LogConfig)(nil)
  2852  		b := (*LogConfig)(nil)
  2853  		require.True(t, a.Equal(b))
  2854  	})
  2855  
  2856  	t.Run("one nil", func(t *testing.T) {
  2857  		a := new(LogConfig)
  2858  		b := (*LogConfig)(nil)
  2859  		require.False(t, a.Equal(b))
  2860  	})
  2861  
  2862  	t.Run("max files", func(t *testing.T) {
  2863  		a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
  2864  		b := &LogConfig{MaxFiles: 2, MaxFileSizeMB: 200}
  2865  		require.False(t, a.Equal(b))
  2866  	})
  2867  
  2868  	t.Run("max file size", func(t *testing.T) {
  2869  		a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 100}
  2870  		b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
  2871  		require.False(t, a.Equal(b))
  2872  	})
  2873  
  2874  	t.Run("same", func(t *testing.T) {
  2875  		a := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
  2876  		b := &LogConfig{MaxFiles: 1, MaxFileSizeMB: 200}
  2877  		require.True(t, a.Equal(b))
  2878  	})
  2879  }
  2880  
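        // TestTask_Validate_CSIPluginConfig checks that a CSI plugin block
        // requires a non-empty plugin ID and one of the plugin types 'node',
        // 'controller', or 'monolith'.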
  2881  func TestTask_Validate_CSIPluginConfig(t *testing.T) {
  2882  	ci.Parallel(t)
  2883  
  2884  	table := []struct {
  2885  		name          string
  2886  		pc            *TaskCSIPluginConfig
  2887  		expectedErr   string
  2888  		unexpectedErr string
  2889  	}{
  2890  		{
  2891  			name:          "no errors when not specified",
  2892  			pc:            nil,
  2893  			unexpectedErr: "CSIPluginConfig",
  2894  		},
  2895  		{
  2896  			name:        "requires non-empty plugin id",
  2897  			pc:          &TaskCSIPluginConfig{},
  2898  			expectedErr: "CSIPluginConfig must have a non-empty PluginID",
  2899  		},
  2900  		{
  2901  			name: "requires valid plugin type",
  2902  			pc: &TaskCSIPluginConfig{
  2903  				ID:   "com.hashicorp.csi",
  2904  				Type: "nonsense",
  2905  			},
  2906  			expectedErr: "CSIPluginConfig PluginType must be one of 'node', 'controller', or 'monolith', got: \"nonsense\"",
  2907  		},
  2908  	}
  2909  
  2910  	for _, tt := range table {
  2911  		t.Run(tt.name, func(t *testing.T) {
  2912  			task := testJob().TaskGroups[0].Tasks[0]
  2913  			task.CSIPluginConfig = tt.pc
  2914  			tg := &TaskGroup{
  2915  				EphemeralDisk: &EphemeralDisk{
  2916  					SizeMB: 100,
  2917  				},
  2918  			}
  2919  
  2920  			err := task.Validate(JobTypeService, tg)
  2921  			if tt.expectedErr != "" {
  2922  				require.Error(t, err)
  2923  				require.Contains(t, err.Error(), tt.expectedErr)
  2924  			} else {
  2925  				require.NoError(t, err)
  2926  			}
  2927  		})
  2928  	}
  2929  }
  2930  
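        // TestTask_Validate_Template asserts that an empty template, two
        // templates sharing a destination, and an env template using the
        // signal change mode are all rejected.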
  2931  func TestTask_Validate_Template(t *testing.T) {
  2932  	ci.Parallel(t)
  2933  
  2934  	bad := &Template{}
  2935  	task := &Task{
  2936  		Templates: []*Template{bad},
  2937  	}
  2938  	tg := &TaskGroup{
  2939  		EphemeralDisk: &EphemeralDisk{
  2940  			SizeMB: 1,
  2941  		},
  2942  	}
  2943  
  2944  	err := task.Validate(JobTypeService, tg)
  2945  	if err == nil || !strings.Contains(err.Error(), "Template 1 validation failed") {
  2946  		t.Fatalf("err: %s", err)
  2947  	}
  2948  
  2949  	// Have two templates that share the same destination
  2950  	good := &Template{
  2951  		SourcePath: "foo",
  2952  		DestPath:   "local/foo",
  2953  		ChangeMode: "noop",
  2954  	}
  2955  
  2956  	task.Templates = []*Template{good, good}
  2957  	err = task.Validate(JobTypeService, tg)
  2958  	if err == nil || !strings.Contains(err.Error(), "same destination as") {
  2959  		t.Fatalf("err: %s", err)
  2960  	}
  2961  
  2962  	// Env templates can't use signals
  2963  	task.Templates = []*Template{
  2964  		{
  2965  			Envvars:    true,
  2966  			ChangeMode: "signal",
  2967  		},
  2968  	}
  2969  
  2970  	err = task.Validate(JobTypeService, tg)
  2971  	if err == nil {
  2972  		t.Fatalf("expected error from Template.Validate")
  2973  	}
  2974  	if expected := "cannot use signals"; !strings.Contains(err.Error(), expected) {
  2975  		t.Errorf("expected to find %q but found %v", expected, err)
  2976  	}
  2977  }
  2978  
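        // TestTemplate_Copy mutates every field of the original after copying
        // and verifies that none of the changes appear in the copy.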
  2979  func TestTemplate_Copy(t *testing.T) {
  2980  	ci.Parallel(t)
  2981  
  2982  	t1 := &Template{
  2983  		SourcePath:   "/local/file.txt",
  2984  		DestPath:     "/local/dest.txt",
  2985  		EmbeddedTmpl: "tpl",
  2986  		ChangeMode:   TemplateChangeModeScript,
  2987  		ChangeScript: &ChangeScript{
  2988  			Command: "/bin/foo",
  2989  			Args:    []string{"--force", "--debug"},
  2990  		},
  2991  		Splay:      10 * time.Second,
  2992  		Perms:      "777",
  2993  		Uid:        pointer.Of(1000),
  2994  		Gid:        pointer.Of(2000),
  2995  		LeftDelim:  "[[",
  2996  		RightDelim: "]]",
  2997  		Envvars:    true,
  2998  		VaultGrace: time.Minute,
  2999  		Wait: &WaitConfig{
  3000  			Min: pointer.Of(time.Second),
  3001  			Max: pointer.Of(time.Minute),
  3002  		},
  3003  	}
  3004  	t2 := t1.Copy()
  3005  
  3006  	t1.SourcePath = "/local/file2.txt"
  3007  	t1.DestPath = "/local/dest2.txt"
  3008  	t1.EmbeddedTmpl = "tpl2"
  3009  	t1.ChangeMode = TemplateChangeModeSignal
  3010  	t1.ChangeScript.Command = "/bin/foobar"
  3011  	t1.ChangeScript.Args = []string{"--forces", "--debugs"}
  3012  	t1.Splay = 5 * time.Second
  3013  	t1.Perms = "700"
  3014  	t1.Uid = pointer.Of(5000)
  3015  	t1.Gid = pointer.Of(6000)
  3016  	t1.LeftDelim = "(("
  3017  	t1.RightDelim = "))"
  3018  	t1.Envvars = false
  3019  	t1.VaultGrace = 2 * time.Minute
  3020  	t1.Wait.Min = pointer.Of(2 * time.Second)
  3021  	t1.Wait.Max = pointer.Of(2 * time.Minute)
  3022  
  3023  	require.NotEqual(t, t1.SourcePath, t2.SourcePath)
  3024  	require.NotEqual(t, t1.DestPath, t2.DestPath)
  3025  	require.NotEqual(t, t1.EmbeddedTmpl, t2.EmbeddedTmpl)
  3026  	require.NotEqual(t, t1.ChangeMode, t2.ChangeMode)
  3027  	require.NotEqual(t, t1.ChangeScript.Command, t2.ChangeScript.Command)
  3028  	require.NotEqual(t, t1.ChangeScript.Args, t2.ChangeScript.Args)
  3029  	require.NotEqual(t, t1.Splay, t2.Splay)
  3030  	require.NotEqual(t, t1.Perms, t2.Perms)
  3031  	require.NotEqual(t, t1.Uid, t2.Uid)
  3032  	require.NotEqual(t, t1.Gid, t2.Gid)
  3033  	require.NotEqual(t, t1.LeftDelim, t2.LeftDelim)
  3034  	require.NotEqual(t, t1.RightDelim, t2.RightDelim)
  3035  	require.NotEqual(t, t1.Envvars, t2.Envvars)
  3036  	require.NotEqual(t, t1.VaultGrace, t2.VaultGrace)
  3037  	require.NotEqual(t, t1.Wait.Min, t2.Wait.Min)
  3038  	require.NotEqual(t, t1.Wait.Max, t2.Wait.Max)
  3039  
  3040  }
  3041  
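        // TestTemplate_Validate table-tests Template.Validate: source and
        // destination paths, splay, change modes and change scripts,
        // permissions, and wait bounds.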
  3042  func TestTemplate_Validate(t *testing.T) {
  3043  	ci.Parallel(t)
  3044  
  3045  	cases := []struct {
  3046  		Tmpl         *Template
  3047  		Fail         bool
  3048  		ContainsErrs []string
  3049  	}{
  3050  		{
  3051  			Tmpl: &Template{},
  3052  			Fail: true,
  3053  			ContainsErrs: []string{
  3054  				"specify a source path",
  3055  				"specify a destination",
  3056  				TemplateChangeModeInvalidError.Error(),
  3057  			},
  3058  		},
  3059  		{
  3060  			Tmpl: &Template{
  3061  				Splay: -100,
  3062  			},
  3063  			Fail: true,
  3064  			ContainsErrs: []string{
  3065  				"positive splay",
  3066  			},
  3067  		},
  3068  		{
  3069  			Tmpl: &Template{
  3070  				ChangeMode: "foo",
  3071  			},
  3072  			Fail: true,
  3073  			ContainsErrs: []string{
  3074  				TemplateChangeModeInvalidError.Error(),
  3075  			},
  3076  		},
  3077  		{
  3078  			Tmpl: &Template{
  3079  				ChangeMode: "signal",
  3080  			},
  3081  			Fail: true,
  3082  			ContainsErrs: []string{
  3083  				"specify signal value",
  3084  			},
  3085  		},
  3086  		{
  3087  			Tmpl: &Template{
  3088  				SourcePath: "foo",
  3089  				DestPath:   "../../root",
  3090  				ChangeMode: "noop",
  3091  			},
  3092  			Fail: true,
  3093  			ContainsErrs: []string{
  3094  				"destination escapes",
  3095  			},
  3096  		},
  3097  		{
  3098  			Tmpl: &Template{
  3099  				SourcePath: "foo",
  3100  				DestPath:   "local/foo",
  3101  				ChangeMode: "noop",
  3102  			},
  3103  			Fail: false,
  3104  		},
  3105  		{
  3106  			Tmpl: &Template{
  3107  				SourcePath: "foo",
  3108  				DestPath:   "local/foo",
  3109  				ChangeMode: "noop",
  3110  				Perms:      "0444",
  3111  			},
  3112  			Fail: false,
  3113  		},
  3114  		{
  3115  			Tmpl: &Template{
  3116  				SourcePath: "foo",
  3117  				DestPath:   "local/foo",
  3118  				ChangeMode: "noop",
  3119  				Perms:      "zza",
  3120  			},
  3121  			Fail: true,
  3122  			ContainsErrs: []string{
  3123  				"as octal",
  3124  			},
  3125  		},
  3126  		{
  3127  			Tmpl: &Template{
  3128  				SourcePath: "foo",
  3129  				DestPath:   "local/foo",
  3130  				ChangeMode: "noop",
  3131  				Wait: &WaitConfig{
  3132  					Min: pointer.Of(10 * time.Second),
  3133  					Max: pointer.Of(5 * time.Second),
  3134  				},
  3135  			},
  3136  			Fail: true,
  3137  			ContainsErrs: []string{
  3138  				"greater than",
  3139  			},
  3140  		},
  3141  		{
  3142  			Tmpl: &Template{
  3143  				SourcePath: "foo",
  3144  				DestPath:   "local/foo",
  3145  				ChangeMode: "noop",
  3146  				Wait: &WaitConfig{
  3147  					Min: pointer.Of(5 * time.Second),
  3148  					Max: pointer.Of(5 * time.Second),
  3149  				},
  3150  			},
  3151  			Fail: false,
  3152  		},
  3153  		{
  3154  			Tmpl: &Template{
  3155  				SourcePath: "foo",
  3156  				DestPath:   "local/foo",
  3157  				ChangeMode: "noop",
  3158  				Wait: &WaitConfig{
  3159  					Min: pointer.Of(5 * time.Second),
  3160  					Max: pointer.Of(10 * time.Second),
  3161  				},
  3162  			},
  3163  			Fail: false,
  3164  		},
  3165  		{
  3166  			Tmpl: &Template{
  3167  				SourcePath:   "foo",
  3168  				DestPath:     "local/foo",
  3169  				ChangeMode:   "script",
  3170  				ChangeScript: nil,
  3171  			},
  3172  			Fail: true,
  3173  		},
  3174  		{
  3175  			Tmpl: &Template{
  3176  				SourcePath:   "foo",
  3177  				DestPath:     "local/foo",
  3178  				ChangeMode:   "script",
  3179  				ChangeScript: &ChangeScript{Command: ""},
  3180  			},
  3181  			Fail: true,
  3182  		},
  3183  		{
  3184  			Tmpl: &Template{
  3185  				SourcePath:   "foo",
  3186  				DestPath:     "local/foo",
  3187  				ChangeMode:   "script",
  3188  				ChangeScript: &ChangeScript{Command: "/bin/foo"},
  3189  			},
  3190  			Fail: false,
  3191  		},
  3192  	}
  3193  
  3194  	for i, c := range cases {
  3195  		err := c.Tmpl.Validate()
  3196  		if err != nil {
  3197  			if !c.Fail {
  3198  				t.Fatalf("Case %d: shouldn't have failed: %v", i+1, err)
  3199  			}
  3200  
  3201  			e := err.Error()
  3202  			for _, exp := range c.ContainsErrs {
  3203  				if !strings.Contains(e, exp) {
  3204  					t.Fatalf("Case %d: should have contained error %q: %q", i+1, exp, e)
  3205  				}
  3206  			}
  3207  		} else if c.Fail {
  3208  			t.Fatalf("Case %d: should have failed", i+1)
  3209  		}
  3210  	}
  3211  }
  3212  
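        // TestTaskWaitConfig_Equals checks WaitConfig.Equal across fully set,
        // empty, partially set, and nil-versus-set field combinations.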
  3213  func TestTaskWaitConfig_Equals(t *testing.T) {
  3214  	ci.Parallel(t)
  3215  
  3216  	testCases := []struct {
  3217  		name string
  3218  		wc1  *WaitConfig
  3219  		wc2  *WaitConfig
  3220  		exp  bool
  3221  	}{
  3222  		{
  3223  			name: "all-fields",
  3224  			wc1: &WaitConfig{
  3225  				Min: pointer.Of(5 * time.Second),
  3226  				Max: pointer.Of(10 * time.Second),
  3227  			},
  3228  			wc2: &WaitConfig{
  3229  				Min: pointer.Of(5 * time.Second),
  3230  				Max: pointer.Of(10 * time.Second),
  3231  			},
  3232  			exp: true,
  3233  		},
  3234  		{
  3235  			name: "no-fields",
  3236  			wc1:  &WaitConfig{},
  3237  			wc2:  &WaitConfig{},
  3238  			exp:  true,
  3239  		},
  3240  		{
  3241  			name: "min-only",
  3242  			wc1: &WaitConfig{
  3243  				Min: pointer.Of(5 * time.Second),
  3244  			},
  3245  			wc2: &WaitConfig{
  3246  				Min: pointer.Of(5 * time.Second),
  3247  			},
  3248  			exp: true,
  3249  		},
  3250  		{
  3251  			name: "max-only",
  3252  			wc1: &WaitConfig{
  3253  				Max: pointer.Of(10 * time.Second),
  3254  			},
  3255  			wc2: &WaitConfig{
  3256  				Max: pointer.Of(10 * time.Second),
  3257  			},
  3258  			exp: true,
  3259  		},
  3260  		{
  3261  			name: "min-nil-vs-set",
  3262  			wc1: &WaitConfig{
  3263  				Min: pointer.Of(1 * time.Second),
  3264  			},
  3265  			wc2: &WaitConfig{
  3266  				Min: nil,
  3267  			},
  3268  			exp: false,
  3269  		},
  3270  		{
  3271  			name: "max-nil-vs-set",
  3272  			wc1: &WaitConfig{
  3273  				Max: pointer.Of(1 * time.Second),
  3274  			},
  3275  			wc2: &WaitConfig{
  3276  				Max: nil,
  3277  			},
  3278  			exp: false,
  3279  		},
  3280  	}
  3281  
  3282  	for _, tc := range testCases {
  3283  		t.Run(tc.name, func(t *testing.T) {
  3284  			must.Eq(t, tc.exp, tc.wc1.Equal(tc.wc2))
  3285  		})
  3286  	}
  3287  }
  3288  
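        // TestConstraint_Validate steps one constraint through each operand,
        // covering regexp, version, semver, distinct_property, distinct_hosts,
        // set_contains, missing-LTarget, and unknown-operand validation.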
  3289  func TestConstraint_Validate(t *testing.T) {
  3290  	ci.Parallel(t)
  3291  
  3292  	c := &Constraint{}
  3293  	err := c.Validate()
  3294  	require.Error(t, err, "Missing constraint operand")
  3295  
  3296  	c = &Constraint{
  3297  		LTarget: "$attr.kernel.name",
  3298  		RTarget: "linux",
  3299  		Operand: "=",
  3300  	}
  3301  	err = c.Validate()
  3302  	require.NoError(t, err)
  3303  
  3304  	// Perform additional regexp validation
  3305  	c.Operand = ConstraintRegex
  3306  	c.RTarget = "(foo"
  3307  	err = c.Validate()
  3308  	require.Error(t, err, "missing closing")
  3309  
  3310  	// Perform version validation
  3311  	c.Operand = ConstraintVersion
  3312  	c.RTarget = "~> foo"
  3313  	err = c.Validate()
  3314  	require.Error(t, err, "Malformed constraint")
  3315  
  3316  	// Perform semver validation
  3317  	c.Operand = ConstraintSemver
  3318  	err = c.Validate()
  3319  	require.Error(t, err, "Malformed constraint")
  3320  
  3321  	c.RTarget = ">= 0.6.1"
  3322  	require.NoError(t, c.Validate())
  3323  
  3324  	// Perform distinct_property validation
  3325  	c.Operand = ConstraintDistinctProperty
  3326  	c.RTarget = "0"
  3327  	err = c.Validate()
  3328  	require.Error(t, err, "count of 1 or greater")
  3329  
  3330  	c.RTarget = "-1"
  3331  	err = c.Validate()
  3332  	require.Error(t, err, "to uint64")
  3333  
  3334  	// Perform distinct_hosts validation
  3335  	c.Operand = ConstraintDistinctHosts
  3336  	c.LTarget = ""
  3337  	c.RTarget = ""
  3338  	if err := c.Validate(); err != nil {
  3339  		t.Fatalf("expected valid constraint: %v", err)
  3340  	}
  3341  
  3342  	// Perform set_contains* validation
  3343  	c.RTarget = ""
  3344  	for _, o := range []string{ConstraintSetContains, ConstraintSetContainsAll, ConstraintSetContainsAny} {
  3345  		c.Operand = o
  3346  		err = c.Validate()
  3347  		require.Error(t, err, "requires an RTarget")
  3348  	}
  3349  
  3350  	// Perform LTarget validation
  3351  	c.Operand = ConstraintRegex
  3352  	c.RTarget = "foo"
  3353  	c.LTarget = ""
  3354  	err = c.Validate()
  3355  	require.Error(t, err, "No LTarget")
  3356  
  3357  	// Perform constraint type validation
  3358  	c.Operand = "foo"
  3359  	err = c.Validate()
  3360  	require.Error(t, err, "Unknown constraint type")
  3361  }
  3362  
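        // TestAffinity_Validate table-tests affinity validation: missing or
        // unknown operands, required RTargets, weight bounds, and malformed
        // version and regexp targets.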
  3363  func TestAffinity_Validate(t *testing.T) {
  3364  	ci.Parallel(t)
  3365  
  3366  	type tc struct {
  3367  		affinity *Affinity
  3368  		err      error
  3369  		name     string
  3370  	}
  3371  	testCases := []tc{
  3372  		{
  3373  			affinity: &Affinity{},
  3374  			err:      fmt.Errorf("Missing affinity operand"),
        			name:     "missing operand",
  3375  		},
  3376  		{
  3377  			affinity: &Affinity{
  3378  				Operand: "foo",
  3379  				LTarget: "${meta.node_class}",
  3380  				Weight:  10,
  3381  			},
  3382  			err:  fmt.Errorf("Unknown affinity operator \"foo\""),
        			name: "unknown operator",
  3383  		},
  3384  		{
  3385  			affinity: &Affinity{
  3386  				Operand: "=",
  3387  				LTarget: "${meta.node_class}",
  3388  				Weight:  10,
  3389  			},
  3390  			err:  fmt.Errorf("Operator \"=\" requires an RTarget"),
        			name: "equality requires RTarget",
  3391  		},
  3392  		{
  3393  			affinity: &Affinity{
  3394  				Operand: "=",
  3395  				LTarget: "${meta.node_class}",
  3396  				RTarget: "c4",
  3397  				Weight:  0,
  3398  			},
  3399  			err:  fmt.Errorf("Affinity weight cannot be zero"),
        			name: "zero weight",
  3400  		},
  3401  		{
  3402  			affinity: &Affinity{
  3403  				Operand: "=",
  3404  				LTarget: "${meta.node_class}",
  3405  				RTarget: "c4",
  3406  				Weight:  110,
  3407  			},
  3408  			err:  fmt.Errorf("Affinity weight must be within the range [-100,100]"),
        			name: "weight out of range",
  3409  		},
  3410  		{
  3411  			affinity: &Affinity{
  3412  				Operand: "=",
  3413  				LTarget: "${node.class}",
  3414  				Weight:  10,
  3415  			},
  3416  			err:  fmt.Errorf("Operator \"=\" requires an RTarget"),
        			name: "node attribute requires RTarget",
  3417  		},
  3418  		{
  3419  			affinity: &Affinity{
  3420  				Operand: "version",
  3421  				LTarget: "${meta.os}",
  3422  				RTarget: ">>2.0",
  3423  				Weight:  110,
  3424  			},
  3425  			err:  fmt.Errorf("Version affinity is invalid"),
        			name: "invalid version target",
  3426  		},
  3427  		{
  3428  			affinity: &Affinity{
  3429  				Operand: "regexp",
  3430  				LTarget: "${meta.os}",
  3431  				RTarget: "\\K2.0",
  3432  				Weight:  100,
  3433  			},
  3434  			err:  fmt.Errorf("Regular expression failed to compile"),
        			name: "invalid regexp target",
  3435  		},
  3436  	}
  3437  
  3438  	for _, tc := range testCases {
  3439  		t.Run(tc.name, func(t *testing.T) {
  3440  			err := tc.affinity.Validate()
  3441  			if tc.err != nil {
  3442  				require.NotNil(t, err)
  3443  				require.Contains(t, err.Error(), tc.err.Error())
  3444  			} else {
  3445  				require.Nil(t, err)
  3446  			}
  3447  		})
  3448  	}
  3449  }
  3450  
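        // TestUpdateStrategy_Validate feeds an update strategy with every
        // field invalid through Validate and expects each violation to be
        // reported.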
  3451  func TestUpdateStrategy_Validate(t *testing.T) {
  3452  	ci.Parallel(t)
  3453  
  3454  	u := &UpdateStrategy{
  3455  		MaxParallel:      -1,
  3456  		HealthCheck:      "foo",
  3457  		MinHealthyTime:   -10,
  3458  		HealthyDeadline:  -15,
  3459  		ProgressDeadline: -25,
  3460  		AutoRevert:       false,
  3461  		Canary:           -1,
  3462  	}
  3463  
  3464  	err := u.Validate()
  3465  	requireErrors(t, err,
  3466  		"Invalid health check given",
  3467  		"Max parallel can not be less than zero",
  3468  		"Canary count can not be less than zero",
  3469  		"Minimum healthy time may not be less than zero",
  3470  		"Healthy deadline must be greater than zero",
  3471  		"Progress deadline must be zero or greater",
  3472  		"Minimum healthy time must be less than healthy deadline",
  3473  		"Healthy deadline must be less than progress deadline",
  3474  	)
  3475  }
  3476  
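        // TestResource_NetIndex checks that NetIndex locates networks by
        // device name and returns -1 for unknown devices.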
  3477  func TestResource_NetIndex(t *testing.T) {
  3478  	ci.Parallel(t)
  3479  
  3480  	r := &Resources{
  3481  		Networks: []*NetworkResource{
  3482  			{Device: "eth0"},
  3483  			{Device: "lo0"},
  3484  			{Device: ""},
  3485  		},
  3486  	}
  3487  	if idx := r.NetIndex(&NetworkResource{Device: "eth0"}); idx != 0 {
  3488  		t.Fatalf("Bad: %d", idx)
  3489  	}
  3490  	if idx := r.NetIndex(&NetworkResource{Device: "lo0"}); idx != 1 {
  3491  		t.Fatalf("Bad: %d", idx)
  3492  	}
  3493  	if idx := r.NetIndex(&NetworkResource{Device: "eth1"}); idx != -1 {
  3494  		t.Fatalf("Bad: %d", idx)
  3495  	}
  3496  }
  3497  
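        // TestResource_Add verifies that Add merges network resources, summing
        // MBits and appending reserved ports.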
  3498  func TestResource_Add(t *testing.T) {
  3499  	ci.Parallel(t)
  3500  
  3501  	r1 := &Resources{
  3502  		CPU:      2000,
  3503  		MemoryMB: 2048,
  3504  		DiskMB:   10000,
  3505  		Networks: []*NetworkResource{
  3506  			{
  3507  				CIDR:          "10.0.0.0/8",
  3508  				MBits:         100,
  3509  				ReservedPorts: []Port{{"ssh", 22, 0, ""}},
  3510  			},
  3511  		},
  3512  	}
  3513  	r2 := &Resources{
  3514  		CPU:      2000,
  3515  		MemoryMB: 1024,
  3516  		DiskMB:   5000,
  3517  		Networks: []*NetworkResource{
  3518  			{
  3519  				IP:            "10.0.0.1",
  3520  				MBits:         50,
  3521  				ReservedPorts: []Port{{"web", 80, 0, ""}},
  3522  			},
  3523  		},
  3524  	}
  3525  
  3526  	r1.Add(r2)
  3527  
  3528  	expect := &Resources{
  3529  		CPU:      3000,
  3530  		MemoryMB: 3072,
  3531  		DiskMB:   15000,
  3532  		Networks: []*NetworkResource{
  3533  			{
  3534  				CIDR:          "10.0.0.0/8",
  3535  				MBits:         150,
  3536  				ReservedPorts: []Port{{"ssh", 22, 0, ""}, {"web", 80, 0, ""}},
  3537  			},
  3538  		},
  3539  	}
  3540  
  3541  	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
  3542  		t.Fatalf("bad: %#v %#v", expect, r1)
  3543  	}
  3544  }
  3545  
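        // TestResource_Add_Network verifies that successive Adds collapse
        // bandwidth and dynamic ports onto a single network.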
  3546  func TestResource_Add_Network(t *testing.T) {
  3547  	ci.Parallel(t)
  3548  
  3549  	r1 := &Resources{}
  3550  	r2 := &Resources{
  3551  		Networks: []*NetworkResource{
  3552  			{
  3553  				MBits:        50,
  3554  				DynamicPorts: []Port{{"http", 0, 80, ""}, {"https", 0, 443, ""}},
  3555  			},
  3556  		},
  3557  	}
  3558  	r3 := &Resources{
  3559  		Networks: []*NetworkResource{
  3560  			{
  3561  				MBits:        25,
  3562  				DynamicPorts: []Port{{"admin", 0, 8080, ""}},
  3563  			},
  3564  		},
  3565  	}
  3566  
  3567  	r1.Add(r2)
  3568  	r1.Add(r3)
  3569  
  3570  	expect := &Resources{
  3571  		Networks: []*NetworkResource{
  3572  			{
  3573  				MBits:        75,
  3574  				DynamicPorts: []Port{{"http", 0, 80, ""}, {"https", 0, 443, ""}, {"admin", 0, 8080, ""}},
  3575  			},
  3576  		},
  3577  	}
  3578  
  3579  	if !reflect.DeepEqual(expect.Networks, r1.Networks) {
  3580  		t.Fatalf("bad: %#v %#v", expect.Networks[0], r1.Networks[0])
  3581  	}
  3582  }
  3583  
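        // TestComparableResources_Subtract checks field-wise subtraction of
        // CPU shares, reserved cores, memory, and disk; the expected result
        // shows the network block is left unchanged.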
  3584  func TestComparableResources_Subtract(t *testing.T) {
  3585  	ci.Parallel(t)
  3586  
  3587  	r1 := &ComparableResources{
  3588  		Flattened: AllocatedTaskResources{
  3589  			Cpu: AllocatedCpuResources{
  3590  				CpuShares:     2000,
  3591  				ReservedCores: []uint16{0, 1},
  3592  			},
  3593  			Memory: AllocatedMemoryResources{
  3594  				MemoryMB:    2048,
  3595  				MemoryMaxMB: 3048,
  3596  			},
  3597  			Networks: []*NetworkResource{
  3598  				{
  3599  					CIDR:          "10.0.0.0/8",
  3600  					MBits:         100,
  3601  					ReservedPorts: []Port{{"ssh", 22, 0, ""}},
  3602  				},
  3603  			},
  3604  		},
  3605  		Shared: AllocatedSharedResources{
  3606  			DiskMB: 10000,
  3607  		},
  3608  	}
  3609  
  3610  	r2 := &ComparableResources{
  3611  		Flattened: AllocatedTaskResources{
  3612  			Cpu: AllocatedCpuResources{
  3613  				CpuShares:     1000,
  3614  				ReservedCores: []uint16{0},
  3615  			},
  3616  			Memory: AllocatedMemoryResources{
  3617  				MemoryMB:    1024,
  3618  				MemoryMaxMB: 1524,
  3619  			},
  3620  			Networks: []*NetworkResource{
  3621  				{
  3622  					CIDR:          "10.0.0.0/8",
  3623  					MBits:         20,
  3624  					ReservedPorts: []Port{{"ssh", 22, 0, ""}},
  3625  				},
  3626  			},
  3627  		},
  3628  		Shared: AllocatedSharedResources{
  3629  			DiskMB: 5000,
  3630  		},
  3631  	}
  3632  	r1.Subtract(r2)
  3633  
  3634  	expect := &ComparableResources{
  3635  		Flattened: AllocatedTaskResources{
  3636  			Cpu: AllocatedCpuResources{
  3637  				CpuShares:     1000,
  3638  				ReservedCores: []uint16{1},
  3639  			},
  3640  			Memory: AllocatedMemoryResources{
  3641  				MemoryMB:    1024,
  3642  				MemoryMaxMB: 1524,
  3643  			},
  3644  			Networks: []*NetworkResource{
  3645  				{
  3646  					CIDR:          "10.0.0.0/8",
  3647  					MBits:         100,
  3648  					ReservedPorts: []Port{{"ssh", 22, 0, ""}},
  3649  				},
  3650  			},
  3651  		},
  3652  		Shared: AllocatedSharedResources{
  3653  			DiskMB: 5000,
  3654  		},
  3655  	}
  3656  
  3657  	require := require.New(t)
  3658  	require.Equal(expect, r1)
  3659  }
  3660  
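        // TestMemoryResources_Add verifies that MemoryMaxMB defaults to
        // MemoryMB when unset while summing allocated memory.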
  3661  func TestMemoryResources_Add(t *testing.T) {
  3662  	ci.Parallel(t)
  3663  
  3664  	r := &AllocatedMemoryResources{}
  3665  
  3666  	// adding plain no max
  3667  	r.Add(&AllocatedMemoryResources{
  3668  		MemoryMB: 100,
  3669  	})
  3670  	require.Equal(t, &AllocatedMemoryResources{
  3671  		MemoryMB:    100,
  3672  		MemoryMaxMB: 100,
  3673  	}, r)
  3674  
  3675  	// adding with max
  3676  	r.Add(&AllocatedMemoryResources{
  3677  		MemoryMB:    100,
  3678  		MemoryMaxMB: 200,
  3679  	})
  3680  	require.Equal(t, &AllocatedMemoryResources{
  3681  		MemoryMB:    200,
  3682  		MemoryMaxMB: 300,
  3683  	}, r)
  3684  }
  3685  
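        // TestNodeNetworkResource_Copy checks that Copy returns a deep copy,
        // including the Addresses slice, that can be mutated independently of
        // the original.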
  3686  func TestNodeNetworkResource_Copy(t *testing.T) {
  3687  	ci.Parallel(t)
  3688  
  3689  	netResource := &NodeNetworkResource{
  3690  		Mode:       "host",
  3691  		Device:     "eth0",
  3692  		MacAddress: "00:00:00:00:00:00",
  3693  		Speed:      1000,
  3694  		Addresses: []NodeNetworkAddress{
  3695  			{
  3696  				Family:        NodeNetworkAF_IPv4,
  3697  				Alias:         "default",
  3698  				Address:       "192.168.0.2",
  3699  				ReservedPorts: "22",
  3700  				Gateway:       "192.168.0.1",
  3701  			},
  3702  		},
  3703  	}
  3704  
  3705  	// Copy must be equal.
  3706  	netResourceCopy := netResource.Copy()
  3707  	require.Equal(t, netResource, netResourceCopy)
  3708  
  3709  	// Modifying copy should not modify original value.
  3710  	netResourceCopy.Mode = "alloc"
  3711  	netResourceCopy.Device = "eth1"
  3712  	netResourceCopy.MacAddress = "11:11:11:11:11:11"
  3713  	netResourceCopy.Speed = 500
  3714  	netResourceCopy.Addresses[0].Alias = "copy"
  3715  	require.NotEqual(t, netResource, netResourceCopy)
  3716  }
  3717  
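        // TestEncodeDecode round-trips a struct through Encode and Decode,
        // skipping the prefix byte that Encode writes first.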
  3718  func TestEncodeDecode(t *testing.T) {
  3719  	ci.Parallel(t)
  3720  
  3721  	type FooRequest struct {
  3722  		Foo string
  3723  		Bar int
  3724  		Baz bool
  3725  	}
  3726  	arg := &FooRequest{
  3727  		Foo: "test",
  3728  		Bar: 42,
  3729  		Baz: true,
  3730  	}
  3731  	buf, err := Encode(1, arg)
  3732  	if err != nil {
  3733  		t.Fatalf("err: %v", err)
  3734  	}
  3735  
  3736  	var out FooRequest
  3737  	err = Decode(buf[1:], &out)
  3738  	if err != nil {
  3739  		t.Fatalf("err: %v", err)
  3740  	}
  3741  
  3742  	if !reflect.DeepEqual(arg, &out) {
  3743  		t.Fatalf("bad: %#v %#v", arg, out)
  3744  	}
  3745  }
  3746  
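        // BenchmarkEncodeDecode measures the cost of encoding and decoding a
        // full test job.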
  3747  func BenchmarkEncodeDecode(b *testing.B) {
  3748  	job := testJob()
  3749  
  3750  	for i := 0; i < b.N; i++ {
  3751  		buf, err := Encode(1, job)
  3752  		if err != nil {
  3753  			b.Fatalf("err: %v", err)
  3754  		}
  3755  
  3756  		var out Job
  3757  		err = Decode(buf[1:], &out)
  3758  		if err != nil {
  3759  			b.Fatalf("err: %v", err)
  3760  		}
  3761  	}
  3762  }
  3763  
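        // TestInvalidServiceCheck covers service and check validation: unknown
        // check types, invalid characters in names, length limits, checks that
        // require a port, and a Connect sidecar service case.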
  3764  func TestInvalidServiceCheck(t *testing.T) {
  3765  	ci.Parallel(t)
  3766  
  3767  	s := Service{
  3768  		Name:      "service-name",
  3769  		Provider:  "consul",
  3770  		PortLabel: "bar",
  3771  		Checks: []*ServiceCheck{
  3772  			{
  3773  				Name: "check-name",
  3774  				Type: "lol",
  3775  			},
  3776  		},
  3777  	}
  3778  	if err := s.Validate(); err == nil {
  3779  		t.Fatalf("Service should be invalid (invalid type)")
  3780  	}
  3781  
  3782  	s = Service{
  3783  		Name:      "service.name",
  3784  		Provider:  "consul",
  3785  		PortLabel: "bar",
  3786  	}
  3787  	if err := s.ValidateName(s.Name); err == nil {
  3788  		t.Fatalf("Service should be invalid (contains a dot)")
  3789  	}
  3790  
  3791  	s = Service{
  3792  		Name:      "-my-service",
  3793  		Provider:  "consul",
  3794  		PortLabel: "bar",
  3795  	}
  3796  	if err := s.Validate(); err == nil {
  3797  		t.Fatalf("Service should be invalid (begins with a hyphen)")
  3798  	}
  3799  
  3800  	s = Service{
  3801  		Name:      "my-service-${NOMAD_META_FOO}",
  3802  		Provider:  "consul",
  3803  		PortLabel: "bar",
  3804  	}
  3805  	if err := s.Validate(); err != nil {
  3806  		t.Fatalf("Service should be valid: %v", err)
  3807  	}
  3808  
  3809  	s = Service{
  3810  		Name:      "my_service-${NOMAD_META_FOO}",
  3811  		Provider:  "consul",
  3812  		PortLabel: "bar",
  3813  	}
  3814  	if err := s.Validate(); err == nil {
  3815  		t.Fatalf("Service should be invalid (contains underscore but not in a variable name)")
  3816  	}
  3817  
  3818  	s = Service{
  3819  		Name:      "abcdef0123456789-abcdef0123456789-abcdef0123456789-abcdef0123456",
  3820  		Provider:  "consul",
  3821  		PortLabel: "bar",
  3822  	}
  3823  	if err := s.ValidateName(s.Name); err == nil {
  3824  		t.Fatalf("Service should be invalid (too long)")
  3825  	}
  3826  
  3827  	s = Service{
  3828  		Name:     "service-name",
  3829  		Provider: "consul",
  3830  		Checks: []*ServiceCheck{
  3831  			{
  3832  				Name:     "check-tcp",
  3833  				Type:     ServiceCheckTCP,
  3834  				Interval: 5 * time.Second,
  3835  				Timeout:  2 * time.Second,
  3836  			},
  3837  			{
  3838  				Name:     "check-http",
  3839  				Type:     ServiceCheckHTTP,
  3840  				Path:     "/foo",
  3841  				Interval: 5 * time.Second,
  3842  				Timeout:  2 * time.Second,
  3843  			},
  3844  		},
  3845  	}
  3846  	if err := s.Validate(); err == nil {
  3847  		t.Fatalf("Service should be invalid (tcp/http checks with no port)")
  3848  	}
  3849  
  3850  	s = Service{
  3851  		Name:     "service-name",
  3852  		Provider: "consul",
  3853  		Checks: []*ServiceCheck{
  3854  			{
  3855  				Name:     "check-script",
  3856  				Type:     ServiceCheckScript,
  3857  				Command:  "/bin/date",
  3858  				Interval: 5 * time.Second,
  3859  				Timeout:  2 * time.Second,
  3860  			},
  3861  		},
  3862  	}
  3863  	if err := s.Validate(); err != nil {
  3864  		t.Fatalf("unexpected error: %v", err)
  3865  	}
  3866  
  3867  	s = Service{
  3868  		Name:     "service-name",
  3869  		Provider: "consul",
  3870  		Checks: []*ServiceCheck{
  3871  			{
  3872  				Name:     "tcp-check",
  3873  				Type:     ServiceCheckTCP,
  3874  				Interval: 5 * time.Second,
  3875  				Timeout:  2 * time.Second,
  3876  			},
  3877  		},
  3878  		Connect: &ConsulConnect{
  3879  			SidecarService: &ConsulSidecarService{},
  3880  		},
  3881  	}
  3882  	require.Error(t, s.Validate())
  3883  }
  3884  
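        // TestDistinctCheckID asserts that checks differing only in path or
        // interval still hash to distinct IDs.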
  3885  func TestDistinctCheckID(t *testing.T) {
  3886  	ci.Parallel(t)
  3887  
  3888  	c1 := ServiceCheck{
  3889  		Name:     "web-health",
  3890  		Type:     "http",
  3891  		Path:     "/health",
  3892  		Interval: 2 * time.Second,
  3893  		Timeout:  3 * time.Second,
  3894  	}
  3895  	c2 := ServiceCheck{
  3896  		Name:     "web-health",
  3897  		Type:     "http",
  3898  		Path:     "/health1",
  3899  		Interval: 2 * time.Second,
  3900  		Timeout:  3 * time.Second,
  3901  	}
  3902  
  3903  	c3 := ServiceCheck{
  3904  		Name:     "web-health",
  3905  		Type:     "http",
  3906  		Path:     "/health",
  3907  		Interval: 4 * time.Second,
  3908  		Timeout:  3 * time.Second,
  3909  	}
  3910  	serviceID := "123"
  3911  	c1Hash := c1.Hash(serviceID)
  3912  	c2Hash := c2.Hash(serviceID)
  3913  	c3Hash := c3.Hash(serviceID)
  3914  
  3915  	if c1Hash == c2Hash || c1Hash == c3Hash || c3Hash == c2Hash {
  3916  		t.Fatalf("Checks need to be unique, c1: %s, c2: %s, c3: %s", c1Hash, c2Hash, c3Hash)
  3917  	}
  3918  
  3919  }
  3920  
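        // TestService_Canonicalize table-tests service name interpolation
        // (${TASK}, ${JOB}, ${TASKGROUP}, ${BASE}) and namespace defaulting:
        // "default" for the consul provider, the job namespace for the nomad
        // provider.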
  3921  func TestService_Canonicalize(t *testing.T) {
  3922  	ci.Parallel(t)
  3923  
  3924  	testCases := []struct {
  3925  		inputService          *Service
  3926  		inputJob              string
  3927  		inputTaskGroup        string
  3928  		inputTask             string
  3929  		inputJobNamespace     string
  3930  		expectedOutputService *Service
  3931  		name                  string
  3932  	}{
  3933  		{
  3934  			inputService: &Service{
  3935  				Name: "${TASK}-db",
  3936  			},
  3937  			inputJob:          "example",
  3938  			inputTaskGroup:    "cache",
  3939  			inputTask:         "redis",
  3940  			inputJobNamespace: "platform",
  3941  			expectedOutputService: &Service{
  3942  				Name:      "redis-db",
  3943  				Provider:  "consul",
  3944  				Namespace: "default",
  3945  				TaskName:  "redis",
  3946  			},
  3947  			name: "interpolate task in name",
  3948  		},
  3949  		{
  3950  			inputService: &Service{
  3951  				Name: "db",
  3952  			},
  3953  			inputJob:          "example",
  3954  			inputTaskGroup:    "cache",
  3955  			inputTask:         "redis",
  3956  			inputJobNamespace: "platform",
  3957  			expectedOutputService: &Service{
  3958  				Name:      "db",
  3959  				Provider:  "consul",
  3960  				Namespace: "default",
  3961  				TaskName:  "redis",
  3962  			},
  3963  			name: "no interpolation in name",
  3964  		},
  3965  		{
  3966  			inputService: &Service{
  3967  				Name: "${JOB}-${TASKGROUP}-${TASK}-db",
  3968  			},
  3969  			inputJob:          "example",
  3970  			inputTaskGroup:    "cache",
  3971  			inputTask:         "redis",
  3972  			inputJobNamespace: "platform",
  3973  			expectedOutputService: &Service{
  3974  				Name:      "example-cache-redis-db",
  3975  				Provider:  "consul",
  3976  				Namespace: "default",
  3977  				TaskName:  "redis",
  3978  			},
  3979  			name: "interpolate job, taskgroup and task in name",
  3980  		},
  3981  		{
  3982  			inputService: &Service{
  3983  				Name: "${BASE}-db",
  3984  			},
  3985  			inputJob:          "example",
  3986  			inputTaskGroup:    "cache",
  3987  			inputTask:         "redis",
  3988  			inputJobNamespace: "platform",
  3989  			expectedOutputService: &Service{
  3990  				Name:      "example-cache-redis-db",
  3991  				Provider:  "consul",
  3992  				Namespace: "default",
  3993  				TaskName:  "redis",
  3994  			},
  3995  			name: "interpolate base in name",
  3996  		},
  3997  		{
  3998  			inputService: &Service{
  3999  				Name:     "db",
  4000  				Provider: "nomad",
  4001  			},
  4002  			inputJob:          "example",
  4003  			inputTaskGroup:    "cache",
  4004  			inputTask:         "redis",
  4005  			inputJobNamespace: "platform",
  4006  			expectedOutputService: &Service{
  4007  				Name:      "db",
  4008  				Provider:  "nomad",
  4009  				Namespace: "platform",
  4010  				TaskName:  "redis",
  4011  			},
  4012  			name: "nomad provider",
  4013  		},
  4014  	}
  4015  
  4016  	for _, tc := range testCases {
  4017  		t.Run(tc.name, func(t *testing.T) {
  4018  			tc.inputService.Canonicalize(tc.inputJob, tc.inputTaskGroup, tc.inputTask, tc.inputJobNamespace)
  4019  			assert.Equal(t, tc.expectedOutputService, tc.inputService)
  4020  		})
  4021  	}
  4022  }
  4023  
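        // TestJob_ExpandServiceNames asserts that Job.Canonicalize expands ${BASE}
        // in service names while leaving literal names untouched.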
  4024  func TestJob_ExpandServiceNames(t *testing.T) {
  4025  	ci.Parallel(t)
  4026  
  4027  	j := &Job{
  4028  		Name: "my-job",
  4029  		TaskGroups: []*TaskGroup{
  4030  			{
  4031  				Name: "web",
  4032  				Tasks: []*Task{
  4033  					{
  4034  						Name: "frontend",
  4035  						Services: []*Service{
  4036  							{
  4037  								Name: "${BASE}-default",
  4038  							},
  4039  							{
  4040  								Name: "jmx",
  4041  							},
  4042  						},
  4043  					},
  4044  				},
  4045  			},
  4046  			{
  4047  				Name: "admin",
  4048  				Tasks: []*Task{
  4049  					{
  4050  						Name: "admin-web",
  4051  					},
  4052  				},
  4053  			},
  4054  		},
  4055  	}
  4056  
  4057  	j.Canonicalize()
  4058  
  4059  	service1Name := j.TaskGroups[0].Tasks[0].Services[0].Name
  4060  	if service1Name != "my-job-web-frontend-default" {
  4061  		t.Fatalf("Expected Service Name: %s, Actual: %s", "my-job-web-frontend-default", service1Name)
  4062  	}
  4063  
  4064  	service2Name := j.TaskGroups[0].Tasks[0].Services[1].Name
  4065  	if service2Name != "jmx" {
  4066  		t.Fatalf("Expected Service Name: %s, Actual: %s", "jmx", service2Name)
  4067  	}
  4068  
  4069  }
  4070  
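        // TestJob_CombinedTaskMeta asserts that task meta overrides group meta,
        // which in turn overrides job meta.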
  4071  func TestJob_CombinedTaskMeta(t *testing.T) {
  4072  	ci.Parallel(t)
  4073  
  4074  	j := &Job{
  4075  		Meta: map[string]string{
  4076  			"job_test":   "job",
  4077  			"group_test": "job",
  4078  			"task_test":  "job",
  4079  		},
  4080  		TaskGroups: []*TaskGroup{
  4081  			{
  4082  				Name: "group",
  4083  				Meta: map[string]string{
  4084  					"group_test": "group",
  4085  					"task_test":  "group",
  4086  				},
  4087  				Tasks: []*Task{
  4088  					{
  4089  						Name: "task",
  4090  						Meta: map[string]string{
  4091  							"task_test": "task",
  4092  						},
  4093  					},
  4094  				},
  4095  			},
  4096  		},
  4097  	}
  4098  
  4099  	require := require.New(t)
  4100  	require.EqualValues(map[string]string{
  4101  		"job_test":   "job",
  4102  		"group_test": "group",
  4103  		"task_test":  "task",
  4104  	}, j.CombinedTaskMeta("group", "task"))
  4105  	require.EqualValues(map[string]string{
  4106  		"job_test":   "job",
  4107  		"group_test": "group",
  4108  		"task_test":  "group",
  4109  	}, j.CombinedTaskMeta("group", ""))
  4110  	require.EqualValues(map[string]string{
  4111  		"job_test":   "job",
  4112  		"group_test": "job",
  4113  		"task_test":  "job",
  4114  	}, j.CombinedTaskMeta("", "task"))
  4115  
  4116  }
  4117  
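        // TestPeriodicConfig_EnabledInvalid asserts that an enabled periodic config
        // requires a spec, a spec type, and a valid time zone.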
  4118  func TestPeriodicConfig_EnabledInvalid(t *testing.T) {
  4119  	ci.Parallel(t)
  4120  
  4121  	// Create a config that is enabled but with no spec or type specified.
  4122  	p := &PeriodicConfig{Enabled: true}
  4123  	if err := p.Validate(); err == nil {
  4124  		t.Fatal("Enabled PeriodicConfig with no spec or type shouldn't be valid")
  4125  	}
  4126  
  4127  	// Create a config that is enabled, with a spec but no type specified.
  4128  	p = &PeriodicConfig{Enabled: true, Spec: "foo"}
  4129  	if err := p.Validate(); err == nil {
  4130  		t.Fatal("Enabled PeriodicConfig with no spec type shouldn't be valid")
  4131  	}
  4132  
  4133  	// Create a config that is enabled, with a spec type but no spec specified.
  4134  	p = &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron}
  4135  	if err := p.Validate(); err == nil {
  4136  		t.Fatal("Enabled PeriodicConfig with no spec shouldn't be valid")
  4137  	}
  4138  
  4139  	// Create a config that is enabled, with a bad time zone.
  4140  	p = &PeriodicConfig{Enabled: true, TimeZone: "FOO"}
  4141  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "time zone") {
  4142  		t.Fatalf("Enabled PeriodicConfig with bad time zone shouldn't be valid: %v", err)
  4143  	}
  4144  }
  4145  
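        // TestPeriodicConfig_InvalidCron asserts that malformed cron specs fail
        // validation.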
  4146  func TestPeriodicConfig_InvalidCron(t *testing.T) {
  4147  	ci.Parallel(t)
  4148  
  4149  	specs := []string{"foo", "* *", "@foo"}
  4150  	for _, spec := range specs {
  4151  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  4152  		p.Canonicalize()
  4153  		if err := p.Validate(); err == nil {
  4154  			t.Fatalf("expected error for invalid cron spec %q", spec)
  4155  		}
  4156  	}
  4157  }
  4158  
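        // TestPeriodicConfig_ValidCron asserts that well-formed cron specs pass
        // validation.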
  4159  func TestPeriodicConfig_ValidCron(t *testing.T) {
  4160  	ci.Parallel(t)
  4161  
  4162  	specs := []string{"0 0 29 2 *", "@hourly", "0 0-15 * * *"}
  4163  	for _, spec := range specs {
  4164  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: spec}
  4165  		p.Canonicalize()
  4166  		if err := p.Validate(); err != nil {
  4167  			t.Fatalf("unexpected error for valid cron spec %q: %v", spec, err)
  4168  		}
  4169  	}
  4170  }
  4171  
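        // TestPeriodicConfig_NextCron asserts that Next returns the expected launch
        // time, the zero time when no future launch exists, and an error for
        // unparseable specs.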
  4172  func TestPeriodicConfig_NextCron(t *testing.T) {
  4173  	ci.Parallel(t)
  4174  
  4175  	from := time.Date(2009, time.November, 10, 23, 22, 30, 0, time.UTC)
  4176  
  4177  	cases := []struct {
  4178  		spec     string
  4179  		nextTime time.Time
  4180  		errorMsg string
  4181  	}{
  4182  		{
  4183  			spec:     "0 0 29 2 * 1980",
  4184  			nextTime: time.Time{},
  4185  		},
  4186  		{
  4187  			spec:     "*/5 * * * *",
  4188  			nextTime: time.Date(2009, time.November, 10, 23, 25, 0, 0, time.UTC),
  4189  		},
  4190  		{
  4191  			spec:     "1 15-0 *",
  4192  			nextTime: time.Time{},
  4193  			errorMsg: "failed parsing cron expression",
  4194  		},
  4195  	}
  4196  
  4197  	for i, c := range cases {
  4198  		t.Run(fmt.Sprintf("case %d: %s", i, c.spec), func(t *testing.T) {
  4199  			p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: c.spec}
  4200  			p.Canonicalize()
  4201  			n, err := p.Next(from)
  4202  
  4203  			require.Equal(t, c.nextTime, n)
  4204  			if c.errorMsg == "" {
  4205  				require.NoError(t, err)
  4206  			} else {
  4207  				require.Error(t, err)
  4208  				require.Contains(t, err.Error(), c.errorMsg)
  4209  			}
  4210  		})
  4211  	}
  4212  }
  4213  
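        // TestPeriodicConfig_ValidTimeZone asserts that valid IANA time zone names
        // are accepted.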
  4214  func TestPeriodicConfig_ValidTimeZone(t *testing.T) {
  4215  	ci.Parallel(t)
  4216  
  4217  	zones := []string{"Africa/Abidjan", "America/Chicago", "Europe/Minsk", "UTC"}
  4218  	for _, zone := range zones {
  4219  		p := &PeriodicConfig{Enabled: true, SpecType: PeriodicSpecCron, Spec: "0 0 29 2 * 1980", TimeZone: zone}
  4220  		p.Canonicalize()
  4221  		if err := p.Validate(); err != nil {
  4222  			t.Fatalf("valid time zone %q errored: %v", zone, err)
  4223  		}
  4224  	}
  4225  }
  4226  
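        // TestPeriodicConfig_DST asserts that next launch times are computed
        // correctly across the spring-forward DST transition.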
  4227  func TestPeriodicConfig_DST(t *testing.T) {
  4228  	ci.Parallel(t)
  4229  
  4230  	require := require.New(t)
  4231  
  4232  	// On Sun, Mar 12, 2:00 am 2017: +1 hour UTC
  4233  	p := &PeriodicConfig{
  4234  		Enabled:  true,
  4235  		SpecType: PeriodicSpecCron,
  4236  		Spec:     "0 2 11-13 3 * 2017",
  4237  		TimeZone: "America/Los_Angeles",
  4238  	}
  4239  	p.Canonicalize()
  4240  
  4241  	t1 := time.Date(2017, time.March, 11, 1, 0, 0, 0, p.location)
  4242  	t2 := time.Date(2017, time.March, 12, 1, 0, 0, 0, p.location)
  4243  
  4244  	// E1 is an 8 hour adjustment, E2 is a 7 hour adjustment
  4245  	e1 := time.Date(2017, time.March, 11, 10, 0, 0, 0, time.UTC)
  4246  	e2 := time.Date(2017, time.March, 13, 9, 0, 0, 0, time.UTC)
  4247  
  4248  	n1, err := p.Next(t1)
  4249  	require.Nil(err)
  4250  
  4251  	n2, err := p.Next(t2)
  4252  	require.Nil(err)
  4253  
  4254  	require.Equal(e1, n1.UTC())
  4255  	require.Equal(e2, n2.UTC())
  4256  }
  4257  
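        // TestTaskLifecycleConfig_Validate asserts that a task lifecycle config
        // must name a hook.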
  4258  func TestTaskLifecycleConfig_Validate(t *testing.T) {
  4259  	ci.Parallel(t)
  4260  
  4261  	testCases := []struct {
  4262  		name string
  4263  		tlc  *TaskLifecycleConfig
  4264  		err  error
  4265  	}{
  4266  		{
  4267  			name: "prestart completed",
  4268  			tlc: &TaskLifecycleConfig{
  4269  				Hook:    "prestart",
  4270  				Sidecar: false,
  4271  			},
  4272  			err: nil,
  4273  		},
  4274  		{
  4275  			name: "prestart running",
  4276  			tlc: &TaskLifecycleConfig{
  4277  				Hook:    "prestart",
  4278  				Sidecar: true,
  4279  			},
  4280  			err: nil,
  4281  		},
  4282  		{
  4283  			name: "no hook",
  4284  			tlc: &TaskLifecycleConfig{
  4285  				Sidecar: true,
  4286  			},
  4287  			err: fmt.Errorf("no lifecycle hook provided"),
  4288  		},
  4289  	}
  4290  
  4291  	for _, tc := range testCases {
  4292  		t.Run(tc.name, func(t *testing.T) {
  4293  			err := tc.tlc.Validate()
  4294  			if tc.err != nil {
  4295  				require.Error(t, err)
  4296  				require.Contains(t, err.Error(), tc.err.Error())
  4297  			} else {
  4298  				require.Nil(t, err)
  4299  			}
  4300  		})
  4301  
  4302  	}
  4303  }
  4304  
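        // TestRestartPolicy_Validate asserts that restart policies reject ambiguous
        // settings, unknown modes, and intervals too small for the configured
        // attempts and delay.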
  4305  func TestRestartPolicy_Validate(t *testing.T) {
  4306  	ci.Parallel(t)
  4307  
  4308  	// Policy with acceptable restart options passes
  4309  	p := &RestartPolicy{
  4310  		Mode:     RestartPolicyModeFail,
  4311  		Attempts: 0,
  4312  		Interval: 5 * time.Second,
  4313  	}
  4314  	if err := p.Validate(); err != nil {
  4315  		t.Fatalf("err: %v", err)
  4316  	}
  4317  
  4318  	// Policy with ambiguous restart options fails
  4319  	p = &RestartPolicy{
  4320  		Mode:     RestartPolicyModeDelay,
  4321  		Attempts: 0,
  4322  		Interval: 5 * time.Second,
  4323  	}
  4324  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "ambiguous") {
  4325  		t.Fatalf("expect ambiguity error, got: %v", err)
  4326  	}
  4327  
  4328  	// Bad policy mode fails
  4329  	p = &RestartPolicy{
  4330  		Mode:     "nope",
  4331  		Attempts: 1,
  4332  		Interval: 5 * time.Second,
  4333  	}
  4334  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "mode") {
  4335  		t.Fatalf("expect mode error, got: %v", err)
  4336  	}
  4337  
  4338  	// Fails when attempts*delay does not fit inside interval
  4339  	p = &RestartPolicy{
  4340  		Mode:     RestartPolicyModeDelay,
  4341  		Attempts: 3,
  4342  		Delay:    5 * time.Second,
  4343  		Interval: 5 * time.Second,
  4344  	}
  4345  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "can't restart") {
  4346  		t.Fatalf("expect restart interval error, got: %v", err)
  4347  	}
  4348  
  4349  	// Fails when the interval is too small
  4350  	p = &RestartPolicy{
  4351  		Mode:     RestartPolicyModeDelay,
  4352  		Attempts: 3,
  4353  		Delay:    5 * time.Second,
  4354  		Interval: 2 * time.Second,
  4355  	}
  4356  	if err := p.Validate(); err == nil || !strings.Contains(err.Error(), "Interval can not be less than") {
  4357  		t.Fatalf("expect interval too small error, got: %v", err)
  4358  	}
  4359  }
  4360  
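        // TestReschedulePolicy_Validate asserts that reschedule policies are checked
        // for valid delay functions, delay bounds, and attempt/interval combinations.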
  4361  func TestReschedulePolicy_Validate(t *testing.T) {
  4362  	ci.Parallel(t)
  4363  	type testCase struct {
  4364  		desc             string
  4365  		ReschedulePolicy *ReschedulePolicy
  4366  		errors           []error
  4367  	}
  4368  	testCases := []testCase{
  4369  		{
  4370  			desc: "Nil",
  4371  		},
  4372  		{
  4373  			desc: "Disabled",
  4374  			ReschedulePolicy: &ReschedulePolicy{
  4375  				Attempts: 0,
  4376  				Interval: 0 * time.Second},
  4377  		},
  4378  		{
  4379  			desc: "Disabled",
  4380  			ReschedulePolicy: &ReschedulePolicy{
  4381  				Attempts: -1,
  4382  				Interval: 5 * time.Minute},
  4383  		},
  4384  		{
  4385  			desc: "Valid Linear Delay",
  4386  			ReschedulePolicy: &ReschedulePolicy{
  4387  				Attempts:      1,
  4388  				Interval:      5 * time.Minute,
  4389  				Delay:         10 * time.Second,
  4390  				DelayFunction: "constant"},
  4391  		},
  4392  		{
  4393  			desc: "Valid Exponential Delay",
  4394  			ReschedulePolicy: &ReschedulePolicy{
  4395  				Attempts:      5,
  4396  				Interval:      1 * time.Hour,
  4397  				Delay:         30 * time.Second,
  4398  				MaxDelay:      5 * time.Minute,
  4399  				DelayFunction: "exponential"},
  4400  		},
  4401  		{
  4402  			desc: "Valid Fibonacci Delay",
  4403  			ReschedulePolicy: &ReschedulePolicy{
  4404  				Attempts:      5,
  4405  				Interval:      15 * time.Minute,
  4406  				Delay:         10 * time.Second,
  4407  				MaxDelay:      5 * time.Minute,
  4408  				DelayFunction: "fibonacci"},
  4409  		},
  4410  		{
  4411  			desc: "Invalid delay function",
  4412  			ReschedulePolicy: &ReschedulePolicy{
  4413  				Attempts:      1,
  4414  				Interval:      1 * time.Second,
  4415  				DelayFunction: "blah"},
  4416  			errors: []error{
  4417  				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
  4418  				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  4419  				fmt.Errorf("Invalid delay function %q, must be one of %q", "blah", RescheduleDelayFunctions),
  4420  			},
  4421  		},
  4422  		{
  4423  			desc: "Invalid delay ceiling",
  4424  			ReschedulePolicy: &ReschedulePolicy{
  4425  				Attempts:      1,
  4426  				Interval:      8 * time.Second,
  4427  				DelayFunction: "exponential",
  4428  				Delay:         15 * time.Second,
  4429  				MaxDelay:      5 * time.Second},
  4430  			errors: []error{
  4431  				fmt.Errorf("Max Delay cannot be less than Delay %v (got %v)",
  4432  					15*time.Second, 5*time.Second),
  4433  			},
  4434  		},
  4435  		{
  4436  			desc: "Invalid delay and interval",
  4437  			ReschedulePolicy: &ReschedulePolicy{
  4438  				Attempts:      1,
  4439  				Interval:      1 * time.Second,
  4440  				DelayFunction: "constant"},
  4441  			errors: []error{
  4442  				fmt.Errorf("Interval cannot be less than %v (got %v)", ReschedulePolicyMinInterval, time.Second),
  4443  				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  4444  			},
  4445  		}, {
  4446  			// Should suggest 2h40m as the interval
  4447  			desc: "Invalid Attempts - linear delay",
  4448  			ReschedulePolicy: &ReschedulePolicy{
  4449  				Attempts:      10,
  4450  				Interval:      1 * time.Hour,
  4451  				Delay:         20 * time.Minute,
  4452  				DelayFunction: "constant",
  4453  			},
  4454  			errors: []error{
  4455  				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v and"+
  4456  					" delay function %q", 3, time.Hour, 20*time.Minute, "constant"),
  4457  				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
  4458  					200*time.Minute, 10),
  4459  			},
  4460  		},
  4461  		{
  4462  			// Should suggest 4h40m as the interval
  4463  			// Delay progression in minutes {5, 10, 20, 40, 40, 40, 40, 40, 40, 40}
  4464  			desc: "Invalid Attempts - exponential delay",
  4465  			ReschedulePolicy: &ReschedulePolicy{
  4466  				Attempts:      10,
  4467  				Interval:      30 * time.Minute,
  4468  				Delay:         5 * time.Minute,
  4469  				MaxDelay:      40 * time.Minute,
  4470  				DelayFunction: "exponential",
  4471  			},
  4472  			errors: []error{
  4473  				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
  4474  					"delay function %q, and delay ceiling %v", 3, 30*time.Minute, 5*time.Minute,
  4475  					"exponential", 40*time.Minute),
  4476  				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
  4477  					280*time.Minute, 10),
  4478  			},
  4479  		},
  4480  		{
  4481  			// Should suggest 8h as the interval
  4482  			// Delay progression in minutes {20, 20, 40, 60, 80, 80, 80, 80, 80, 80}
  4483  			desc: "Invalid Attempts - fibonacci delay",
  4484  			ReschedulePolicy: &ReschedulePolicy{
  4485  				Attempts:      10,
  4486  				Interval:      1 * time.Hour,
  4487  				Delay:         20 * time.Minute,
  4488  				MaxDelay:      80 * time.Minute,
  4489  				DelayFunction: "fibonacci",
  4490  			},
  4491  			errors: []error{
  4492  				fmt.Errorf("Nomad can only make %v attempts in %v with initial delay %v, "+
  4493  					"delay function %q, and delay ceiling %v", 4, 1*time.Hour, 20*time.Minute,
  4494  					"fibonacci", 80*time.Minute),
  4495  				fmt.Errorf("Set the interval to at least %v to accommodate %v attempts",
  4496  					480*time.Minute, 10),
  4497  			},
  4498  		},
  4499  		{
  4500  			desc: "Ambiguous Unlimited config, has both attempts and unlimited set",
  4501  			ReschedulePolicy: &ReschedulePolicy{
  4502  				Attempts:      1,
  4503  				Unlimited:     true,
  4504  				DelayFunction: "exponential",
  4505  				Delay:         5 * time.Minute,
  4506  				MaxDelay:      1 * time.Hour,
  4507  			},
  4508  			errors: []error{
  4509  				fmt.Errorf("Interval must be a non zero value if Attempts > 0"),
  4510  				fmt.Errorf("Reschedule Policy with Attempts = %v, Interval = %v, and Unlimited = %v is ambiguous", 1, time.Duration(0), true),
  4511  			},
  4512  		},
  4513  		{
  4514  			desc: "Invalid Unlimited config",
  4515  			ReschedulePolicy: &ReschedulePolicy{
  4516  				Attempts:      1,
  4517  				Interval:      1 * time.Second,
  4518  				Unlimited:     true,
  4519  				DelayFunction: "exponential",
  4520  			},
  4521  			errors: []error{
  4522  				fmt.Errorf("Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  4523  				fmt.Errorf("Max Delay cannot be less than %v (got %v)", ReschedulePolicyMinDelay, 0*time.Second),
  4524  			},
  4525  		},
  4526  		{
  4527  			desc: "Valid Unlimited config",
  4528  			ReschedulePolicy: &ReschedulePolicy{
  4529  				Unlimited:     true,
  4530  				DelayFunction: "exponential",
  4531  				Delay:         5 * time.Second,
  4532  				MaxDelay:      1 * time.Hour,
  4533  			},
  4534  		},
  4535  	}
  4536  
  4537  	for _, tc := range testCases {
  4538  		t.Run(tc.desc, func(t *testing.T) {
  4539  			require := require.New(t)
  4540  			gotErr := tc.ReschedulePolicy.Validate()
  4541  			if tc.errors != nil {
  4542  				// Validate all errors
  4543  				for _, err := range tc.errors {
  4544  					require.Contains(gotErr.Error(), err.Error())
  4545  				}
  4546  			} else {
  4547  				require.Nil(gotErr)
  4548  			}
  4549  		})
  4550  	}
  4551  }
  4552  
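        // TestAllocation_Index asserts that the allocation index is parsed from the
        // bracketed suffix of the allocation name.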
  4553  func TestAllocation_Index(t *testing.T) {
  4554  	ci.Parallel(t)
  4555  
  4556  	a1 := Allocation{
  4557  		Name:      "example.cache[1]",
  4558  		TaskGroup: "cache",
  4559  		JobID:     "example",
  4560  		Job: &Job{
  4561  			ID:         "example",
  4562  			TaskGroups: []*TaskGroup{{Name: "cache"}}},
  4563  	}
  4564  	e1 := uint(1)
  4565  	a2 := a1.Copy()
  4566  	a2.Name = "example.cache[713127]"
  4567  	e2 := uint(713127)
  4568  
  4569  	if a1.Index() != e1 || a2.Index() != e2 {
  4570  		t.Fatalf("Got %d and %d", a1.Index(), a2.Index())
  4571  	}
  4572  }
  4573  
  4574  func TestTaskArtifact_Validate_Source(t *testing.T) {
  4575  	ci.Parallel(t)
  4576  
  4577  	valid := &TaskArtifact{GetterSource: "google.com"}
  4578  	if err := valid.Validate(); err != nil {
  4579  		t.Fatalf("unexpected error: %v", err)
  4580  	}
  4581  }
  4582  
  4583  func TestTaskArtifact_Validate_Dest(t *testing.T) {
  4584  	ci.Parallel(t)
  4585  
  4586  	valid := &TaskArtifact{GetterSource: "google.com"}
  4587  	if err := valid.Validate(); err != nil {
  4588  		t.Fatalf("unexpected error: %v", err)
  4589  	}
  4590  
  4591  	valid.RelativeDest = "local/"
  4592  	if err := valid.Validate(); err != nil {
  4593  		t.Fatalf("unexpected error: %v", err)
  4594  	}
  4595  
  4596  	valid.RelativeDest = "local/.."
  4597  	if err := valid.Validate(); err != nil {
  4598  		t.Fatalf("unexpected error: %v", err)
  4599  	}
  4600  
  4601  	valid.RelativeDest = "local/../../.."
  4602  	if err := valid.Validate(); err == nil {
  4603  		t.Fatal("expected error: destination escapes allocation directory")
  4604  	}
  4605  }
  4606  
  4607  // TestTaskArtifact_Hash asserts an artifact's hash changes when any of the
  4608  // fields change.
  4609  func TestTaskArtifact_Hash(t *testing.T) {
  4610  	ci.Parallel(t)
  4611  
  4612  	cases := []TaskArtifact{
  4613  		{},
  4614  		{
  4615  			GetterSource: "a",
  4616  		},
  4617  		{
  4618  			GetterSource: "b",
  4619  		},
  4620  		{
  4621  			GetterSource:  "b",
  4622  			GetterOptions: map[string]string{"c": "c"},
  4623  		},
  4624  		{
  4625  			GetterSource: "b",
  4626  			GetterOptions: map[string]string{
  4627  				"c": "c",
  4628  				"d": "d",
  4629  			},
  4630  		},
  4631  		{
  4632  			GetterSource: "b",
  4633  			GetterOptions: map[string]string{
  4634  				"c": "c",
  4635  				"d": "e",
  4636  			},
  4637  		},
  4638  		{
  4639  			GetterSource: "b",
  4640  			GetterOptions: map[string]string{
  4641  				"c": "c",
  4642  				"d": "e",
  4643  			},
  4644  			GetterMode: "f",
  4645  		},
  4646  		{
  4647  			GetterSource: "b",
  4648  			GetterOptions: map[string]string{
  4649  				"c": "c",
  4650  				"d": "e",
  4651  			},
  4652  			GetterMode: "g",
  4653  		},
  4654  		{
  4655  			GetterSource: "b",
  4656  			GetterOptions: map[string]string{
  4657  				"c": "c",
  4658  				"d": "e",
  4659  			},
  4660  			GetterMode:   "g",
  4661  			RelativeDest: "h",
  4662  		},
  4663  		{
  4664  			GetterSource: "b",
  4665  			GetterOptions: map[string]string{
  4666  				"c": "c",
  4667  				"d": "e",
  4668  			},
  4669  			GetterMode:   "g",
  4670  			RelativeDest: "i",
  4671  		},
  4672  	}
  4673  
  4674  	// Map of hash to source
  4675  	hashes := make(map[string]TaskArtifact, len(cases))
  4676  	for _, tc := range cases {
  4677  		h := tc.Hash()
  4678  
  4679  		// Hash should be deterministic
  4680  		require.Equal(t, h, tc.Hash())
  4681  
  4682  		// Hash should be unique
  4683  		if orig, ok := hashes[h]; ok {
  4684  			require.Failf(t, "hashes match", "artifact 1: %s\n\n artifact 2: %s\n",
  4685  				pretty.Sprint(tc), pretty.Sprint(orig),
  4686  			)
  4687  		}
  4688  		hashes[h] = tc
  4689  	}
  4690  
  4691  	require.Len(t, hashes, len(cases))
  4692  }
  4693  
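        // TestAllocation_ShouldMigrate asserts that data is migrated only when the
        // group's ephemeral disk requests it and a previous allocation exists.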
  4694  func TestAllocation_ShouldMigrate(t *testing.T) {
  4695  	ci.Parallel(t)
  4696  
  4697  	testCases := []struct {
  4698  		name   string
  4699  		expect bool
  4700  		alloc  Allocation
  4701  	}{
  4702  		{
  4703  			name:   "should migrate with previous alloc and migrate=true sticky=true",
  4704  			expect: true,
  4705  			alloc: Allocation{
  4706  				PreviousAllocation: "123",
  4707  				TaskGroup:          "foo",
  4708  				Job: &Job{
  4709  					TaskGroups: []*TaskGroup{
  4710  						{
  4711  							Name: "foo",
  4712  							EphemeralDisk: &EphemeralDisk{
  4713  								Migrate: true,
  4714  								Sticky:  true,
  4715  							},
  4716  						},
  4717  					},
  4718  				},
  4719  			},
  4720  		},
  4721  		{
  4722  			name:   "should not migrate with migrate=false sticky=false",
  4723  			expect: false,
  4724  			alloc: Allocation{
  4725  				PreviousAllocation: "123",
  4726  				TaskGroup:          "foo",
  4727  				Job: &Job{
  4728  					TaskGroups: []*TaskGroup{
  4729  						{
  4730  							Name:          "foo",
  4731  							EphemeralDisk: &EphemeralDisk{},
  4732  						},
  4733  					},
  4734  				},
  4735  			},
  4736  		},
  4737  		{
  4738  			name:   "should migrate with migrate=true sticky=false",
  4739  			expect: true,
  4740  			alloc: Allocation{
  4741  				PreviousAllocation: "123",
  4742  				TaskGroup:          "foo",
  4743  				Job: &Job{
  4744  					TaskGroups: []*TaskGroup{
  4745  						{
  4746  							Name: "foo",
  4747  							EphemeralDisk: &EphemeralDisk{
  4748  								Sticky:  false,
  4749  								Migrate: true,
  4750  							},
  4751  						},
  4752  					},
  4753  				},
  4754  			},
  4755  		},
  4756  		{
  4757  			name:   "should not migrate with nil ephemeral disk",
  4758  			expect: false,
  4759  			alloc: Allocation{
  4760  				PreviousAllocation: "123",
  4761  				TaskGroup:          "foo",
  4762  				Job: &Job{
  4763  					TaskGroups: []*TaskGroup{{Name: "foo"}},
  4764  				},
  4765  			},
  4766  		},
  4767  		{
  4768  			name:   "should not migrate without previous alloc",
  4769  			expect: false,
  4770  			alloc: Allocation{
  4771  				TaskGroup: "foo",
  4772  				Job: &Job{
  4773  					TaskGroups: []*TaskGroup{
  4774  						{
  4775  							Name: "foo",
  4776  							EphemeralDisk: &EphemeralDisk{
  4777  								Migrate: true,
  4778  								Sticky:  true,
  4779  							},
  4780  						},
  4781  					},
  4782  				},
  4783  			},
  4784  		},
  4785  	}
  4786  
  4787  	for _, tc := range testCases {
  4788  		t.Run(tc.name, func(t *testing.T) {
  4789  			must.Eq(t, tc.expect, tc.alloc.ShouldMigrate())
  4790  		})
  4791  	}
  4792  }
  4793  
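        // TestTaskArtifact_Validate_Checksum asserts that the checksum getter option
        // must be of the form "type:value" with a supported type and a digest of the
        // correct length.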
  4794  func TestTaskArtifact_Validate_Checksum(t *testing.T) {
  4795  	ci.Parallel(t)
  4796  
  4797  	cases := []struct {
  4798  		Input *TaskArtifact
  4799  		Err   bool
  4800  	}{
  4801  		{
  4802  			&TaskArtifact{
  4803  				GetterSource: "foo.com",
  4804  				GetterOptions: map[string]string{
  4805  					"checksum": "no-type",
  4806  				},
  4807  			},
  4808  			true,
  4809  		},
  4810  		{
  4811  			&TaskArtifact{
  4812  				GetterSource: "foo.com",
  4813  				GetterOptions: map[string]string{
  4814  					"checksum": "md5:toosmall",
  4815  				},
  4816  			},
  4817  			true,
  4818  		},
  4819  		{
  4820  			&TaskArtifact{
  4821  				GetterSource: "foo.com",
  4822  				GetterOptions: map[string]string{
  4823  					"checksum": "invalid:type",
  4824  				},
  4825  			},
  4826  			true,
  4827  		},
  4828  		{
  4829  			&TaskArtifact{
  4830  				GetterSource: "foo.com",
  4831  				GetterOptions: map[string]string{
  4832  					"checksum": "md5:${ARTIFACT_CHECKSUM}",
  4833  				},
  4834  			},
  4835  			false,
  4836  		},
  4837  	}
  4838  
  4839  	for i, tc := range cases {
  4840  		err := tc.Input.Validate()
  4841  		if (err != nil) != tc.Err {
  4842  			t.Fatalf("case %d: %v", i, err)
  4843  		}
  4844  	}
  4845  }
  4846  
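        // TestPlan_NormalizeAllocations asserts that normalizing a plan strips
        // stopped and preempted allocations down to a minimal set of fields.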
  4847  func TestPlan_NormalizeAllocations(t *testing.T) {
  4848  	ci.Parallel(t)
  4849  	plan := &Plan{
  4850  		NodeUpdate:      make(map[string][]*Allocation),
  4851  		NodePreemptions: make(map[string][]*Allocation),
  4852  	}
  4853  	stoppedAlloc := MockAlloc()
  4854  	desiredDesc := "Desired desc"
  4855  	plan.AppendStoppedAlloc(stoppedAlloc, desiredDesc, AllocClientStatusLost, "followup-eval-id")
  4856  	preemptedAlloc := MockAlloc()
  4857  	preemptingAllocID := uuid.Generate()
  4858  	plan.AppendPreemptedAlloc(preemptedAlloc, preemptingAllocID)
  4859  
  4860  	plan.NormalizeAllocations()
  4861  
  4862  	actualStoppedAlloc := plan.NodeUpdate[stoppedAlloc.NodeID][0]
  4863  	expectedStoppedAlloc := &Allocation{
  4864  		ID:                 stoppedAlloc.ID,
  4865  		DesiredDescription: desiredDesc,
  4866  		ClientStatus:       AllocClientStatusLost,
  4867  		FollowupEvalID:     "followup-eval-id",
  4868  	}
  4869  	assert.Equal(t, expectedStoppedAlloc, actualStoppedAlloc)
  4870  	actualPreemptedAlloc := plan.NodePreemptions[preemptedAlloc.NodeID][0]
  4871  	expectedPreemptedAlloc := &Allocation{
  4872  		ID:                    preemptedAlloc.ID,
  4873  		PreemptedByAllocation: preemptingAllocID,
  4874  	}
  4875  	assert.Equal(t, expectedPreemptedAlloc, actualPreemptedAlloc)
  4876  }
  4877  
  4878  func TestPlan_AppendStoppedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) {
  4879  	ci.Parallel(t)
  4880  	plan := &Plan{
  4881  		NodeUpdate: make(map[string][]*Allocation),
  4882  	}
  4883  	alloc := MockAlloc()
  4884  	desiredDesc := "Desired desc"
  4885  
  4886  	plan.AppendStoppedAlloc(alloc, desiredDesc, AllocClientStatusLost, "")
  4887  
  4888  	expectedAlloc := new(Allocation)
  4889  	*expectedAlloc = *alloc
  4890  	expectedAlloc.DesiredDescription = desiredDesc
  4891  	expectedAlloc.DesiredStatus = AllocDesiredStatusStop
  4892  	expectedAlloc.ClientStatus = AllocClientStatusLost
  4893  	expectedAlloc.Job = nil
  4894  	expectedAlloc.AllocStates = []*AllocState{{
  4895  		Field: AllocStateFieldClientStatus,
  4896  		Value: "lost",
  4897  	}}
  4898  
  4899  	// This value is set to time.Now() in AppendStoppedAlloc, so clear it
  4900  	appendedAlloc := plan.NodeUpdate[alloc.NodeID][0]
  4901  	appendedAlloc.AllocStates[0].Time = time.Time{}
  4902  
  4903  	assert.Equal(t, expectedAlloc, appendedAlloc)
  4904  	assert.Equal(t, alloc.Job, plan.Job)
  4905  }
  4906  
  4907  func TestPlan_AppendPreemptedAllocAppendsAllocWithUpdatedAttrs(t *testing.T) {
  4908  	ci.Parallel(t)
  4909  	plan := &Plan{
  4910  		NodePreemptions: make(map[string][]*Allocation),
  4911  	}
  4912  	alloc := MockAlloc()
  4913  	preemptingAllocID := uuid.Generate()
  4914  
  4915  	plan.AppendPreemptedAlloc(alloc, preemptingAllocID)
  4916  
  4917  	appendedAlloc := plan.NodePreemptions[alloc.NodeID][0]
  4918  	expectedAlloc := &Allocation{
  4919  		ID:                    alloc.ID,
  4920  		PreemptedByAllocation: preemptingAllocID,
  4921  		JobID:                 alloc.JobID,
  4922  		Namespace:             alloc.Namespace,
  4923  		DesiredStatus:         AllocDesiredStatusEvict,
  4924  		DesiredDescription:    fmt.Sprintf("Preempted by alloc ID %v", preemptingAllocID),
  4925  		AllocatedResources:    alloc.AllocatedResources,
  4926  		TaskResources:         alloc.TaskResources,
  4927  		SharedResources:       alloc.SharedResources,
  4928  	}
  4929  	assert.Equal(t, expectedAlloc, appendedAlloc)
  4930  }
  4931  
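        // TestAllocation_MsgPackTags asserts that the Allocation struct is tagged
        // omitempty for msgpack encoding.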
  4932  func TestAllocation_MsgPackTags(t *testing.T) {
  4933  	ci.Parallel(t)
  4934  	allocType := reflect.TypeOf(Allocation{})
  4935  
  4936  	msgPackTags, _ := allocType.FieldByName("_struct")
  4937  
  4938  	assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`))
  4939  }
  4940  
  4941  func TestEvaluation_MsgPackTags(t *testing.T) {
  4942  	ci.Parallel(t)
  4943  	evalType := reflect.TypeOf(Evaluation{})
  4944  
  4945  	msgPackTags, _ := evalType.FieldByName("_struct")
  4946  
  4947  	assert.Equal(t, msgPackTags.Tag, reflect.StructTag(`codec:",omitempty"`))
  4948  }
  4949  
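        // TestAllocation_Terminated asserts that an allocation is terminal once its
        // client status is terminal, regardless of its desired status.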
  4950  func TestAllocation_Terminated(t *testing.T) {
  4951  	ci.Parallel(t)
  4952  	type desiredState struct {
  4953  		ClientStatus  string
  4954  		DesiredStatus string
  4955  		Terminated    bool
  4956  	}
  4957  	harness := []desiredState{
  4958  		{
  4959  			ClientStatus:  AllocClientStatusPending,
  4960  			DesiredStatus: AllocDesiredStatusStop,
  4961  			Terminated:    false,
  4962  		},
  4963  		{
  4964  			ClientStatus:  AllocClientStatusRunning,
  4965  			DesiredStatus: AllocDesiredStatusStop,
  4966  			Terminated:    false,
  4967  		},
  4968  		{
  4969  			ClientStatus:  AllocClientStatusFailed,
  4970  			DesiredStatus: AllocDesiredStatusStop,
  4971  			Terminated:    true,
  4972  		},
  4973  		{
  4974  			ClientStatus:  AllocClientStatusFailed,
  4975  			DesiredStatus: AllocDesiredStatusRun,
  4976  			Terminated:    true,
  4977  		},
  4978  	}
  4979  
  4980  	for _, state := range harness {
  4981  		alloc := Allocation{}
  4982  		alloc.DesiredStatus = state.DesiredStatus
  4983  		alloc.ClientStatus = state.ClientStatus
  4984  		if alloc.Terminated() != state.Terminated {
  4985  			t.Fatalf("expected: %v, actual: %v", state.Terminated, alloc.Terminated())
  4986  		}
  4987  	}
  4988  }
  4989  
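        // TestAllocation_ShouldReschedule asserts that failed allocations are
        // rescheduled only while their reschedule policy permits further attempts.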
  4990  func TestAllocation_ShouldReschedule(t *testing.T) {
  4991  	ci.Parallel(t)
  4992  	type testCase struct {
  4993  		Desc               string
  4994  		FailTime           time.Time
  4995  		ClientStatus       string
  4996  		DesiredStatus      string
  4997  		ReschedulePolicy   *ReschedulePolicy
  4998  		RescheduleTrackers []*RescheduleEvent
  4999  		ShouldReschedule   bool
  5000  	}
  5001  	fail := time.Now()
  5002  
  5003  	harness := []testCase{
  5004  		{
  5005  			Desc:             "No reschedule when desired state is stop",
  5006  			ClientStatus:     AllocClientStatusPending,
  5007  			DesiredStatus:    AllocDesiredStatusStop,
  5008  			FailTime:         fail,
  5009  			ReschedulePolicy: nil,
  5010  			ShouldReschedule: false,
  5011  		},
  5012  		{
  5013  			Desc:             "Disabled rescheduling",
  5014  			ClientStatus:     AllocClientStatusFailed,
  5015  			DesiredStatus:    AllocDesiredStatusRun,
  5016  			FailTime:         fail,
  5017  			ReschedulePolicy: &ReschedulePolicy{Attempts: 0, Interval: 1 * time.Minute},
  5018  			ShouldReschedule: false,
  5019  		},
  5020  		{
  5021  			Desc:             "No reschedule when client status is complete",
  5022  			ClientStatus:     AllocClientStatusComplete,
  5023  			DesiredStatus:    AllocDesiredStatusRun,
  5024  			FailTime:         fail,
  5025  			ReschedulePolicy: nil,
  5026  			ShouldReschedule: false,
  5027  		},
  5028  		{
  5029  			Desc:             "Reschedule with nil reschedule policy",
  5030  			ClientStatus:     AllocClientStatusFailed,
  5031  			DesiredStatus:    AllocDesiredStatusRun,
  5032  			FailTime:         fail,
  5033  			ReschedulePolicy: nil,
  5034  			ShouldReschedule: false,
  5035  		},
  5036  		{
  5037  			Desc:             "Reschedule with unlimited and attempts >0",
  5038  			ClientStatus:     AllocClientStatusFailed,
  5039  			DesiredStatus:    AllocDesiredStatusRun,
  5040  			FailTime:         fail,
  5041  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Unlimited: true},
  5042  			ShouldReschedule: true,
  5043  		},
  5052  		{
  5053  			Desc:             "Reschedule with policy when client status complete",
  5054  			ClientStatus:     AllocClientStatusComplete,
  5055  			DesiredStatus:    AllocDesiredStatusRun,
  5056  			FailTime:         fail,
  5057  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
  5058  			ShouldReschedule: false,
  5059  		},
  5060  		{
  5061  			Desc:             "Reschedule with no previous attempts",
  5062  			ClientStatus:     AllocClientStatusFailed,
  5063  			DesiredStatus:    AllocDesiredStatusRun,
  5064  			FailTime:         fail,
  5065  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 1 * time.Minute},
  5066  			ShouldReschedule: true,
  5067  		},
  5068  		{
  5069  			Desc:             "Reschedule with leftover attempts",
  5070  			ClientStatus:     AllocClientStatusFailed,
  5071  			DesiredStatus:    AllocDesiredStatusRun,
  5072  			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
  5073  			FailTime:         fail,
  5074  			RescheduleTrackers: []*RescheduleEvent{
  5075  				{
  5076  					RescheduleTime: fail.Add(-1 * time.Minute).UTC().UnixNano(),
  5077  				},
  5078  			},
  5079  			ShouldReschedule: true,
  5080  		},
  5081  		{
  5082  			Desc:             "Reschedule with too old previous attempts",
  5083  			ClientStatus:     AllocClientStatusFailed,
  5084  			DesiredStatus:    AllocDesiredStatusRun,
  5085  			FailTime:         fail,
  5086  			ReschedulePolicy: &ReschedulePolicy{Attempts: 1, Interval: 5 * time.Minute},
  5087  			RescheduleTrackers: []*RescheduleEvent{
  5088  				{
  5089  					RescheduleTime: fail.Add(-6 * time.Minute).UTC().UnixNano(),
  5090  				},
  5091  			},
  5092  			ShouldReschedule: true,
  5093  		},
  5094  		{
  5095  			Desc:             "Reschedule with no leftover attempts",
  5096  			ClientStatus:     AllocClientStatusFailed,
  5097  			DesiredStatus:    AllocDesiredStatusRun,
  5098  			FailTime:         fail,
  5099  			ReschedulePolicy: &ReschedulePolicy{Attempts: 2, Interval: 5 * time.Minute},
  5100  			RescheduleTrackers: []*RescheduleEvent{
  5101  				{
  5102  					RescheduleTime: fail.Add(-3 * time.Minute).UTC().UnixNano(),
  5103  				},
  5104  				{
  5105  					RescheduleTime: fail.Add(-4 * time.Minute).UTC().UnixNano(),
  5106  				},
  5107  			},
  5108  			ShouldReschedule: false,
  5109  		},
  5110  	}
  5111  
  5112  	for _, state := range harness {
  5113  		alloc := Allocation{}
  5114  		alloc.DesiredStatus = state.DesiredStatus
  5115  		alloc.ClientStatus = state.ClientStatus
  5116  		alloc.RescheduleTracker = &RescheduleTracker{Events: state.RescheduleTrackers}
  5117  
  5118  		t.Run(state.Desc, func(t *testing.T) {
  5119  			if got := alloc.ShouldReschedule(state.ReschedulePolicy, state.FailTime); got != state.ShouldReschedule {
  5120  				t.Fatalf("expected %v but got %v", state.ShouldReschedule, got)
  5121  			}
  5122  		})
  5123  
  5124  	}
  5125  }
  5126  
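        // TestAllocation_LastEventTime asserts that the last event time is the
        // latest task FinishedAt, falling back to the allocation's modify time.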
  5127  func TestAllocation_LastEventTime(t *testing.T) {
  5128  	ci.Parallel(t)
  5129  	type testCase struct {
  5130  		desc                  string
  5131  		taskState             map[string]*TaskState
  5132  		expectedLastEventTime time.Time
  5133  	}
  5134  	t1 := time.Now().UTC()
  5135  
  5136  	testCases := []testCase{
  5137  		{
  5138  			desc:                  "nil task state",
  5139  			expectedLastEventTime: t1,
  5140  		},
  5141  		{
  5142  			desc:                  "empty task state",
  5143  			taskState:             make(map[string]*TaskState),
  5144  			expectedLastEventTime: t1,
  5145  		},
  5146  		{
  5147  			desc: "Finished At not set",
  5148  			taskState: map[string]*TaskState{"foo": {State: "start",
  5149  				StartedAt: t1.Add(-2 * time.Hour)}},
  5150  			expectedLastEventTime: t1,
  5151  		},
  5152  		{
  5153  			desc: "One finished",
  5154  			taskState: map[string]*TaskState{"foo": {State: "start",
  5155  				StartedAt:  t1.Add(-2 * time.Hour),
  5156  				FinishedAt: t1.Add(-1 * time.Hour)}},
  5157  			expectedLastEventTime: t1.Add(-1 * time.Hour),
  5158  		},
  5159  		{
  5160  			desc: "Multiple tasks",
  5161  			taskState: map[string]*TaskState{"foo": {State: "start",
  5162  				StartedAt:  t1.Add(-2 * time.Hour),
  5163  				FinishedAt: t1.Add(-1 * time.Hour)},
  5164  				"bar": {State: "start",
  5165  					StartedAt:  t1.Add(-2 * time.Hour),
  5166  					FinishedAt: t1.Add(-40 * time.Minute)}},
  5167  			expectedLastEventTime: t1.Add(-40 * time.Minute),
  5168  		},
  5169  		{
  5170  			desc: "No finishedAt set, one task event, should use modify time",
  5171  			taskState: map[string]*TaskState{"foo": {
  5172  				State:     "run",
  5173  				StartedAt: t1.Add(-2 * time.Hour),
  5174  				Events: []*TaskEvent{
  5175  					{Type: "start", Time: t1.Add(-20 * time.Minute).UnixNano()},
  5176  				}},
  5177  			},
  5178  			expectedLastEventTime: t1,
  5179  		},
  5180  	}
  5181  	for _, tc := range testCases {
  5182  		t.Run(tc.desc, func(t *testing.T) {
  5183  			alloc := &Allocation{CreateTime: t1.UnixNano(), ModifyTime: t1.UnixNano()}
  5184  			alloc.TaskStates = tc.taskState
  5185  			require.Equal(t, tc.expectedLastEventTime, alloc.LastEventTime())
  5186  		})
  5187  	}
  5188  }
  5189  
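        // TestAllocation_NextDelay asserts that the next reschedule time follows the
        // configured delay function, honoring the delay ceiling and reset behavior.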
  5190  func TestAllocation_NextDelay(t *testing.T) {
  5191  	ci.Parallel(t)
  5192  	type testCase struct {
  5193  		desc                       string
  5194  		reschedulePolicy           *ReschedulePolicy
  5195  		alloc                      *Allocation
  5196  		expectedRescheduleTime     time.Time
  5197  		expectedRescheduleEligible bool
  5198  	}
  5199  	now := time.Now()
  5200  	testCases := []testCase{
  5201  		{
  5202  			desc: "Allocation hasn't failed yet",
  5203  			reschedulePolicy: &ReschedulePolicy{
  5204  				DelayFunction: "constant",
  5205  				Delay:         5 * time.Second,
  5206  			},
  5207  			alloc:                      &Allocation{},
  5208  			expectedRescheduleTime:     time.Time{},
  5209  			expectedRescheduleEligible: false,
  5210  		},
  5211  		{
  5212  			desc:                       "Allocation has no reschedule policy",
  5213  			alloc:                      &Allocation{},
  5214  			expectedRescheduleTime:     time.Time{},
  5215  			expectedRescheduleEligible: false,
  5216  		},
  5217  		{
  5218  			desc: "Allocation lacks task state",
  5219  			reschedulePolicy: &ReschedulePolicy{
  5220  				DelayFunction: "constant",
  5221  				Delay:         5 * time.Second,
  5222  				Unlimited:     true,
  5223  			},
  5224  			alloc:                      &Allocation{ClientStatus: AllocClientStatusFailed, ModifyTime: now.UnixNano()},
  5225  			expectedRescheduleTime:     now.UTC().Add(5 * time.Second),
  5226  			expectedRescheduleEligible: true,
  5227  		},
  5228  		{
  5229  			desc: "linear delay, unlimited restarts, no reschedule tracker",
  5230  			reschedulePolicy: &ReschedulePolicy{
  5231  				DelayFunction: "constant",
  5232  				Delay:         5 * time.Second,
  5233  				Unlimited:     true,
  5234  			},
  5235  			alloc: &Allocation{
  5236  				ClientStatus: AllocClientStatusFailed,
  5237  				TaskStates: map[string]*TaskState{"foo": {State: "dead",
  5238  					StartedAt:  now.Add(-1 * time.Hour),
  5239  					FinishedAt: now.Add(-2 * time.Second)}},
  5240  			},
  5241  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  5242  			expectedRescheduleEligible: true,
  5243  		},
  5244  		{
  5245  			desc: "linear delay with reschedule tracker",
  5246  			reschedulePolicy: &ReschedulePolicy{
  5247  				DelayFunction: "constant",
  5248  				Delay:         5 * time.Second,
  5249  				Interval:      10 * time.Minute,
  5250  				Attempts:      2,
  5251  			},
  5252  			alloc: &Allocation{
  5253  				ClientStatus: AllocClientStatusFailed,
  5254  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5255  					StartedAt:  now.Add(-1 * time.Hour),
  5256  					FinishedAt: now.Add(-2 * time.Second)}},
  5257  				RescheduleTracker: &RescheduleTracker{
  5258  					Events: []*RescheduleEvent{{
  5259  						RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
  5260  						Delay:          5 * time.Second,
  5261  					}},
  5262  				}},
  5263  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  5264  			expectedRescheduleEligible: true,
  5265  		},
  5266  		{
  5267  			desc: "linear delay with reschedule tracker, attempts exhausted",
  5268  			reschedulePolicy: &ReschedulePolicy{
  5269  				DelayFunction: "constant",
  5270  				Delay:         5 * time.Second,
  5271  				Interval:      10 * time.Minute,
  5272  				Attempts:      2,
  5273  			},
  5274  			alloc: &Allocation{
  5275  				ClientStatus: AllocClientStatusFailed,
  5276  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5277  					StartedAt:  now.Add(-1 * time.Hour),
  5278  					FinishedAt: now.Add(-2 * time.Second)}},
  5279  				RescheduleTracker: &RescheduleTracker{
  5280  					Events: []*RescheduleEvent{
  5281  						{
  5282  							RescheduleTime: now.Add(-3 * time.Minute).UTC().UnixNano(),
  5283  							Delay:          5 * time.Second,
  5284  						},
  5285  						{
  5286  							RescheduleTime: now.Add(-2 * time.Minute).UTC().UnixNano(),
  5287  							Delay:          5 * time.Second,
  5288  						},
  5289  					},
  5290  				}},
  5291  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  5292  			expectedRescheduleEligible: false,
  5293  		},
  5294  		{
  5295  			desc: "exponential delay - no reschedule tracker",
  5296  			reschedulePolicy: &ReschedulePolicy{
  5297  				DelayFunction: "exponential",
  5298  				Delay:         5 * time.Second,
  5299  				MaxDelay:      90 * time.Second,
  5300  				Unlimited:     true,
  5301  			},
  5302  			alloc: &Allocation{
  5303  				ClientStatus: AllocClientStatusFailed,
  5304  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5305  					StartedAt:  now.Add(-1 * time.Hour),
  5306  					FinishedAt: now.Add(-2 * time.Second)}},
  5307  			},
  5308  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  5309  			expectedRescheduleEligible: true,
  5310  		},
  5311  		{
  5312  			desc: "exponential delay with reschedule tracker",
  5313  			reschedulePolicy: &ReschedulePolicy{
  5314  				DelayFunction: "exponential",
  5315  				Delay:         5 * time.Second,
  5316  				MaxDelay:      90 * time.Second,
  5317  				Unlimited:     true,
  5318  			},
  5319  			alloc: &Allocation{
  5320  				ClientStatus: AllocClientStatusFailed,
  5321  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5322  					StartedAt:  now.Add(-1 * time.Hour),
  5323  					FinishedAt: now.Add(-2 * time.Second)}},
  5324  				RescheduleTracker: &RescheduleTracker{
  5325  					Events: []*RescheduleEvent{
  5326  						{
  5327  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5328  							Delay:          5 * time.Second,
  5329  						},
  5330  						{
  5331  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5332  							Delay:          10 * time.Second,
  5333  						},
  5334  						{
  5335  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5336  							Delay:          20 * time.Second,
  5337  						},
  5338  					},
  5339  				}},
  5340  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
  5341  			expectedRescheduleEligible: true,
  5342  		},
  5343  		{
  5344  			desc: "exponential delay with delay ceiling reached",
  5345  			reschedulePolicy: &ReschedulePolicy{
  5346  				DelayFunction: "exponential",
  5347  				Delay:         5 * time.Second,
  5348  				MaxDelay:      90 * time.Second,
  5349  				Unlimited:     true,
  5350  			},
  5351  			alloc: &Allocation{
  5352  				ClientStatus: AllocClientStatusFailed,
  5353  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5354  					StartedAt:  now.Add(-1 * time.Hour),
  5355  					FinishedAt: now.Add(-15 * time.Second)}},
  5356  				RescheduleTracker: &RescheduleTracker{
  5357  					Events: []*RescheduleEvent{
  5358  						{
  5359  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5360  							Delay:          5 * time.Second,
  5361  						},
  5362  						{
  5363  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5364  							Delay:          10 * time.Second,
  5365  						},
  5366  						{
  5367  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5368  							Delay:          20 * time.Second,
  5369  						},
  5370  						{
  5371  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5372  							Delay:          40 * time.Second,
  5373  						},
  5374  						{
  5375  							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
  5376  							Delay:          80 * time.Second,
  5377  						},
  5378  					},
  5379  				}},
  5380  			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(90 * time.Second),
  5381  			expectedRescheduleEligible: true,
  5382  		},
  5383  		{
  5384  			// Test case where the alloc from the most recent reschedule ran longer than the delay ceiling, so the delay resets
  5385  			desc: "exponential delay, delay ceiling reset condition met",
  5386  			reschedulePolicy: &ReschedulePolicy{
  5387  				DelayFunction: "exponential",
  5388  				Delay:         5 * time.Second,
  5389  				MaxDelay:      90 * time.Second,
  5390  				Unlimited:     true,
  5391  			},
  5392  			alloc: &Allocation{
  5393  				ClientStatus: AllocClientStatusFailed,
  5394  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5395  					StartedAt:  now.Add(-1 * time.Hour),
  5396  					FinishedAt: now.Add(-15 * time.Minute)}},
  5397  				RescheduleTracker: &RescheduleTracker{
  5398  					Events: []*RescheduleEvent{
  5399  						{
  5400  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5401  							Delay:          5 * time.Second,
  5402  						},
  5403  						{
  5404  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5405  							Delay:          10 * time.Second,
  5406  						},
  5407  						{
  5408  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5409  							Delay:          20 * time.Second,
  5410  						},
  5411  						{
  5412  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5413  							Delay:          40 * time.Second,
  5414  						},
  5415  						{
  5416  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5417  							Delay:          80 * time.Second,
  5418  						},
  5419  						{
  5420  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5421  							Delay:          90 * time.Second,
  5422  						},
  5423  						{
  5424  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5425  							Delay:          90 * time.Second,
  5426  						},
  5427  					},
  5428  				}},
  5429  			expectedRescheduleTime:     now.Add(-15 * time.Minute).Add(5 * time.Second),
  5430  			expectedRescheduleEligible: true,
  5431  		},
  5432  		{
  5433  			desc: "fibonacci delay - no reschedule tracker",
  5434  			reschedulePolicy: &ReschedulePolicy{
  5435  				DelayFunction: "fibonacci",
  5436  				Delay:         5 * time.Second,
  5437  				MaxDelay:      90 * time.Second,
  5438  				Unlimited:     true,
  5439  			},
  5440  			alloc: &Allocation{
  5441  				ClientStatus: AllocClientStatusFailed,
  5442  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5443  					StartedAt:  now.Add(-1 * time.Hour),
  5444  					FinishedAt: now.Add(-2 * time.Second)}}},
  5445  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(5 * time.Second),
  5446  			expectedRescheduleEligible: true,
  5447  		},
  5448  		{
  5449  			desc: "fibonacci delay with reschedule tracker",
  5450  			reschedulePolicy: &ReschedulePolicy{
  5451  				DelayFunction: "fibonacci",
  5452  				Delay:         5 * time.Second,
  5453  				MaxDelay:      90 * time.Second,
  5454  				Unlimited:     true,
  5455  			},
  5456  			alloc: &Allocation{
  5457  				ClientStatus: AllocClientStatusFailed,
  5458  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5459  					StartedAt:  now.Add(-1 * time.Hour),
  5460  					FinishedAt: now.Add(-2 * time.Second)}},
  5461  				RescheduleTracker: &RescheduleTracker{
  5462  					Events: []*RescheduleEvent{
  5463  						{
  5464  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5465  							Delay:          5 * time.Second,
  5466  						},
  5467  						{
  5468  							RescheduleTime: now.Add(-5 * time.Second).UTC().UnixNano(),
  5469  							Delay:          5 * time.Second,
  5470  						},
  5471  					},
  5472  				}},
  5473  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(10 * time.Second),
  5474  			expectedRescheduleEligible: true,
  5475  		},
  5476  		{
  5477  			desc: "fibonacci delay with more events",
  5478  			reschedulePolicy: &ReschedulePolicy{
  5479  				DelayFunction: "fibonacci",
  5480  				Delay:         5 * time.Second,
  5481  				MaxDelay:      90 * time.Second,
  5482  				Unlimited:     true,
  5483  			},
  5484  			alloc: &Allocation{
  5485  				ClientStatus: AllocClientStatusFailed,
  5486  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5487  					StartedAt:  now.Add(-1 * time.Hour),
  5488  					FinishedAt: now.Add(-2 * time.Second)}},
  5489  				RescheduleTracker: &RescheduleTracker{
  5490  					Events: []*RescheduleEvent{
  5491  						{
  5492  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5493  							Delay:          5 * time.Second,
  5494  						},
  5495  						{
  5496  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5497  							Delay:          5 * time.Second,
  5498  						},
  5499  						{
  5500  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5501  							Delay:          10 * time.Second,
  5502  						},
  5503  						{
  5504  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5505  							Delay:          15 * time.Second,
  5506  						},
  5507  						{
  5508  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5509  							Delay:          25 * time.Second,
  5510  						},
  5511  					},
  5512  				}},
  5513  			expectedRescheduleTime:     now.Add(-2 * time.Second).Add(40 * time.Second),
  5514  			expectedRescheduleEligible: true,
  5515  		},
  5516  		{
  5517  			desc: "fibonacci delay with delay ceiling reached",
  5518  			reschedulePolicy: &ReschedulePolicy{
  5519  				DelayFunction: "fibonacci",
  5520  				Delay:         5 * time.Second,
  5521  				MaxDelay:      50 * time.Second,
  5522  				Unlimited:     true,
  5523  			},
  5524  			alloc: &Allocation{
  5525  				ClientStatus: AllocClientStatusFailed,
  5526  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5527  					StartedAt:  now.Add(-1 * time.Hour),
  5528  					FinishedAt: now.Add(-15 * time.Second)}},
  5529  				RescheduleTracker: &RescheduleTracker{
  5530  					Events: []*RescheduleEvent{
  5531  						{
  5532  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5533  							Delay:          5 * time.Second,
  5534  						},
  5535  						{
  5536  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5537  							Delay:          5 * time.Second,
  5538  						},
  5539  						{
  5540  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5541  							Delay:          10 * time.Second,
  5542  						},
  5543  						{
  5544  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5545  							Delay:          15 * time.Second,
  5546  						},
  5547  						{
  5548  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5549  							Delay:          25 * time.Second,
  5550  						},
  5551  						{
  5552  							RescheduleTime: now.Add(-40 * time.Second).UTC().UnixNano(),
  5553  							Delay:          40 * time.Second,
  5554  						},
  5555  					},
  5556  				}},
  5557  			expectedRescheduleTime:     now.Add(-15 * time.Second).Add(50 * time.Second),
  5558  			expectedRescheduleEligible: true,
  5559  		},
  5560  		{
  5561  			desc: "fibonacci delay with delay reset condition met",
  5562  			reschedulePolicy: &ReschedulePolicy{
  5563  				DelayFunction: "fibonacci",
  5564  				Delay:         5 * time.Second,
  5565  				MaxDelay:      50 * time.Second,
  5566  				Unlimited:     true,
  5567  			},
  5568  			alloc: &Allocation{
  5569  				ClientStatus: AllocClientStatusFailed,
  5570  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5571  					StartedAt:  now.Add(-1 * time.Hour),
  5572  					FinishedAt: now.Add(-5 * time.Minute)}},
  5573  				RescheduleTracker: &RescheduleTracker{
  5574  					Events: []*RescheduleEvent{
  5575  						{
  5576  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5577  							Delay:          5 * time.Second,
  5578  						},
  5579  						{
  5580  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5581  							Delay:          5 * time.Second,
  5582  						},
  5583  						{
  5584  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5585  							Delay:          10 * time.Second,
  5586  						},
  5587  						{
  5588  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5589  							Delay:          15 * time.Second,
  5590  						},
  5591  						{
  5592  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5593  							Delay:          25 * time.Second,
  5594  						},
  5595  						{
  5596  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5597  							Delay:          40 * time.Second,
  5598  						},
  5599  					},
  5600  				}},
  5601  			expectedRescheduleTime:     now.Add(-5 * time.Minute).Add(5 * time.Second),
  5602  			expectedRescheduleEligible: true,
  5603  		},
  5604  		{
  5605  			desc: "fibonacci delay where the most recent event reset the delay",
  5606  			reschedulePolicy: &ReschedulePolicy{
  5607  				DelayFunction: "fibonacci",
  5608  				Delay:         5 * time.Second,
  5609  				MaxDelay:      50 * time.Second,
  5610  				Unlimited:     true,
  5611  			},
  5612  			alloc: &Allocation{
  5613  				ClientStatus: AllocClientStatusFailed,
  5614  				TaskStates: map[string]*TaskState{"foo": {State: "start",
  5615  					StartedAt:  now.Add(-1 * time.Hour),
  5616  					FinishedAt: now.Add(-5 * time.Second)}},
  5617  				RescheduleTracker: &RescheduleTracker{
  5618  					Events: []*RescheduleEvent{
  5619  						{
  5620  							RescheduleTime: now.Add(-2 * time.Hour).UTC().UnixNano(),
  5621  							Delay:          5 * time.Second,
  5622  						},
  5623  						{
  5624  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5625  							Delay:          5 * time.Second,
  5626  						},
  5627  						{
  5628  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5629  							Delay:          10 * time.Second,
  5630  						},
  5631  						{
  5632  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5633  							Delay:          15 * time.Second,
  5634  						},
  5635  						{
  5636  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5637  							Delay:          25 * time.Second,
  5638  						},
  5639  						{
  5640  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5641  							Delay:          40 * time.Second,
  5642  						},
  5643  						{
  5644  							RescheduleTime: now.Add(-1 * time.Hour).UTC().UnixNano(),
  5645  							Delay:          50 * time.Second,
  5646  						},
  5647  						{
  5648  							RescheduleTime: now.Add(-1 * time.Minute).UTC().UnixNano(),
  5649  							Delay:          5 * time.Second,
  5650  						},
  5651  					},
  5652  				}},
  5653  			expectedRescheduleTime:     now.Add(-5 * time.Second).Add(5 * time.Second),
  5654  			expectedRescheduleEligible: true,
  5655  		},
  5656  	}
  5657  	for _, tc := range testCases {
  5658  		t.Run(tc.desc, func(t *testing.T) {
  5659  			require := require.New(t)
  5660  			j := testJob()
  5661  			if tc.reschedulePolicy != nil {
  5662  				j.TaskGroups[0].ReschedulePolicy = tc.reschedulePolicy
  5663  			}
  5664  			tc.alloc.Job = j
  5665  			tc.alloc.TaskGroup = j.TaskGroups[0].Name
  5666  			reschedTime, allowed := tc.alloc.NextRescheduleTime()
  5667  			require.Equal(tc.expectedRescheduleEligible, allowed)
  5668  			require.Equal(tc.expectedRescheduleTime, reschedTime)
  5669  		})
  5670  	}
  5671  
  5672  }
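
// fibDelaySketch is a hypothetical helper (not part of the package API) that
// mirrors the "fibonacci" delay progression the cases above assert: each new
// delay is the sum of the two previous delays, starting from the base delay
// and capped at MaxDelay, e.g. 5s, 5s, 10s, 15s, 25s, 40s, then 50s once the
// 50s ceiling is hit.
func fibDelaySketch(base, maxDelay time.Duration, attempts int) time.Duration {
	prev, cur := time.Duration(0), base
	for i := 1; i < attempts; i++ {
		prev, cur = cur, prev+cur
		if cur > maxDelay {
			cur = maxDelay
		}
	}
	return cur
}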
  5673  
  5674  func TestAllocation_WaitClientStop(t *testing.T) {
  5675  	ci.Parallel(t)
  5676  	type testCase struct {
  5677  		desc                   string
  5678  		stop                   time.Duration
  5679  		status                 string
  5680  		expectedShould         bool
  5681  		expectedRescheduleTime time.Time
  5682  	}
  5683  	now := time.Now().UTC()
  5684  	testCases := []testCase{
  5685  		{
  5686  			desc:           "running",
  5687  			stop:           2 * time.Second,
  5688  			status:         AllocClientStatusRunning,
  5689  			expectedShould: true,
  5690  		},
  5691  		{
  5692  			desc:           "no stop_after_client_disconnect",
  5693  			status:         AllocClientStatusLost,
  5694  			expectedShould: false,
  5695  		},
  5696  		{
  5697  			desc:                   "stop",
  5698  			status:                 AllocClientStatusLost,
  5699  			stop:                   2 * time.Second,
  5700  			expectedShould:         true,
  5701  			expectedRescheduleTime: now.Add((2 + 5) * time.Second),
  5702  		},
  5703  	}
  5704  	for _, tc := range testCases {
  5705  		t.Run(tc.desc, func(t *testing.T) {
  5706  			j := testJob()
  5707  			a := &Allocation{
  5708  				ClientStatus: tc.status,
  5709  				Job:          j,
  5710  				TaskStates:   map[string]*TaskState{},
  5711  			}
  5712  
  5713  			if tc.status == AllocClientStatusLost {
  5714  				a.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost)
  5715  			}
  5716  
  5717  			j.TaskGroups[0].StopAfterClientDisconnect = &tc.stop
  5718  			a.TaskGroup = j.TaskGroups[0].Name
  5719  
  5720  			require.Equal(t, tc.expectedShould, a.ShouldClientStop())
  5721  
  5722  			if !tc.expectedShould || tc.status != AllocClientStatusLost {
  5723  				return
  5724  			}
  5725  
  5726  			// the reschedTime should be close to the expectedRescheduleTime
  5727  			reschedTime := a.WaitClientStop()
  5728  			e := reschedTime.Unix() - tc.expectedRescheduleTime.Unix()
  5729  			require.Less(t, e, int64(2))
  5730  		})
  5731  	}
  5732  }
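
// The "stop" expectation above works out as: reschedule at the time the
// client was last seen, plus the stop_after_client_disconnect window, plus a
// small kill-wait buffer. A sketch of that arithmetic (the 5s buffer is
// inferred from the test's expected value, not a documented constant):
//
//	rescheduleAt := lastSeen.Add(stop + 5*time.Second)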
  5733  
  5734  func TestAllocation_DisconnectTimeout(t *testing.T) {
  5735  	type testCase struct {
  5736  		desc          string
  5737  		maxDisconnect *time.Duration
  5738  	}
  5739  
  5740  	testCases := []testCase{
  5741  		{
  5742  			desc:          "no max_client_disconnect",
  5743  			maxDisconnect: nil,
  5744  		},
  5745  		{
  5746  			desc:          "has max_client_disconnect",
  5747  			maxDisconnect: pointer.Of(30 * time.Second),
  5748  		},
  5749  		{
  5750  			desc:          "zero max_client_disconnect",
  5751  			maxDisconnect: pointer.Of(0 * time.Second),
  5752  		},
  5753  	}
  5754  	for _, tc := range testCases {
  5755  		t.Run(tc.desc, func(t *testing.T) {
  5756  			j := testJob()
  5757  			a := &Allocation{
  5758  				Job: j,
  5759  			}
  5760  
  5761  			j.TaskGroups[0].MaxClientDisconnect = tc.maxDisconnect
  5762  			a.TaskGroup = j.TaskGroups[0].Name
  5763  
  5764  			now := time.Now()
  5765  
  5766  			reschedTime := a.DisconnectTimeout(now)
  5767  
  5768  			if tc.maxDisconnect == nil {
  5769  				require.Equal(t, now, reschedTime, "expected to be now")
  5770  			} else {
  5771  				difference := reschedTime.Sub(now)
  5772  				require.Equal(t, *tc.maxDisconnect, difference, "expected durations to be equal")
  5773  			}
  5774  
  5775  		})
  5776  	}
  5777  }
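
// DisconnectTimeout behavior exercised above, in sketch form: with no
// max_client_disconnect configured the input time comes back unchanged, and
// otherwise the disconnect deadline is pushed out by the configured window:
//
//	deadline := now
//	if maxDisconnect != nil {
//		deadline = now.Add(*maxDisconnect)
//	}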
  5778  
  5779  func TestAllocation_Expired(t *testing.T) {
  5780  	type testCase struct {
  5781  		name             string
  5782  		maxDisconnect    string
  5783  		elapsed          int
  5784  		expected         bool
  5785  		nilJob           bool
  5786  		badTaskGroup     bool
  5787  		mixedUTC         bool
  5788  		noReconnectEvent bool
  5789  		status           string
  5790  	}
  5791  
  5792  	testCases := []testCase{
  5793  		{
  5794  			name:          "has-expired",
  5795  			maxDisconnect: "5s",
  5796  			elapsed:       10,
  5797  			expected:      true,
  5798  		},
  5799  		{
  5800  			name:          "has-not-expired",
  5801  			maxDisconnect: "5s",
  5802  			elapsed:       3,
  5803  			expected:      false,
  5804  		},
  5805  		{
  5806  			name:          "are-equal",
  5807  			maxDisconnect: "5s",
  5808  			elapsed:       5,
  5809  			expected:      true,
  5810  		},
  5811  		{
  5812  			name:          "nil-job",
  5813  			maxDisconnect: "5s",
  5814  			elapsed:       10,
  5815  			expected:      false,
  5816  			nilJob:        true,
  5817  		},
  5818  		{
  5819  			name:          "wrong-status",
  5820  			maxDisconnect: "5s",
  5821  			elapsed:       10,
  5822  			expected:      false,
  5823  			status:        AllocClientStatusRunning,
  5824  		},
  5825  		{
  5826  			name:          "bad-task-group",
  5827  			maxDisconnect: "",
  5828  			badTaskGroup:  true,
  5829  			elapsed:       10,
  5830  			expected:      false,
  5831  		},
  5832  		{
  5833  			name:          "no-max-disconnect",
  5834  			maxDisconnect: "",
  5835  			elapsed:       10,
  5836  			expected:      false,
  5837  		},
  5838  		{
  5839  			name:          "mixed-utc-has-expired",
  5840  			maxDisconnect: "5s",
  5841  			elapsed:       10,
  5842  			mixedUTC:      true,
  5843  			expected:      true,
  5844  		},
  5845  		{
  5846  			name:          "mixed-utc-has-not-expired",
  5847  			maxDisconnect: "5s",
  5848  			elapsed:       3,
  5849  			mixedUTC:      true,
  5850  			expected:      false,
  5851  		},
  5852  		{
  5853  			name:             "no-reconnect-event",
  5854  			maxDisconnect:    "5s",
  5855  			elapsed:          2,
  5856  			expected:         false,
  5857  			noReconnectEvent: true,
  5858  		},
  5859  	}
  5860  	for _, tc := range testCases {
  5861  		t.Run(tc.name, func(t *testing.T) {
  5862  			alloc := MockAlloc()
  5863  			var err error
  5864  			var maxDisconnect time.Duration
  5865  
  5866  			if tc.maxDisconnect != "" {
  5867  				maxDisconnect, err = time.ParseDuration(tc.maxDisconnect)
  5868  				require.NoError(t, err)
  5869  				alloc.Job.TaskGroups[0].MaxClientDisconnect = &maxDisconnect
  5870  			}
  5871  
  5872  			if tc.nilJob {
  5873  				alloc.Job = nil
  5874  			}
  5875  
  5876  			if tc.badTaskGroup {
  5877  				alloc.TaskGroup = "bad"
  5878  			}
  5879  
  5880  			alloc.ClientStatus = AllocClientStatusUnknown
  5881  			if tc.status != "" {
  5882  				alloc.ClientStatus = tc.status
  5883  			}
  5884  
  5885  			alloc.AllocStates = []*AllocState{{
  5886  				Field: AllocStateFieldClientStatus,
  5887  				Value: AllocClientStatusUnknown,
  5888  				Time:  time.Now(),
  5889  			}}
  5890  
  5891  			require.NoError(t, err)
  5892  			now := time.Now().UTC()
  5893  			if tc.mixedUTC {
  5894  				now = time.Now()
  5895  			}
  5896  
  5897  			if !tc.noReconnectEvent {
  5898  				event := NewTaskEvent(TaskClientReconnected)
  5899  				event.Time = now.UnixNano()
  5900  
  5901  				alloc.TaskStates = map[string]*TaskState{
  5902  					"web": {
  5903  						Events: []*TaskEvent{event},
  5904  					},
  5905  				}
  5906  			}
  5907  
  5908  			elapsedDuration := time.Duration(tc.elapsed) * time.Second
  5909  			now = now.Add(elapsedDuration)
  5910  
  5911  			require.Equal(t, tc.expected, alloc.Expired(now))
  5912  		})
  5913  	}
  5914  }
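
// expiredSketch is a hypothetical helper (not part of the package API) that
// captures the comparison the cases above pin down: an unknown allocation has
// expired once the time elapsed since its disconnect reaches
// max_client_disconnect. Note the "are-equal" case expects expiry exactly at
// the boundary, hence >= rather than >.
func expiredSketch(disconnectedAt time.Time, maxDisconnect time.Duration, now time.Time) bool {
	return !now.Before(disconnectedAt.Add(maxDisconnect))
}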
  5915  
  5916  func TestAllocation_NeedsToReconnect(t *testing.T) {
  5917  	ci.Parallel(t)
  5918  
  5919  	testCases := []struct {
  5920  		name     string
  5921  		states   []*AllocState
  5922  		expected bool
  5923  	}{
  5924  		{
  5925  			name:     "no state",
  5926  			expected: false,
  5927  		},
  5928  		{
  5929  			name:     "never disconnected",
  5930  			states:   []*AllocState{},
  5931  			expected: false,
  5932  		},
  5933  		{
  5934  			name: "disconnected once",
  5935  			states: []*AllocState{
  5936  				{
  5937  					Field: AllocStateFieldClientStatus,
  5938  					Value: AllocClientStatusUnknown,
  5939  					Time:  time.Now(),
  5940  				},
  5941  			},
  5942  			expected: true,
  5943  		},
  5944  		{
  5945  			name: "disconnect reconnect disconnect",
  5946  			states: []*AllocState{
  5947  				{
  5948  					Field: AllocStateFieldClientStatus,
  5949  					Value: AllocClientStatusUnknown,
  5950  					Time:  time.Now().Add(-2 * time.Minute),
  5951  				},
  5952  				{
  5953  					Field: AllocStateFieldClientStatus,
  5954  					Value: AllocClientStatusRunning,
  5955  					Time:  time.Now().Add(-1 * time.Minute),
  5956  				},
  5957  				{
  5958  					Field: AllocStateFieldClientStatus,
  5959  					Value: AllocClientStatusUnknown,
  5960  					Time:  time.Now(),
  5961  				},
  5962  			},
  5963  			expected: true,
  5964  		},
  5965  		{
  5966  			name: "disconnect multiple times before reconnect",
  5967  			states: []*AllocState{
  5968  				{
  5969  					Field: AllocStateFieldClientStatus,
  5970  					Value: AllocClientStatusUnknown,
  5971  					Time:  time.Now().Add(-2 * time.Minute),
  5972  				},
  5973  				{
  5974  					Field: AllocStateFieldClientStatus,
  5975  					Value: AllocClientStatusUnknown,
  5976  					Time:  time.Now().Add(-1 * time.Minute),
  5977  				},
  5978  				{
  5979  					Field: AllocStateFieldClientStatus,
  5980  					Value: AllocClientStatusRunning,
  5981  					Time:  time.Now(),
  5982  				},
  5983  			},
  5984  			expected: false,
  5985  		},
  5986  		{
  5987  			name: "disconnect after multiple updates",
  5988  			states: []*AllocState{
  5989  				{
  5990  					Field: AllocStateFieldClientStatus,
  5991  					Value: AllocClientStatusPending,
  5992  					Time:  time.Now().Add(-2 * time.Minute),
  5993  				},
  5994  				{
  5995  					Field: AllocStateFieldClientStatus,
  5996  					Value: AllocClientStatusRunning,
  5997  					Time:  time.Now().Add(-1 * time.Minute),
  5998  				},
  5999  				{
  6000  					Field: AllocStateFieldClientStatus,
  6001  					Value: AllocClientStatusUnknown,
  6002  					Time:  time.Now(),
  6003  				},
  6004  			},
  6005  			expected: true,
  6006  		},
  6007  	}
  6008  
  6009  	for _, tc := range testCases {
  6010  		t.Run(tc.name, func(t *testing.T) {
  6011  			alloc := MockAlloc()
  6012  			alloc.AllocStates = tc.states
  6013  
  6014  			got := alloc.NeedsToReconnect()
  6015  			require.Equal(t, tc.expected, got)
  6016  		})
  6017  	}
  6018  }
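
// The decision the cases above encode, as a sketch: scan the client-status
// transitions in order, and reconnect only when the most recent one recorded
// a disconnect.
//
//	needs := false
//	for _, s := range alloc.AllocStates {
//		if s.Field == AllocStateFieldClientStatus {
//			needs = s.Value == AllocClientStatusUnknown
//		}
//	}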
  6019  
  6020  func TestAllocation_Canonicalize_Old(t *testing.T) {
  6021  	ci.Parallel(t)
  6022  
  6023  	alloc := MockAlloc()
  6024  	alloc.AllocatedResources = nil
  6025  	alloc.TaskResources = map[string]*Resources{
  6026  		"web": {
  6027  			CPU:      500,
  6028  			MemoryMB: 256,
  6029  			Networks: []*NetworkResource{
  6030  				{
  6031  					Device:        "eth0",
  6032  					IP:            "192.168.0.100",
  6033  					ReservedPorts: []Port{{Label: "admin", Value: 5000}},
  6034  					MBits:         50,
  6035  					DynamicPorts:  []Port{{Label: "http", Value: 9876}},
  6036  				},
  6037  			},
  6038  		},
  6039  	}
  6040  	alloc.SharedResources = &Resources{
  6041  		DiskMB: 150,
  6042  	}
  6043  	alloc.Canonicalize()
  6044  
  6045  	expected := &AllocatedResources{
  6046  		Tasks: map[string]*AllocatedTaskResources{
  6047  			"web": {
  6048  				Cpu: AllocatedCpuResources{
  6049  					CpuShares: 500,
  6050  				},
  6051  				Memory: AllocatedMemoryResources{
  6052  					MemoryMB: 256,
  6053  				},
  6054  				Networks: []*NetworkResource{
  6055  					{
  6056  						Device:        "eth0",
  6057  						IP:            "192.168.0.100",
  6058  						ReservedPorts: []Port{{Label: "admin", Value: 5000}},
  6059  						MBits:         50,
  6060  						DynamicPorts:  []Port{{Label: "http", Value: 9876}},
  6061  					},
  6062  				},
  6063  			},
  6064  		},
  6065  		Shared: AllocatedSharedResources{
  6066  			DiskMB: 150,
  6067  		},
  6068  	}
  6069  
  6070  	require.Equal(t, expected, alloc.AllocatedResources)
  6071  }
  6072  
  6073  // TestAllocation_Canonicalize_New asserts that an alloc with the latest
  6074  // schema isn't modified by Canonicalize.
  6075  func TestAllocation_Canonicalize_New(t *testing.T) {
  6076  	ci.Parallel(t)
  6077  
  6078  	alloc := MockAlloc()
  6079  	allocCopy := alloc.Copy()
  6080  
  6081  	alloc.Canonicalize()
  6082  	require.Equal(t, allocCopy, alloc)
  6083  }
  6084  
  6085  func TestRescheduleTracker_Copy(t *testing.T) {
  6086  	ci.Parallel(t)
  6087  	type testCase struct {
  6088  		original *RescheduleTracker
  6089  		expected *RescheduleTracker
  6090  	}
  6091  	cases := []testCase{
  6092  		{nil, nil},
  6093  		{&RescheduleTracker{Events: []*RescheduleEvent{
  6094  			{RescheduleTime: 2,
  6095  				PrevAllocID: "12",
  6096  				PrevNodeID:  "12",
  6097  				Delay:       30 * time.Second},
  6098  		}}, &RescheduleTracker{Events: []*RescheduleEvent{
  6099  			{RescheduleTime: 2,
  6100  				PrevAllocID: "12",
  6101  				PrevNodeID:  "12",
  6102  				Delay:       30 * time.Second},
  6103  		}}},
  6104  	}
  6105  
  6106  	for _, tc := range cases {
  6107  		if got := tc.original.Copy(); !reflect.DeepEqual(got, tc.expected) {
  6108  			t.Fatalf("expected %#v but got %#v", tc.expected, got)
  6109  		}
  6110  	}
  6111  }
  6112  
  6113  func TestVault_Validate(t *testing.T) {
  6114  	ci.Parallel(t)
  6115  
  6116  	v := &Vault{
  6117  		Env:        true,
  6118  		ChangeMode: VaultChangeModeNoop,
  6119  	}
  6120  
  6121  	if err := v.Validate(); err == nil || !strings.Contains(err.Error(), "Policy list") {
  6122  		t.Fatalf("Expected policy list empty error")
  6123  	}
  6124  
  6125  	v.Policies = []string{"foo", "root"}
  6126  	v.ChangeMode = VaultChangeModeSignal
  6127  
  6128  	err := v.Validate()
  6129  	if err == nil {
  6130  		t.Fatalf("Expected validation errors")
  6131  	}
  6132  
  6133  	if !strings.Contains(err.Error(), "Signal must") {
  6134  		t.Fatalf("Expected signal empty error")
  6135  	}
  6136  	if !strings.Contains(err.Error(), "root") {
  6137  		t.Fatalf("Expected root error")
  6138  	}
  6139  }
  6140  
  6141  func TestVault_Copy(t *testing.T) {
  6142  	v := &Vault{
  6143  		Policies:     []string{"policy1", "policy2"},
  6144  		Namespace:    "ns1",
  6145  		Env:          false,
  6146  		ChangeMode:   "noop",
  6147  		ChangeSignal: "SIGKILL",
  6148  	}
  6149  
  6150  	// Copy and modify.
  6151  	vc := v.Copy()
  6152  	vc.Policies[0] = "policy0"
  6153  	vc.Namespace = "ns2"
  6154  	vc.Env = true
  6155  	vc.ChangeMode = "signal"
  6156  	vc.ChangeSignal = "SIGHUP"
  6157  
  6158  	require.NotEqual(t, v, vc)
  6159  }
  6160  
  6161  func TestVault_Canonicalize(t *testing.T) {
  6162  	v := &Vault{
  6163  		ChangeSignal: "sighup",
  6164  	}
  6165  	v.Canonicalize()
  6166  	require.Equal(t, "SIGHUP", v.ChangeSignal)
  6167  	require.Equal(t, VaultChangeModeRestart, v.ChangeMode)
  6168  }
  6169  
  6170  func TestParameterizedJobConfig_Validate(t *testing.T) {
  6171  	ci.Parallel(t)
  6172  
  6173  	d := &ParameterizedJobConfig{
  6174  		Payload: "foo",
  6175  	}
  6176  
  6177  	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "payload") {
  6178  		t.Fatalf("Expected unknown payload requirement: %v", err)
  6179  	}
  6180  
  6181  	d.Payload = DispatchPayloadOptional
  6182  	d.MetaOptional = []string{"foo", "bar"}
  6183  	d.MetaRequired = []string{"bar", "baz"}
  6184  
  6185  	if err := d.Validate(); err == nil || !strings.Contains(err.Error(), "disjoint") {
  6186  		t.Fatalf("Expected meta not being disjoint error: %v", err)
  6187  	}
  6188  }
  6189  
  6190  func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) {
  6191  	ci.Parallel(t)
  6192  
  6193  	job := testJob()
  6194  	job.ParameterizedJob = &ParameterizedJobConfig{
  6195  		Payload: DispatchPayloadOptional,
  6196  	}
  6197  	job.Type = JobTypeSystem
  6198  
  6199  	if err := job.Validate(); err == nil || !strings.Contains(err.Error(), "only be used with") {
  6200  		t.Fatalf("Expected bad scheduler type: %v", err)
  6201  	}
  6202  }
  6203  
  6204  func TestJobConfig_Validate_StopAfterClientDisconnect(t *testing.T) {
  6205  	ci.Parallel(t)
  6206  	// Set up a system job with stop_after_client_disconnect set, which is invalid
  6207  	job := testJob()
  6208  	job.Type = JobTypeSystem
  6209  	stop := 1 * time.Minute
  6210  	job.TaskGroups[0].StopAfterClientDisconnect = &stop
  6211  
  6212  	err := job.Validate()
  6213  	require.Error(t, err)
  6214  	require.Contains(t, err.Error(), "stop_after_client_disconnect can only be set in batch and service jobs")
  6215  
  6216  	// Modify the job to a batch job with an invalid stop_after_client_disconnect value
  6217  	job.Type = JobTypeBatch
  6218  	invalid := -1 * time.Minute
  6219  	job.TaskGroups[0].StopAfterClientDisconnect = &invalid
  6220  
  6221  	err = job.Validate()
  6222  	require.Error(t, err)
  6223  	require.Contains(t, err.Error(), "stop_after_client_disconnect must be a positive value")
  6224  
  6225  	// Modify the job to a batch job with a valid stop_after_client_disconnect value
  6226  	job.Type = JobTypeBatch
  6227  	job.TaskGroups[0].StopAfterClientDisconnect = &stop
  6228  	err = job.Validate()
  6229  	require.NoError(t, err)
  6230  }
  6231  
  6232  func TestJobConfig_Validate_MaxClientDisconnect(t *testing.T) {
  6233  	// Set up a job with an invalid max_client_disconnect value
  6234  	job := testJob()
  6235  	timeout := -1 * time.Minute
  6236  	job.TaskGroups[0].MaxClientDisconnect = &timeout
  6237  	job.TaskGroups[0].StopAfterClientDisconnect = &timeout
  6238  
  6239  	err := job.Validate()
  6240  	require.Error(t, err)
  6241  	require.Contains(t, err.Error(), "max_client_disconnect cannot be negative")
  6242  	require.Contains(t, err.Error(), "Task group cannot be configured with both max_client_disconnect and stop_after_client_disconnect")
  6243  
  6244  	// Modify the job with a valid max_client_disconnect value
  6245  	timeout = 1 * time.Minute
  6246  	job.TaskGroups[0].MaxClientDisconnect = &timeout
  6247  	job.TaskGroups[0].StopAfterClientDisconnect = nil
  6248  	err = job.Validate()
  6249  	require.NoError(t, err)
  6250  }
  6251  
  6252  func TestParameterizedJobConfig_Canonicalize(t *testing.T) {
  6253  	ci.Parallel(t)
  6254  
  6255  	d := &ParameterizedJobConfig{}
  6256  	d.Canonicalize()
  6257  	if d.Payload != DispatchPayloadOptional {
  6258  		t.Fatalf("Canonicalize failed")
  6259  	}
  6260  }
  6261  
  6262  func TestDispatchPayloadConfig_Validate(t *testing.T) {
  6263  	ci.Parallel(t)
  6264  
  6265  	d := &DispatchPayloadConfig{
  6266  		File: "foo",
  6267  	}
  6268  
  6269  	// "foo" resolves to task/local/foo, inside the task directory: valid
  6270  	if err := d.Validate(); err != nil {
  6271  		t.Fatalf("bad: %v", err)
  6272  	}
  6273  
  6274  	// "../haha" resolves to task/haha, still inside the task directory: valid
  6275  	d.File = "../haha"
  6276  	if err := d.Validate(); err != nil {
  6277  		t.Fatalf("bad: %v", err)
  6278  	}
  6279  
  6280  	// "../../../haha" escapes the task directory: invalid
  6281  	d.File = "../../../haha"
  6282  	if err := d.Validate(); err == nil {
  6283  		t.Fatalf("expected validation error for file %q", d.File)
  6284  	}
  6285  }
  6286  
  6287  func TestScalingPolicy_Canonicalize(t *testing.T) {
  6288  	ci.Parallel(t)
  6289  
  6290  	cases := []struct {
  6291  		name     string
  6292  		input    *ScalingPolicy
  6293  		expected *ScalingPolicy
  6294  	}{
  6295  		{
  6296  			name:     "empty policy",
  6297  			input:    &ScalingPolicy{},
  6298  			expected: &ScalingPolicy{Type: ScalingPolicyTypeHorizontal},
  6299  		},
  6300  		{
  6301  			name:     "policy with type",
  6302  			input:    &ScalingPolicy{Type: "other-type"},
  6303  			expected: &ScalingPolicy{Type: "other-type"},
  6304  		},
  6305  	}
  6306  
  6307  	for _, c := range cases {
  6308  		t.Run(c.name, func(t *testing.T) {
  6309  			require := require.New(t)
  6310  
  6311  			c.input.Canonicalize()
  6312  			require.Equal(c.expected, c.input)
  6313  		})
  6314  	}
  6315  }
  6316  
  6317  func TestScalingPolicy_Validate(t *testing.T) {
  6318  	ci.Parallel(t)
  6319  	type testCase struct {
  6320  		name        string
  6321  		input       *ScalingPolicy
  6322  		expectedErr string
  6323  	}
  6324  	cases := []testCase{
  6325  		{
  6326  			name: "full horizontal policy",
  6327  			input: &ScalingPolicy{
  6328  				Policy: map[string]interface{}{
  6329  					"key": "value",
  6330  				},
  6331  				Type:    ScalingPolicyTypeHorizontal,
  6332  				Min:     5,
  6333  				Max:     5,
  6334  				Enabled: true,
  6335  				Target: map[string]string{
  6336  					ScalingTargetNamespace: "my-namespace",
  6337  					ScalingTargetJob:       "my-job",
  6338  					ScalingTargetGroup:     "my-task-group",
  6339  				},
  6340  			},
  6341  		},
  6342  		{
  6343  			name:        "missing type",
  6344  			input:       &ScalingPolicy{},
  6345  			expectedErr: "missing scaling policy type",
  6346  		},
  6347  		{
  6348  			name: "invalid type",
  6349  			input: &ScalingPolicy{
  6350  				Type: "not valid",
  6351  			},
  6352  			expectedErr: `scaling policy type "not valid" is not valid`,
  6353  		},
  6354  		{
  6355  			name: "min < 0",
  6356  			input: &ScalingPolicy{
  6357  				Type: ScalingPolicyTypeHorizontal,
  6358  				Min:  -1,
  6359  				Max:  5,
  6360  			},
  6361  			expectedErr: "minimum count must be specified and non-negative",
  6362  		},
  6363  		{
  6364  			name: "max < 0",
  6365  			input: &ScalingPolicy{
  6366  				Type: ScalingPolicyTypeHorizontal,
  6367  				Min:  5,
  6368  				Max:  -1,
  6369  			},
  6370  			expectedErr: "maximum count must be specified and non-negative",
  6371  		},
  6372  		{
  6373  			name: "min > max",
  6374  			input: &ScalingPolicy{
  6375  				Type: ScalingPolicyTypeHorizontal,
  6376  				Min:  10,
  6377  				Max:  0,
  6378  			},
  6379  			expectedErr: "maximum count must not be less than minimum count",
  6380  		},
  6381  		{
  6382  			name: "min == max",
  6383  			input: &ScalingPolicy{
  6384  				Type: ScalingPolicyTypeHorizontal,
  6385  				Min:  10,
  6386  				Max:  10,
  6387  			},
  6388  		},
  6389  		{
  6390  			name: "min == 0",
  6391  			input: &ScalingPolicy{
  6392  				Type: ScalingPolicyTypeHorizontal,
  6393  				Min:  0,
  6394  				Max:  10,
  6395  			},
  6396  		},
  6397  		{
  6398  			name: "max == 0",
  6399  			input: &ScalingPolicy{
  6400  				Type: ScalingPolicyTypeHorizontal,
  6401  				Min:  0,
  6402  				Max:  0,
  6403  			},
  6404  		},
  6405  		{
  6406  			name: "horizontal missing namespace",
  6407  			input: &ScalingPolicy{
  6408  				Type: ScalingPolicyTypeHorizontal,
  6409  				Target: map[string]string{
  6410  					ScalingTargetJob:   "my-job",
  6411  					ScalingTargetGroup: "my-group",
  6412  				},
  6413  			},
  6414  			expectedErr: "missing target namespace",
  6415  		},
  6416  		{
  6417  			name: "horizontal missing job",
  6418  			input: &ScalingPolicy{
  6419  				Type: ScalingPolicyTypeHorizontal,
  6420  				Target: map[string]string{
  6421  					ScalingTargetNamespace: "my-namespace",
  6422  					ScalingTargetGroup:     "my-group",
  6423  				},
  6424  			},
  6425  			expectedErr: "missing target job",
  6426  		},
  6427  		{
  6428  			name: "horizontal missing group",
  6429  			input: &ScalingPolicy{
  6430  				Type: ScalingPolicyTypeHorizontal,
  6431  				Target: map[string]string{
  6432  					ScalingTargetNamespace: "my-namespace",
  6433  					ScalingTargetJob:       "my-job",
  6434  				},
  6435  			},
  6436  			expectedErr: "missing target group",
  6437  		},
  6438  	}
  6439  
  6440  	for _, c := range cases {
  6441  		t.Run(c.name, func(t *testing.T) {
  6442  			require := require.New(t)
  6443  
  6444  			err := c.input.Validate()
  6445  
  6446  			if len(c.expectedErr) > 0 {
  6447  				require.ErrorContains(err, c.expectedErr)
  6448  			} else {
  6449  				require.NoError(err)
  6450  			}
  6451  		})
  6452  	}
  6453  }
  6454  
  6455  func TestIsRecoverable(t *testing.T) {
  6456  	ci.Parallel(t)
  6457  
  6458  	if IsRecoverable(nil) {
  6459  		t.Errorf("nil should not be recoverable")
  6460  	}
  6461  	if IsRecoverable(NewRecoverableError(nil, true)) {
  6462  		t.Errorf("NewRecoverableError(nil, true) should not be recoverable")
  6463  	}
  6464  	if IsRecoverable(fmt.Errorf("i promise im recoverable")) {
  6465  		t.Errorf("Custom errors should not be recoverable")
  6466  	}
  6467  	if IsRecoverable(NewRecoverableError(fmt.Errorf(""), false)) {
  6468  		t.Errorf("Explicitly unrecoverable errors should not be recoverable")
  6469  	}
  6470  	if !IsRecoverable(NewRecoverableError(fmt.Errorf(""), true)) {
  6471  		t.Errorf("Explicitly recoverable errors *should* be recoverable")
  6472  	}
  6473  }
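
// A minimal usage sketch for the recoverable-error contract tested above:
// callers retry only errors explicitly wrapped as recoverable (doWork and
// backoff are hypothetical).
//
//	err := doWork()
//	for IsRecoverable(err) {
//		time.Sleep(backoff)
//		err = doWork()
//	}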
  6474  
  6475  func TestACLTokenSetHash(t *testing.T) {
  6476  	ci.Parallel(t)
  6477  
  6478  	tk := &ACLToken{
  6479  		Name:     "foo",
  6480  		Type:     ACLClientToken,
  6481  		Policies: []string{"foo", "bar"},
  6482  		Global:   false,
  6483  	}
  6484  	out1 := tk.SetHash()
  6485  	assert.NotNil(t, out1)
  6486  	assert.NotNil(t, tk.Hash)
  6487  	assert.Equal(t, out1, tk.Hash)
  6488  
  6489  	tk.Policies = []string{"foo"}
  6490  	out2 := tk.SetHash()
  6491  	assert.NotNil(t, out2)
  6492  	assert.NotNil(t, tk.Hash)
  6493  	assert.Equal(t, out2, tk.Hash)
  6494  	assert.NotEqual(t, out1, out2)
  6495  }
  6496  
  6497  func TestACLPolicySetHash(t *testing.T) {
  6498  	ci.Parallel(t)
  6499  
  6500  	ap := &ACLPolicy{
  6501  		Name:        "foo",
  6502  		Description: "great policy",
  6503  		Rules:       "node { policy = \"read\" }",
  6504  	}
  6505  	out1 := ap.SetHash()
  6506  	assert.NotNil(t, out1)
  6507  	assert.NotNil(t, ap.Hash)
  6508  	assert.Equal(t, out1, ap.Hash)
  6509  
  6510  	ap.Rules = "node { policy = \"write\" }"
  6511  	out2 := ap.SetHash()
  6512  	assert.NotNil(t, out2)
  6513  	assert.NotNil(t, ap.Hash)
  6514  	assert.Equal(t, out2, ap.Hash)
  6515  	assert.NotEqual(t, out1, out2)
  6516  }
  6517  
  6518  func TestTaskEventPopulate(t *testing.T) {
  6519  	ci.Parallel(t)
  6520  
  6521  	prepopulatedEvent := NewTaskEvent(TaskSetup)
  6522  	prepopulatedEvent.DisplayMessage = "Hola"
  6523  	testcases := []struct {
  6524  		event       *TaskEvent
  6525  		expectedMsg string
  6526  	}{
  6527  		{nil, ""},
  6528  		{prepopulatedEvent, "Hola"},
  6529  		{NewTaskEvent(TaskSetup).SetMessage("Setup"), "Setup"},
  6530  		{NewTaskEvent(TaskStarted), "Task started by client"},
  6531  		{NewTaskEvent(TaskReceived), "Task received by client"},
  6532  		{NewTaskEvent(TaskFailedValidation), "Validation of task failed"},
  6533  		{NewTaskEvent(TaskFailedValidation).SetValidationError(fmt.Errorf("task failed validation")), "task failed validation"},
  6534  		{NewTaskEvent(TaskSetupFailure), "Task setup failed"},
  6535  		{NewTaskEvent(TaskSetupFailure).SetSetupError(fmt.Errorf("task failed setup")), "task failed setup"},
  6536  		{NewTaskEvent(TaskDriverFailure), "Failed to start task"},
  6537  		{NewTaskEvent(TaskDownloadingArtifacts), "Client is downloading artifacts"},
  6538  		{NewTaskEvent(TaskArtifactDownloadFailed), "Failed to download artifacts"},
  6539  		{NewTaskEvent(TaskArtifactDownloadFailed).SetDownloadError(fmt.Errorf("connection reset by peer")), "connection reset by peer"},
  6540  		{NewTaskEvent(TaskRestarting).SetRestartDelay(2 * time.Second).SetRestartReason(ReasonWithinPolicy), "Task restarting in 2s"},
  6541  		{NewTaskEvent(TaskRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it - Task restarting in 0s"},
  6542  		{NewTaskEvent(TaskKilling), "Sent interrupt"},
  6543  		{NewTaskEvent(TaskKilling).SetKillReason("Its time for you to die"), "Its time for you to die"},
  6544  		{NewTaskEvent(TaskKilling).SetKillTimeout(1*time.Second, 5*time.Second), "Sent interrupt. Waiting 1s before force killing"},
  6545  		{NewTaskEvent(TaskKilling).SetKillTimeout(10*time.Second, 5*time.Second), "Sent interrupt. Waiting 5s before force killing"},
  6546  		{NewTaskEvent(TaskTerminated).SetExitCode(-1).SetSignal(3), "Exit Code: -1, Signal: 3"},
  6547  		{NewTaskEvent(TaskTerminated).SetMessage("Goodbye"), "Exit Code: 0, Exit Message: \"Goodbye\""},
  6548  		{NewTaskEvent(TaskKilled), "Task successfully killed"},
  6549  		{NewTaskEvent(TaskKilled).SetKillError(fmt.Errorf("undead creatures can't be killed")), "undead creatures can't be killed"},
  6550  		{NewTaskEvent(TaskNotRestarting).SetRestartReason("Chaos Monkey did it"), "Chaos Monkey did it"},
  6551  		{NewTaskEvent(TaskNotRestarting), "Task exceeded restart policy"},
  6552  		{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
  6553  		{NewTaskEvent(TaskSiblingFailed), "Task's sibling failed"},
  6554  		{NewTaskEvent(TaskSiblingFailed).SetFailedSibling("patient zero"), "Task's sibling \"patient zero\" failed"},
  6555  		{NewTaskEvent(TaskSignaling), "Task being sent a signal"},
  6556  		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt), "Task being sent signal interrupt"},
  6557  		{NewTaskEvent(TaskSignaling).SetTaskSignal(os.Interrupt).SetTaskSignalReason("process interrupted"), "Task being sent signal interrupt: process interrupted"},
  6558  		{NewTaskEvent(TaskRestartSignal), "Task signaled to restart"},
  6559  		{NewTaskEvent(TaskRestartSignal).SetRestartReason("Chaos Monkey restarted it"), "Chaos Monkey restarted it"},
  6560  		{NewTaskEvent(TaskClientReconnected), "Client reconnected"},
  6561  		{NewTaskEvent(TaskLeaderDead), "Leader Task in Group dead"},
  6562  		{NewTaskEvent("Unknown Type, No message"), ""},
  6563  		{NewTaskEvent("Unknown Type").SetMessage("Hello world"), "Hello world"},
  6564  	}
  6565  
  6566  	for _, tc := range testcases {
  6567  		tc.event.PopulateEventDisplayMessage()
  6568  		if tc.event != nil && tc.event.DisplayMessage != tc.expectedMsg {
  6569  			t.Fatalf("Expected %v but got %v", tc.expectedMsg, tc.event.DisplayMessage)
  6570  		}
  6571  	}
  6572  }
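
// The table above leans on TaskEvent's chainable setters; a usage sketch:
//
//	ev := NewTaskEvent(TaskKilling).SetKillTimeout(10*time.Second, 5*time.Second)
//	ev.PopulateEventDisplayMessage()
//	// ev.DisplayMessage == "Sent interrupt. Waiting 5s before force killing"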
  6573  
  6574  func TestNetworkResourcesEquals(t *testing.T) {
  6575  	ci.Parallel(t)
  6576  
  6577  	require := require.New(t)
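	// Port literals below are positional: {Label, Value, To, HostNetwork}.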
  6578  	var networkResourcesTest = []struct {
  6579  		input    []*NetworkResource
  6580  		expected bool
  6581  		errorMsg string
  6582  	}{
  6583  		{
  6584  			[]*NetworkResource{
  6585  				{
  6586  					IP:            "10.0.0.1",
  6587  					MBits:         50,
  6588  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6589  				},
  6590  				{
  6591  					IP:            "10.0.0.1",
  6592  					MBits:         50,
  6593  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6594  				},
  6595  			},
  6596  			true,
  6597  			"Equal network resources should return true",
  6598  		},
  6599  		{
  6600  			[]*NetworkResource{
  6601  				{
  6602  					IP:            "10.0.0.0",
  6603  					MBits:         50,
  6604  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6605  				},
  6606  				{
  6607  					IP:            "10.0.0.1",
  6608  					MBits:         50,
  6609  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6610  				},
  6611  			},
  6612  			false,
  6613  			"Different IP addresses should return false",
  6614  		},
  6615  		{
  6616  			[]*NetworkResource{
  6617  				{
  6618  					IP:            "10.0.0.1",
  6619  					MBits:         40,
  6620  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6621  				},
  6622  				{
  6623  					IP:            "10.0.0.1",
  6624  					MBits:         50,
  6625  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6626  				},
  6627  			},
  6628  			false,
  6629  			"Different MBits values should return false",
  6630  		},
  6631  		{
  6632  			[]*NetworkResource{
  6633  				{
  6634  					IP:            "10.0.0.1",
  6635  					MBits:         50,
  6636  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6637  				},
  6638  				{
  6639  					IP:            "10.0.0.1",
  6640  					MBits:         50,
  6641  					ReservedPorts: []Port{{"web", 80, 0, ""}, {"web", 80, 0, ""}},
  6642  				},
  6643  			},
  6644  			false,
  6645  			"Different ReservedPorts lengths should return false",
  6646  		},
  6647  		{
  6648  			[]*NetworkResource{
  6649  				{
  6650  					IP:            "10.0.0.1",
  6651  					MBits:         50,
  6652  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6653  				},
  6654  				{
  6655  					IP:            "10.0.0.1",
  6656  					MBits:         50,
  6657  					ReservedPorts: []Port{},
  6658  				},
  6659  			},
  6660  			false,
  6661  			"Empty and non-empty ReservedPorts values should return false",
  6662  		},
  6663  		{
  6664  			[]*NetworkResource{
  6665  				{
  6666  					IP:            "10.0.0.1",
  6667  					MBits:         50,
  6668  					ReservedPorts: []Port{{"web", 80, 0, ""}},
  6669  				},
  6670  				{
  6671  					IP:            "10.0.0.1",
  6672  					MBits:         50,
  6673  					ReservedPorts: []Port{{"notweb", 80, 0, ""}},
  6674  				},
  6675  			},
  6676  			false,
  6677  			"ReservedPorts with different values should return false",
  6678  		},
  6679  		{
  6680  			[]*NetworkResource{
  6681  				{
  6682  					IP:           "10.0.0.1",
  6683  					MBits:        50,
  6684  					DynamicPorts: []Port{{"web", 80, 0, ""}},
  6685  				},
  6686  				{
  6687  					IP:           "10.0.0.1",
  6688  					MBits:        50,
  6689  					DynamicPorts: []Port{{"web", 80, 0, ""}, {"web", 80, 0, ""}},
  6690  				},
  6691  			},
  6692  			false,
  6693  			"Different DynamicPorts lengths should return false",
  6694  		},
  6695  		{
  6696  			[]*NetworkResource{
  6697  				{
  6698  					IP:           "10.0.0.1",
  6699  					MBits:        50,
  6700  					DynamicPorts: []Port{{"web", 80, 0, ""}},
  6701  				},
  6702  				{
  6703  					IP:           "10.0.0.1",
  6704  					MBits:        50,
  6705  					DynamicPorts: []Port{},
  6706  				},
  6707  			},
  6708  			false,
  6709  			"Empty and non-empty DynamicPorts values should return false",
  6710  		},
  6711  		{
  6712  			[]*NetworkResource{
  6713  				{
  6714  					IP:           "10.0.0.1",
  6715  					MBits:        50,
  6716  					DynamicPorts: []Port{{"web", 80, 0, ""}},
  6717  				},
  6718  				{
  6719  					IP:           "10.0.0.1",
  6720  					MBits:        50,
  6721  					DynamicPorts: []Port{{"notweb", 80, 0, ""}},
  6722  				},
  6723  			},
  6724  			false,
  6725  			"DynamicPorts with different values should return false",
  6726  		},
  6727  	}
  6728  	for _, testCase := range networkResourcesTest {
  6729  		first := testCase.input[0]
  6730  		second := testCase.input[1]
  6731  		require.Equal(testCase.expected, first.Equal(second), testCase.errorMsg)
  6732  	}
  6733  }
  6734  
  6735  func TestNode_Canonicalize(t *testing.T) {
  6736  	ci.Parallel(t)
  6737  	require := require.New(t)
  6738  
  6739  	// Make sure the eligibility is set properly
  6740  	node := &Node{}
  6741  	node.Canonicalize()
  6742  	require.Equal(NodeSchedulingEligible, node.SchedulingEligibility)
  6743  
  6744  	node = &Node{
  6745  		DrainStrategy: &DrainStrategy{
  6746  			DrainSpec: DrainSpec{
  6747  				Deadline: 30000,
  6748  			},
  6749  		},
  6750  	}
  6751  	node.Canonicalize()
  6752  	require.Equal(NodeSchedulingIneligible, node.SchedulingEligibility)
  6753  }
  6754  
  6755  func TestNode_Copy(t *testing.T) {
  6756  	ci.Parallel(t)
  6757  	require := require.New(t)
  6758  
  6759  	node := &Node{
  6760  		ID:         uuid.Generate(),
  6761  		SecretID:   uuid.Generate(),
  6762  		Datacenter: "dc1",
  6763  		Name:       "foobar",
  6764  		Attributes: map[string]string{
  6765  			"kernel.name":        "linux",
  6766  			"arch":               "x86",
  6767  			"nomad.version":      "0.5.0",
  6768  			"driver.exec":        "1",
  6769  			"driver.mock_driver": "1",
  6770  		},
  6771  		Resources: &Resources{
  6772  			CPU:      4000,
  6773  			MemoryMB: 8192,
  6774  			DiskMB:   100 * 1024,
  6775  			Networks: []*NetworkResource{
  6776  				{
  6777  					Device: "eth0",
  6778  					CIDR:   "192.168.0.100/32",
  6779  					MBits:  1000,
  6780  				},
  6781  			},
  6782  		},
  6783  		Reserved: &Resources{
  6784  			CPU:      100,
  6785  			MemoryMB: 256,
  6786  			DiskMB:   4 * 1024,
  6787  			Networks: []*NetworkResource{
  6788  				{
  6789  					Device:        "eth0",
  6790  					IP:            "192.168.0.100",
  6791  					ReservedPorts: []Port{{Label: "ssh", Value: 22}},
  6792  					MBits:         1,
  6793  				},
  6794  			},
  6795  		},
  6796  		NodeResources: &NodeResources{
  6797  			Cpu: NodeCpuResources{
  6798  				CpuShares:          4000,
  6799  				TotalCpuCores:      4,
  6800  				ReservableCpuCores: []uint16{0, 1, 2, 3},
  6801  			},
  6802  			Memory: NodeMemoryResources{
  6803  				MemoryMB: 8192,
  6804  			},
  6805  			Disk: NodeDiskResources{
  6806  				DiskMB: 100 * 1024,
  6807  			},
  6808  			Networks: []*NetworkResource{
  6809  				{
  6810  					Device: "eth0",
  6811  					CIDR:   "192.168.0.100/32",
  6812  					MBits:  1000,
  6813  				},
  6814  			},
  6815  		},
  6816  		ReservedResources: &NodeReservedResources{
  6817  			Cpu: NodeReservedCpuResources{
  6818  				CpuShares:        100,
  6819  				ReservedCpuCores: []uint16{0},
  6820  			},
  6821  			Memory: NodeReservedMemoryResources{
  6822  				MemoryMB: 256,
  6823  			},
  6824  			Disk: NodeReservedDiskResources{
  6825  				DiskMB: 4 * 1024,
  6826  			},
  6827  			Networks: NodeReservedNetworkResources{
  6828  				ReservedHostPorts: "22",
  6829  			},
  6830  		},
  6831  		Links: map[string]string{
  6832  			"consul": "foobar.dc1",
  6833  		},
  6834  		Meta: map[string]string{
  6835  			"pci-dss":  "true",
  6836  			"database": "mysql",
  6837  			"version":  "5.6",
  6838  		},
  6839  		NodeClass:             "linux-medium-pci",
  6840  		Status:                NodeStatusReady,
  6841  		SchedulingEligibility: NodeSchedulingEligible,
  6842  		Drivers: map[string]*DriverInfo{
  6843  			"mock_driver": {
  6844  				Attributes:        map[string]string{"running": "1"},
  6845  				Detected:          true,
  6846  				Healthy:           true,
  6847  				HealthDescription: "Currently active",
  6848  				UpdateTime:        time.Now(),
  6849  			},
  6850  		},
  6851  	}
  6852  	node.ComputeClass()
  6853  
  6854  	node2 := node.Copy()
  6855  
  6856  	require.Equal(node.Attributes, node2.Attributes)
  6857  	require.Equal(node.Resources, node2.Resources)
  6858  	require.Equal(node.Reserved, node2.Reserved)
  6859  	require.Equal(node.Links, node2.Links)
  6860  	require.Equal(node.Meta, node2.Meta)
  6861  	require.Equal(node.Events, node2.Events)
  6862  	require.Equal(node.DrainStrategy, node2.DrainStrategy)
  6863  	require.Equal(node.Drivers, node2.Drivers)
  6864  }
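
// An aliasing spot-check in the spirit of the assertions above: a correct
// deep copy means mutating the copy can never leak into the original.
//
//	node2.Meta["database"] = "postgres"
//	// node.Meta["database"] must still be "mysql"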
  6865  
  6866  func TestNode_GetID(t *testing.T) {
  6867  	ci.Parallel(t)
  6868  
  6869  	testCases := []struct {
  6870  		inputNode      *Node
  6871  		expectedOutput string
  6872  		name           string
  6873  	}{
  6874  		{
  6875  			inputNode:      nil,
  6876  			expectedOutput: "",
  6877  			name:           "nil input node",
  6878  		},
  6879  		{
  6880  			inputNode:      &Node{ID: "someid"},
  6881  			expectedOutput: "someid",
  6882  			name:           "non-nil input node",
  6883  		},
  6884  	}
  6885  
  6886  	for _, tc := range testCases {
  6887  		actualOutput := tc.inputNode.GetID()
  6888  		require.Equal(t, tc.expectedOutput, actualOutput)
  6889  	}
  6890  }
  6891  
  6892  func TestNode_Sanitize(t *testing.T) {
  6893  	ci.Parallel(t)
  6894  
  6895  	require := require.New(t)
  6896  
  6897  	testCases := []*Node{
  6898  		nil,
  6899  		{
  6900  			ID:       uuid.Generate(),
  6901  			SecretID: "",
  6902  		},
  6903  		{
  6904  			ID:       uuid.Generate(),
  6905  			SecretID: uuid.Generate(),
  6906  		},
  6907  	}
  6908  	for _, tc := range testCases {
  6909  		sanitized := tc.Sanitize()
  6910  		if tc == nil {
  6911  			require.Nil(sanitized)
  6912  		} else {
  6913  			require.NotNil(sanitized)
  6914  			require.Empty(sanitized.SecretID)
  6915  		}
  6916  	}
  6917  }
  6918  
  6919  func TestSpread_Validate(t *testing.T) {
  6920  	ci.Parallel(t)
  6921  	type tc struct {
  6922  		spread *Spread
  6923  		err    error
  6924  		name   string
  6925  	}
  6926  	testCases := []tc{
  6927  		{
  6928  			spread: &Spread{},
  6929  			err:    fmt.Errorf("Missing spread attribute"),
  6930  			name:   "empty spread",
  6931  		},
  6932  		{
  6933  			spread: &Spread{
  6934  				Attribute: "${node.datacenter}",
  6935  				Weight:    -1,
  6936  			},
  6937  			err:  fmt.Errorf("Spread block must have a positive weight from 0 to 100"),
  6938  			name: "Invalid weight",
  6939  		},
  6940  		{
  6941  			spread: &Spread{
  6942  				Attribute: "${node.datacenter}",
  6943  				Weight:    110,
  6944  			},
  6945  			err:  fmt.Errorf("Spread block must have a positive weight from 0 to 100"),
  6946  			name: "Invalid weight above 100",
  6947  		},
  6948  		{
  6949  			spread: &Spread{
  6950  				Attribute: "${node.datacenter}",
  6951  				Weight:    50,
  6952  				SpreadTarget: []*SpreadTarget{
  6953  					{
  6954  						Value:   "dc1",
  6955  						Percent: 25,
  6956  					},
  6957  					{
  6958  						Value:   "dc2",
  6959  						Percent: 150,
  6960  					},
  6961  				},
  6962  			},
  6963  			err:  fmt.Errorf("Spread target percentage for value \"dc2\" must be between 0 and 100"),
  6964  			name: "Invalid percentages",
  6965  		},
  6966  		{
  6967  			spread: &Spread{
  6968  				Attribute: "${node.datacenter}",
  6969  				Weight:    50,
  6970  				SpreadTarget: []*SpreadTarget{
  6971  					{
  6972  						Value:   "dc1",
  6973  						Percent: 75,
  6974  					},
  6975  					{
  6976  						Value:   "dc2",
  6977  						Percent: 75,
  6978  					},
  6979  				},
  6980  			},
  6981  			err:  fmt.Errorf("Sum of spread target percentages must not be greater than 100%%; got %d%%", 150),
  6982  			name: "Invalid percentage sum",
  6983  		},
  6984  		{
  6985  			spread: &Spread{
  6986  				Attribute: "${node.datacenter}",
  6987  				Weight:    50,
  6988  				SpreadTarget: []*SpreadTarget{
  6989  					{
  6990  						Value:   "dc1",
  6991  						Percent: 25,
  6992  					},
  6993  					{
  6994  						Value:   "dc1",
  6995  						Percent: 50,
  6996  					},
  6997  				},
  6998  			},
  6999  			err:  fmt.Errorf("Spread target value \"dc1\" already defined"),
  7000  			name: "Duplicate spread targets",
  7001  		},
  7002  		{
  7003  			spread: &Spread{
  7004  				Attribute: "${node.datacenter}",
  7005  				Weight:    50,
  7006  				SpreadTarget: []*SpreadTarget{
  7007  					{
  7008  						Value:   "dc1",
  7009  						Percent: 25,
  7010  					},
  7011  					{
  7012  						Value:   "dc2",
  7013  						Percent: 50,
  7014  					},
  7015  				},
  7016  			},
  7017  			err:  nil,
  7018  			name: "Valid spread",
  7019  		},
  7020  	}
  7021  
  7022  	for _, tc := range testCases {
  7023  		t.Run(tc.name, func(t *testing.T) {
  7024  			err := tc.spread.Validate()
  7025  			if tc.err != nil {
  7026  				require.NotNil(t, err)
  7027  				require.Contains(t, err.Error(), tc.err.Error())
  7028  			} else {
  7029  				require.Nil(t, err)
  7030  			}
  7031  		})
  7032  	}
  7033  }
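
// The percentage rules the cases above encode, in short: every target percent
// must lie in [0, 100], target values must be unique, and the percents must
// sum to at most 100:
//
//	sum := 0
//	for _, target := range spread.SpreadTarget {
//		sum += int(target.Percent)
//	}
//	// sum > 100 is a validation error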
  7034  
  7035  func TestNodeReservedNetworkResources_ParseReserved(t *testing.T) {
  7036  	ci.Parallel(t)
  7037  
  7038  	require := require.New(t)
  7039  	cases := []struct {
  7040  		Input  string
  7041  		Parsed []uint64
  7042  		Err    bool
  7043  	}{
  7044  		{
  7045  			"1,2,3",
  7046  			[]uint64{1, 2, 3},
  7047  			false,
  7048  		},
  7049  		{
  7050  			"3,1,2,1,2,3,1-3",
  7051  			[]uint64{1, 2, 3},
  7052  			false,
  7053  		},
  7054  		{
  7055  			"3-1",
  7056  			nil,
  7057  			true,
  7058  		},
  7059  		{
  7060  			"1-3,2-4",
  7061  			[]uint64{1, 2, 3, 4},
  7062  			false,
  7063  		},
  7064  		{
  7065  			"1-3,4,5-5,6,7,8-10",
  7066  			[]uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
  7067  			false,
  7068  		},
  7069  	}
  7070  
  7071  	for i, tc := range cases {
  7072  		r := &NodeReservedNetworkResources{ReservedHostPorts: tc.Input}
  7073  		out, err := r.ParseReservedHostPorts()
  7074  		if (err != nil) != tc.Err {
  7075  			t.Fatalf("test case %d: %v", i, err)
  7076  		}
  7077  
  7078  		require.Equal(out, tc.Parsed)
  7079  	}
  7080  }
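
// The grammar exercised above, as a sketch (not the package's actual parser):
// a comma-separated list of single ports and inclusive "lo-hi" ranges, with
// duplicates collapsed, results sorted, and ranges where lo > hi rejected.
//
//	for _, part := range strings.Split(spec, ",") {
//		lo, hi, isRange := strings.Cut(part, "-")
//		// single value when !isRange; otherwise expand lo..hi,
//		// rejecting ranges where lo > hi
//	}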
  7081  
  7082  func TestMultiregion_CopyCanonicalize(t *testing.T) {
  7083  	ci.Parallel(t)
  7084  
  7085  	require := require.New(t)
  7086  
  7087  	emptyOld := &Multiregion{}
  7088  	expected := &Multiregion{
  7089  		Strategy: &MultiregionStrategy{},
  7090  		Regions:  []*MultiregionRegion{},
  7091  	}
  7092  
  7093  	old := emptyOld.Copy()
  7094  	old.Canonicalize()
  7095  	require.Equal(old, expected)
  7096  	require.False(old.Diff(expected))
  7097  
  7098  	nonEmptyOld := &Multiregion{
  7099  		Strategy: &MultiregionStrategy{
  7100  			MaxParallel: 2,
  7101  			OnFailure:   "fail_all",
  7102  		},
  7103  		Regions: []*MultiregionRegion{
  7104  			{
  7105  				Name:        "west",
  7106  				Count:       2,
  7107  				Datacenters: []string{"west-1", "west-2"},
  7108  				Meta:        map[string]string{},
  7109  			},
  7110  			{
  7111  				Name:        "east",
  7112  				Count:       1,
  7113  				Datacenters: []string{"east-1"},
  7114  				Meta:        map[string]string{},
  7115  			},
  7116  		},
  7117  	}
  7118  
  7119  	old = nonEmptyOld.Copy()
  7120  	old.Canonicalize()
  7121  	require.Equal(old, nonEmptyOld)
  7122  	require.False(old.Diff(nonEmptyOld))
  7123  }
  7124  
  7125  func TestNodeResources_Copy(t *testing.T) {
  7126  	ci.Parallel(t)
  7127  
  7128  	orig := &NodeResources{
  7129  		Cpu: NodeCpuResources{
  7130  			CpuShares:          int64(32000),
  7131  			TotalCpuCores:      32,
  7132  			ReservableCpuCores: []uint16{1, 2, 3, 9},
  7133  		},
  7134  		Memory: NodeMemoryResources{
  7135  			MemoryMB: int64(64000),
  7136  		},
  7137  		Networks: Networks{
  7138  			{
  7139  				Device: "foo",
  7140  			},
  7141  		},
  7142  		NodeNetworks: []*NodeNetworkResource{
  7143  			{
  7144  				Mode:       "host",
  7145  				Device:     "eth0",
  7146  				MacAddress: "00:00:00:00:00:00",
  7147  				Speed:      1000,
  7148  				Addresses: []NodeNetworkAddress{
  7149  					{
  7150  						Family:        NodeNetworkAF_IPv4,
  7151  						Alias:         "private",
  7152  						Address:       "192.168.0.100",
  7153  						ReservedPorts: "22,80",
  7154  						Gateway:       "192.168.0.1",
  7155  					},
  7156  				},
  7157  			},
  7158  		},
  7159  	}
  7160  
  7161  	kopy := orig.Copy()
  7162  	assert.Equal(t, orig, kopy)
  7163  
  7164  	// Make sure slices aren't shared
  7165  	kopy.Cpu.ReservableCpuCores[1] = 9000
  7166  	assert.NotEqual(t, orig.Cpu.ReservableCpuCores, kopy.Cpu.ReservableCpuCores)
  7167  
  7168  	kopy.NodeNetworks[0].MacAddress = "11:11:11:11:11:11"
  7169  	kopy.NodeNetworks[0].Addresses[0].Alias = "public"
  7170  	assert.NotEqual(t, orig.NodeNetworks[0], kopy.NodeNetworks[0])
  7171  }
  7172  
  7173  func TestNodeResources_Merge(t *testing.T) {
  7174  	ci.Parallel(t)
  7175  
  7176  	res := &NodeResources{
  7177  		Cpu: NodeCpuResources{
  7178  			CpuShares:     int64(32000),
  7179  			TotalCpuCores: 32,
  7180  		},
  7181  		Memory: NodeMemoryResources{
  7182  			MemoryMB: int64(64000),
  7183  		},
  7184  		Networks: Networks{
  7185  			{
  7186  				Device: "foo",
  7187  			},
  7188  		},
  7189  	}
  7190  
  7191  	res.Merge(&NodeResources{
  7192  		Cpu: NodeCpuResources{ReservableCpuCores: []uint16{0, 1, 2, 3}},
  7193  		Memory: NodeMemoryResources{
  7194  			MemoryMB: int64(100000),
  7195  		},
  7196  		Networks: Networks{
  7197  			{
  7198  				Mode: "foo/bar",
  7199  			},
  7200  		},
  7201  	})
  7202  
  7203  	require.Exactly(t, &NodeResources{
  7204  		Cpu: NodeCpuResources{
  7205  			CpuShares:          int64(32000),
  7206  			TotalCpuCores:      32,
  7207  			ReservableCpuCores: []uint16{0, 1, 2, 3},
  7208  		},
  7209  		Memory: NodeMemoryResources{
  7210  			MemoryMB: int64(100000),
  7211  		},
  7212  		Networks: Networks{
  7213  			{
  7214  				Device: "foo",
  7215  			},
  7216  			{
  7217  				Mode: "foo/bar",
  7218  			},
  7219  		},
  7220  	}, res)
  7221  }
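
// Merge semantics pinned down above, in short: scalar fields from the
// argument win only when they are non-zero (CpuShares and TotalCpuCores
// survive the zero-valued incoming Cpu, while MemoryMB is overwritten), and
// slices such as Networks are appended to rather than replaced.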
  7222  
  7223  func TestAllocatedPortMapping_Equal(t *testing.T) {
  7224  	ci.Parallel(t)
  7225  
  7226  	must.Equal[*AllocatedPortMapping](t, nil, nil)
  7227  	must.NotEqual[*AllocatedPortMapping](t, nil, new(AllocatedPortMapping))
  7228  
  7229  	must.StructEqual(t, &AllocatedPortMapping{
  7230  		Label:  "http",
  7231  		Value:  80,
  7232  		To:     9000,
  7233  		HostIP: "10.0.0.1",
  7234  	}, []must.Tweak[*AllocatedPortMapping]{{
  7235  		Field: "Label",
  7236  		Apply: func(m *AllocatedPortMapping) { m.Label = "https" },
  7237  	}, {
  7238  		Field: "Value",
  7239  		Apply: func(m *AllocatedPortMapping) { m.Value = 443 },
  7240  	}, {
  7241  		Field: "To",
  7242  		Apply: func(m *AllocatedPortMapping) { m.To = 9999 },
  7243  	}, {
  7244  		Field: "HostIP",
  7245  		Apply: func(m *AllocatedPortMapping) { m.HostIP = "10.1.1.1" },
  7246  	}})
  7247  }
  7248  
  7249  func TestAllocatedResources_Canonicalize(t *testing.T) {
  7250  	ci.Parallel(t)
  7251  
  7252  	cases := map[string]struct {
  7253  		input    *AllocatedResources
  7254  		expected *AllocatedResources
  7255  	}{
  7256  		"base": {
  7257  			input: &AllocatedResources{
  7258  				Tasks: map[string]*AllocatedTaskResources{
  7259  					"task": {
  7260  						Networks: Networks{
  7261  							{
  7262  								IP:           "127.0.0.1",
  7263  								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
  7264  							},
  7265  						},
  7266  					},
  7267  				},
  7268  			},
  7269  			expected: &AllocatedResources{
  7270  				Tasks: map[string]*AllocatedTaskResources{
  7271  					"task": {
  7272  						Networks: Networks{
  7273  							{
  7274  								IP:           "127.0.0.1",
  7275  								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
  7276  							},
  7277  						},
  7278  					},
  7279  				},
  7280  				Shared: AllocatedSharedResources{
  7281  					Ports: AllocatedPorts{
  7282  						{
  7283  							Label:  "admin",
  7284  							Value:  8080,
  7285  							To:     0,
  7286  							HostIP: "127.0.0.1",
  7287  						},
  7288  					},
  7289  				},
  7290  			},
  7291  		},
  7292  		"base with existing": {
  7293  			input: &AllocatedResources{
  7294  				Tasks: map[string]*AllocatedTaskResources{
  7295  					"task": {
  7296  						Networks: Networks{
  7297  							{
  7298  								IP:           "127.0.0.1",
  7299  								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
  7300  							},
  7301  						},
  7302  					},
  7303  				},
  7304  				Shared: AllocatedSharedResources{
  7305  					Ports: AllocatedPorts{
  7306  						{
  7307  							Label:  "http",
  7308  							Value:  80,
  7309  							To:     8080,
  7310  							HostIP: "127.0.0.1",
  7311  						},
  7312  					},
  7313  				},
  7314  			},
  7315  			expected: &AllocatedResources{
  7316  				Tasks: map[string]*AllocatedTaskResources{
  7317  					"task": {
  7318  						Networks: Networks{
  7319  							{
  7320  								IP:           "127.0.0.1",
  7321  								DynamicPorts: []Port{{"admin", 8080, 0, "default"}},
  7322  							},
  7323  						},
  7324  					},
  7325  				},
  7326  				Shared: AllocatedSharedResources{
  7327  					Ports: AllocatedPorts{
  7328  						{
  7329  							Label:  "http",
  7330  							Value:  80,
  7331  							To:     8080,
  7332  							HostIP: "127.0.0.1",
  7333  						},
  7334  						{
  7335  							Label:  "admin",
  7336  							Value:  8080,
  7337  							To:     0,
  7338  							HostIP: "127.0.0.1",
  7339  						},
  7340  					},
  7341  				},
  7342  			},
  7343  		},
  7344  	}
  7345  	for name, tc := range cases {
  7346  		tc.input.Canonicalize()
  7347  		require.Exactly(t, tc.expected, tc.input, "case %s did not match", name)
  7348  	}
  7349  }
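
// Canonicalization behavior asserted above, in sketch form: each task's
// dynamic ports are lifted into Shared.Ports as AllocatedPortMapping entries,
// taking HostIP from the owning network's IP and appending after any ports
// already present.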
  7350  
  7351  func TestAllocatedSharedResources_Canonicalize(t *testing.T) {
  7352  	ci.Parallel(t)
  7353  
  7354  	a := &AllocatedSharedResources{
  7355  		Networks: []*NetworkResource{
  7356  			{
  7357  				IP: "127.0.0.1",
  7358  				DynamicPorts: []Port{
  7359  					{
  7360  						Label: "http",
  7361  						Value: 22222,
  7362  						To:    8080,
  7363  					},
  7364  				},
  7365  				ReservedPorts: []Port{
  7366  					{
  7367  						Label: "redis",
  7368  						Value: 6783,
  7369  						To:    6783,
  7370  					},
  7371  				},
  7372  			},
  7373  		},
  7374  	}
  7375  
  7376  	a.Canonicalize()
  7377  	require.Exactly(t, AllocatedPorts{
  7378  		{
  7379  			Label:  "http",
  7380  			Value:  22222,
  7381  			To:     8080,
  7382  			HostIP: "127.0.0.1",
  7383  		},
  7384  		{
  7385  			Label:  "redis",
  7386  			Value:  6783,
  7387  			To:     6783,
  7388  			HostIP: "127.0.0.1",
  7389  		},
  7390  	}, a.Ports)
  7391  }
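
// Once ports are flattened into AllocatedPorts, callers typically fetch them
// by label. A minimal lookup sketch (portByLabelSketch is hypothetical; the
// package may expose its own accessor):
func portByLabelSketch(ports AllocatedPorts, label string) (AllocatedPortMapping, bool) {
	for _, p := range ports {
		if p.Label == label {
			return p, true
		}
	}
	return AllocatedPortMapping{}, false
}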
  7392  
  7393  func TestTaskGroup_validateScriptChecksInGroupServices(t *testing.T) {
  7394  	ci.Parallel(t)
  7395  
  7396  	t.Run("service task not set", func(t *testing.T) {
  7397  		tg := &TaskGroup{
  7398  			Name: "group1",
  7399  			Services: []*Service{{
  7400  				Name:     "service1",
  7401  				TaskName: "", // unset
  7402  				Checks: []*ServiceCheck{{
  7403  					Name:     "check1",
  7404  					Type:     "script",
  7405  					TaskName: "", // unset
  7406  				}, {
  7407  					Name: "check2",
  7408  					Type: "ttl", // not script
  7409  				}, {
  7410  					Name:     "check3",
  7411  					Type:     "script",
  7412  					TaskName: "", // unset
  7413  				}},
  7414  			}, {
  7415  				Name: "service2",
  7416  				Checks: []*ServiceCheck{{
  7417  					Type:     "script",
  7418  					TaskName: "task1", // set
  7419  				}},
  7420  			}, {
  7421  				Name:     "service3",
  7422  				TaskName: "", // unset
  7423  				Checks: []*ServiceCheck{{
  7424  					Name:     "check1",
  7425  					Type:     "script",
  7426  					TaskName: "", // unset
  7427  				}},
  7428  			}},
  7429  		}
  7430  
  7431  		errStr := tg.validateScriptChecksInGroupServices().Error()
  7432  		require.Contains(t, errStr, "Service [group1]->service1 or Check check1 must specify task parameter")
  7433  		require.Contains(t, errStr, "Service [group1]->service1 or Check check3 must specify task parameter")
  7434  		require.Contains(t, errStr, "Service [group1]->service3 or Check check1 must specify task parameter")
  7435  	})
  7436  
  7437  	t.Run("service task set", func(t *testing.T) {
  7438  		tgOK := &TaskGroup{
  7439  			Name: "group1",
  7440  			Services: []*Service{{
  7441  				Name:     "service1",
  7442  				TaskName: "task1",
  7443  				Checks: []*ServiceCheck{{
  7444  					Name: "check1",
  7445  					Type: "script",
  7446  				}, {
  7447  					Name: "check2",
  7448  					Type: "ttl",
  7449  				}, {
  7450  					Name: "check3",
  7451  					Type: "script",
  7452  				}},
  7453  			}},
  7454  		}
  7455  
  7456  		mErrOK := tgOK.validateScriptChecksInGroupServices()
  7457  		require.NoError(t, mErrOK)
  7458  	})
  7459  }
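
// The rule exercised above: a group-level script check can only run inside a
// task's environment, so either the service or the check itself must name a
// task. A sketch of that rule (validateScriptChecksSketch is hypothetical and
// may differ from the real validateScriptChecksInGroupServices):
func validateScriptChecksSketch(tg *TaskGroup) error {
	var mErr *multierror.Error
	for _, service := range tg.Services {
		for _, check := range service.Checks {
			if check.Type == "script" && service.TaskName == "" && check.TaskName == "" {
				mErr = multierror.Append(mErr, fmt.Errorf(
					"Service [%s]->%s or Check %s must specify task parameter",
					tg.Name, service.Name, check.Name))
			}
		}
	}
	return mErr.ErrorOrNil()
}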
  7460  
  7461  func TestComparableResources_Superset(t *testing.T) {
  7462  	ci.Parallel(t)
  7463  
  7464  	base := &ComparableResources{
  7465  		Flattened: AllocatedTaskResources{
  7466  			Cpu: AllocatedCpuResources{
  7467  				CpuShares:     4000,
  7468  				ReservedCores: []uint16{0, 1, 2, 3},
  7469  			},
  7470  			Memory: AllocatedMemoryResources{MemoryMB: 4096},
  7471  		},
  7472  		Shared: AllocatedSharedResources{DiskMB: 10000},
  7473  	}
  7474  	cases := []struct {
  7475  		a         *ComparableResources
  7476  		b         *ComparableResources
  7477  		dimension string
  7478  	}{
  7479  		{
  7480  			a: base,
  7481  			b: &ComparableResources{
  7482  				Flattened: AllocatedTaskResources{
  7483  					Cpu: AllocatedCpuResources{CpuShares: 1000, ReservedCores: []uint16{0}},
  7484  				},
  7485  			},
  7486  		},
  7487  		{
  7488  			a: base,
  7489  			b: &ComparableResources{
  7490  				Flattened: AllocatedTaskResources{
  7491  					Cpu: AllocatedCpuResources{CpuShares: 4000, ReservedCores: []uint16{0, 1, 2, 3}},
  7492  				},
  7493  			},
  7494  		},
  7495  		{
  7496  			a: base,
  7497  			b: &ComparableResources{
  7498  				Flattened: AllocatedTaskResources{
  7499  					Cpu: AllocatedCpuResources{CpuShares: 5000},
  7500  				},
  7501  			},
  7502  			dimension: "cpu",
  7503  		},
  7504  		{
  7505  			a: base,
  7506  			b: &ComparableResources{
  7507  				Flattened: AllocatedTaskResources{
  7508  					Cpu: AllocatedCpuResources{CpuShares: 1000, ReservedCores: []uint16{3, 4}},
  7509  				},
  7510  			},
  7511  			dimension: "cores",
  7512  		},
  7513  	}
  7514  
  7515  	for i, c := range cases {
  7516  		fit, dim := c.a.Superset(c.b)
  7517  		if c.dimension == "" {
  7518  			require.True(t, fit, "case %d should fit", i)
  7519  		} else {
  7520  			require.False(t, fit, "case %d should not fit", i)
  7521  			require.Equal(t, c.dimension, dim)
  7522  		}
  7523  	}
  7524  }
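
// The "cores" case fails above because core 4 is outside base's reserved set
// {0, 1, 2, 3}, even though 1000 CPU shares would fit. A sketch of the subset
// check that behavior implies (coresSubsetSketch is hypothetical):
func coresSubsetSketch(have, want []uint16) bool {
	set := make(map[uint16]struct{}, len(have))
	for _, core := range have {
		set[core] = struct{}{}
	}
	for _, core := range want {
		if _, ok := set[core]; !ok {
			return false
		}
	}
	return true
}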
  7525  
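// requireErrors asserts that err is a *multierror.Error and that every
// expected substring matches at least one of its wrapped errors, so a failure
// reports exactly which expectations went unmatched.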
  7526  func requireErrors(t *testing.T, err error, expected ...string) {
  7527  	t.Helper()
  7528  	require.Error(t, err)
  7529  	mErr, ok := err.(*multierror.Error)
  7530  	require.True(t, ok)
  7531  
  7532  	var found []string
  7533  	for _, e := range expected {
  7534  		for _, actual := range mErr.Errors {
  7535  			if strings.Contains(actual.Error(), e) {
  7536  				found = append(found, e)
  7537  				break
  7538  			}
  7539  		}
  7540  	}
  7541  
  7542  	require.Equal(t, expected, found)
  7543  }
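
// A self-contained usage sketch for requireErrors. The errors here are
// fabricated for illustration; real call sites pass errors returned by
// Validate methods.
func TestRequireErrors_usageSketch(t *testing.T) {
	ci.Parallel(t)

	err := multierror.Append(nil,
		fmt.Errorf("invalid name"),
		fmt.Errorf("description longer than allowed"))
	requireErrors(t, err, "invalid name", "description longer than")
}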
  7544  
  7545  func TestEphemeralDisk_Equal(t *testing.T) {
  7546  	ci.Parallel(t)
  7547  
  7548  	must.Equal[*EphemeralDisk](t, nil, nil)
  7549  	must.NotEqual[*EphemeralDisk](t, nil, new(EphemeralDisk))
  7550  
  7551  	must.StructEqual(t, &EphemeralDisk{
  7552  		Sticky:  true,
  7553  		SizeMB:  42,
  7554  		Migrate: true,
  7555  	}, []must.Tweak[*EphemeralDisk]{{
  7556  		Field: "Sticky",
  7557  		Apply: func(e *EphemeralDisk) { e.Sticky = false },
  7558  	}, {
  7559  		Field: "SizeMB",
  7560  		Apply: func(e *EphemeralDisk) { e.SizeMB = 10 },
  7561  	}, {
  7562  		Field: "Migrate",
  7563  		Apply: func(e *EphemeralDisk) { e.Migrate = false },
  7564  	}})
  7565  }
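
// The *_Equal tests below all follow the same pattern: must.StructEqual (from
// github.com/shoenig/test) asserts that a value equals a copy of itself, then
// applies each Tweak to a fresh copy and asserts equality breaks. Roughly (a
// sketch of the pattern, not the library's actual code):
//
//	clone := original.Copy()
//	must.Equal(t, original, clone)        // untouched copy compares equal
//	for _, tweak := range tweaks {
//		clone = original.Copy()
//		tweak.Apply(clone)
//		must.NotEqual(t, original, clone) // each field change is detected
//	}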
  7566  
  7567  func TestDNSConfig_Equal(t *testing.T) {
  7568  	ci.Parallel(t)
  7569  
  7570  	must.Equal[*DNSConfig](t, nil, nil)
  7571  	must.NotEqual[*DNSConfig](t, nil, new(DNSConfig))
  7572  
  7573  	must.StructEqual(t, &DNSConfig{
  7574  		Servers:  []string{"8.8.8.8", "8.8.4.4"},
  7575  		Searches: []string{"org", "local"},
  7576  		Options:  []string{"opt1"},
  7577  	}, []must.Tweak[*DNSConfig]{{
  7578  		Field: "Servers",
  7579  		Apply: func(c *DNSConfig) { c.Servers = []string{"1.1.1.1"} },
  7580  	}, {
  7581  		Field: "Searches",
  7582  		Apply: func(c *DNSConfig) { c.Searches = []string{"localhost"} },
  7583  	}, {
  7584  		Field: "Options",
  7585  		Apply: func(c *DNSConfig) { c.Options = []string{"opt2"} },
  7586  	}})
  7587  }
  7588  
  7589  func TestChangeScript_Equal(t *testing.T) {
  7590  	ci.Parallel(t)
  7591  
  7592  	must.Equal[*ChangeScript](t, nil, nil)
  7593  	must.NotEqual[*ChangeScript](t, nil, new(ChangeScript))
  7594  
  7595  	must.StructEqual(t, &ChangeScript{
  7596  		Command:     "/bin/sleep",
  7597  		Args:        []string{"infinity"},
  7598  		Timeout:     1 * time.Second,
  7599  		FailOnError: true,
  7600  	}, []must.Tweak[*ChangeScript]{{
  7601  		Field: "Command",
  7602  		Apply: func(c *ChangeScript) { c.Command = "/bin/false" },
  7603  	}, {
  7604  		Field: "Args",
  7605  		Apply: func(c *ChangeScript) { c.Args = []string{"1s"} },
  7606  	}, {
  7607  		Field: "Timeout",
  7608  		Apply: func(c *ChangeScript) { c.Timeout = 2 * time.Second },
  7609  	}, {
  7610  		Field: "FailOnError",
  7611  		Apply: func(c *ChangeScript) { c.FailOnError = false },
  7612  	}})
  7613  }
  7614  
  7615  func TestWaitConfig_Equal(t *testing.T) {
  7616  	ci.Parallel(t)
  7617  
  7618  	must.Equal[*WaitConfig](t, nil, nil)
  7619  	must.NotEqual[*WaitConfig](t, nil, new(WaitConfig))
  7620  
  7621  	must.StructEqual(t, &WaitConfig{
  7622  		Min: pointer.Of[time.Duration](100),
  7623  		Max: pointer.Of[time.Duration](200),
  7624  	}, []must.Tweak[*WaitConfig]{{
  7625  		Field: "Min",
  7626  		Apply: func(c *WaitConfig) { c.Min = pointer.Of[time.Duration](111) },
  7627  	}, {
  7628  		Field: "Max",
  7629  		Apply: func(c *WaitConfig) { c.Max = pointer.Of[time.Duration](222) },
  7630  	}})
  7631  }
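
// Optional durations like WaitConfig.Min and Max (and Template.Uid/Gid below)
// are pointers, so equality must treat nil and pointed-to values distinctly.
// A sketch of the usual comparison (pointerEqSketch is a hypothetical generic
// helper):
func pointerEqSketch[T comparable](a, b *T) bool {
	if a == nil || b == nil {
		return a == b // equal only if both are nil
	}
	return *a == *b // otherwise compare the pointed-to values
}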
  7632  
  7633  func TestTaskArtifact_Equal(t *testing.T) {
  7634  	ci.Parallel(t)
  7635  
  7636  	must.Equal[*TaskArtifact](t, nil, nil)
  7637  	must.NotEqual[*TaskArtifact](t, nil, new(TaskArtifact))
  7638  
  7639  	must.StructEqual(t, &TaskArtifact{
  7640  		GetterSource:  "source",
  7641  		GetterOptions: map[string]string{"a": "A"},
  7642  		GetterHeaders: map[string]string{"b": "B"},
  7643  		GetterMode:    "file",
  7644  		RelativeDest:  "./local",
  7645  	}, []must.Tweak[*TaskArtifact]{{
  7646  		Field: "GetterSource",
  7647  		Apply: func(ta *TaskArtifact) { ta.GetterSource = "other" },
  7648  	}, {
  7649  		Field: "GetterOptions",
  7650  		Apply: func(ta *TaskArtifact) { ta.GetterOptions = nil },
  7651  	}, {
  7652  		Field: "GetterHeaders",
  7653  		Apply: func(ta *TaskArtifact) { ta.GetterHeaders = nil },
  7654  	}, {
  7655  		Field: "GetterMode",
  7656  		Apply: func(ta *TaskArtifact) { ta.GetterMode = "directory" },
  7657  	}, {
  7658  		Field: "RelativeDest",
  7659  		Apply: func(ta *TaskArtifact) { ta.RelativeDest = "./alloc" },
  7660  	}})
  7661  }
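
// GetterOptions and GetterHeaders are maps, compared by key set and values
// rather than by reference. A sketch of that comparison (mapsEqualSketch is
// hypothetical; maps.Equal from the standard library behaves the same for
// comparable values):
func mapsEqualSketch(a, b map[string]string) bool {
	if len(a) != len(b) {
		return false
	}
	for k, av := range a {
		if bv, ok := b[k]; !ok || av != bv {
			return false
		}
	}
	return true
}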
  7662  
  7663  func TestVault_Equal(t *testing.T) {
  7664  	ci.Parallel(t)
  7665  
  7666  	must.Equal[*Vault](t, nil, nil)
  7667  	must.NotEqual[*Vault](t, nil, new(Vault))
  7668  
  7669  	must.StructEqual(t, &Vault{
  7670  		Policies:     []string{"one"},
  7671  		Namespace:    "global",
  7672  		Env:          true,
  7673  		ChangeMode:   "signal",
  7674  		ChangeSignal: "SIGILL",
  7675  	}, []must.Tweak[*Vault]{{
  7676  		Field: "Policies",
  7677  		Apply: func(v *Vault) { v.Policies = []string{"two"} },
  7678  	}, {
  7679  		Field: "Namespace",
  7680  		Apply: func(v *Vault) { v.Namespace = "regional" },
  7681  	}, {
  7682  		Field: "Env",
  7683  		Apply: func(v *Vault) { v.Env = false },
  7684  	}, {
  7685  		Field: "ChangeMode",
  7686  		Apply: func(v *Vault) { v.ChangeMode = "restart" },
  7687  	}, {
  7688  		Field: "ChangeSignal",
  7689  		Apply: func(v *Vault) { v.ChangeSignal = "SIGTERM" },
  7690  	}})
  7691  }
  7692  
  7693  func TestTemplate_Equal(t *testing.T) {
  7694  	ci.Parallel(t)
  7695  
  7696  	must.Equal[*Template](t, nil, nil)
  7697  	must.NotEqual[*Template](t, nil, new(Template))
  7698  
  7699  	must.StructEqual(t, &Template{
  7700  		SourcePath:   "source",
  7701  		DestPath:     "destination",
  7702  		EmbeddedTmpl: "tmpl",
  7703  		ChangeMode:   "mode",
  7704  		ChangeSignal: "signal",
  7705  		ChangeScript: &ChangeScript{
  7706  			Command:     "/bin/sleep",
  7707  			Args:        []string{"infinity"},
  7708  			Timeout:     1 * time.Second,
  7709  			FailOnError: true,
  7710  		},
  7711  		Splay:      1,
  7712  		Perms:      "perms",
  7713  		Uid:        pointer.Of(1000),
  7714  		Gid:        pointer.Of(1000),
  7715  		LeftDelim:  "{",
  7716  		RightDelim: "}",
  7717  		Envvars:    true,
  7718  		VaultGrace: 1 * time.Second,
  7719  		Wait: &WaitConfig{
  7720  			Min: pointer.Of[time.Duration](1),
  7721  			Max: pointer.Of[time.Duration](2),
  7722  		},
  7723  		ErrMissingKey: true,
  7724  	}, []must.Tweak[*Template]{{
  7725  		Field: "SourcePath",
  7726  		Apply: func(t *Template) { t.SourcePath = "source2" },
  7727  	}, {
  7728  		Field: "DestPath",
  7729  		Apply: func(t *Template) { t.DestPath = "destination2" },
  7730  	}, {
  7731  		Field: "EmbeddedTmpl",
  7732  		Apply: func(t *Template) { t.EmbeddedTmpl = "tmpl2" },
  7733  	}, {
  7734  		Field: "ChangeMode",
  7735  		Apply: func(t *Template) { t.ChangeMode = "mode2" },
  7736  	}, {
  7737  		Field: "ChangeSignal",
  7738  		Apply: func(t *Template) { t.ChangeSignal = "signal2" },
  7739  	}, {
  7740  		Field: "ChangeScript",
  7741  		Apply: func(t *Template) {
  7742  			t.ChangeScript = &ChangeScript{
  7743  				Command:     "/bin/sleep",
  7744  				Args:        []string{"infinity", "plus", "one"},
  7745  				Timeout:     1 * time.Second,
  7746  				FailOnError: true,
  7747  			}
  7748  		},
  7749  	}, {
  7750  		Field: "Splay",
  7751  		Apply: func(t *Template) { t.Splay = 2 },
  7752  	}, {
  7753  		Field: "Perms",
  7754  		Apply: func(t *Template) { t.Perms = "perms2" },
  7755  	}, {
  7756  		Field: "Uid",
  7757  		Apply: func(t *Template) { t.Uid = pointer.Of(0) },
  7758  	}, {
  7759  		Field: "Gid",
  7760  		Apply: func(t *Template) { t.Gid = pointer.Of(0) },
  7761  	}, {
  7762  		Field: "LeftDelim",
  7763  		Apply: func(t *Template) { t.LeftDelim = "[" },
  7764  	}, {
  7765  		Field: "RightDelim",
  7766  		Apply: func(t *Template) { t.RightDelim = "]" },
  7767  	}, {
  7768  		Field: "Envvars",
  7769  		Apply: func(t *Template) { t.Envvars = false },
  7770  	}, {
  7771  		Field: "VaultGrace",
  7772  		Apply: func(t *Template) { t.VaultGrace = 2 * time.Second },
  7773  	}, {
  7774  		Field: "Wait",
  7775  		Apply: func(t *Template) {
  7776  			t.Wait = &WaitConfig{
  7777  				Min: pointer.Of[time.Duration](1),
  7778  				Max: nil,
  7779  			}
  7780  		},
  7781  	}, {
  7782  		Field: "ErrMissingKey",
  7783  		Apply: func(t *Template) { t.ErrMissingKey = false },
  7784  	}})
  7785  }
  7786  
  7787  func TestAffinity_Equal(t *testing.T) {
  7788  	ci.Parallel(t)
  7789  
  7790  	must.Equal[*Affinity](t, nil, nil)
  7791  	must.NotEqual[*Affinity](t, nil, new(Affinity))
  7792  
  7793  	must.StructEqual(t, &Affinity{
  7794  		LTarget: "left",
  7795  		RTarget: "right",
  7796  		Operand: "op",
  7797  		Weight:  100,
  7798  	}, []must.Tweak[*Affinity]{{
  7799  		Field: "LTarget",
  7800  		Apply: func(a *Affinity) { a.LTarget = "left2" },
  7801  	}, {
  7802  		Field: "RTarget",
  7803  		Apply: func(a *Affinity) { a.RTarget = "right2" },
  7804  	}, {
  7805  		Field: "Operand",
  7806  		Apply: func(a *Affinity) { a.Operand = "op2" },
  7807  	}, {
  7808  		Field: "Weight",
  7809  		Apply: func(a *Affinity) { a.Weight = 50 },
  7810  	}})
  7811  }
  7812  
  7813  func TestSpreadTarget_Equal(t *testing.T) {
  7814  	ci.Parallel(t)
  7815  
  7816  	must.Equal[*SpreadTarget](t, nil, nil)
  7817  	must.NotEqual[*SpreadTarget](t, nil, new(SpreadTarget))
  7818  
  7819  	must.StructEqual(t, &SpreadTarget{
  7820  		Value:   "dc1",
  7821  		Percent: 99,
  7822  	}, []must.Tweak[*SpreadTarget]{{
  7823  		Field: "Value",
  7824  		Apply: func(st *SpreadTarget) { st.Value = "dc2" },
  7825  	}, {
  7826  		Field: "Percent",
  7827  		Apply: func(st *SpreadTarget) { st.Percent = 98 },
  7828  	}})
  7829  }
  7830  
  7831  func TestSpread_Equal(t *testing.T) {
  7832  	ci.Parallel(t)
  7833  
  7834  	must.Equal[*Spread](t, nil, nil)
  7835  	must.NotEqual[*Spread](t, nil, new(Spread))
  7836  
  7837  	must.StructEqual(t, &Spread{
  7838  		Attribute: "attr",
  7839  		Weight:    100,
  7840  		SpreadTarget: []*SpreadTarget{{
  7841  			Value:   "dc1",
  7842  			Percent: 99,
  7843  		}},
  7844  	}, []must.Tweak[*Spread]{{
  7845  		Field: "Attribute",
  7846  		Apply: func(s *Spread) { s.Attribute = "attr2" },
  7847  	}, {
  7848  		Field: "Weight",
  7849  		Apply: func(s *Spread) { s.Weight = 50 },
  7850  	}, {
  7851  		Field: "SpreadTarget",
  7852  		Apply: func(s *Spread) { s.SpreadTarget = nil },
  7853  	}})
  7854  }
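
// Spread.SpreadTarget is a slice of pointers, so its equality walks the
// elements pairwise through SpreadTarget's own Equal method (the method the
// test above exercises). A sketch of that walk (spreadTargetsEqualSketch is
// hypothetical):
func spreadTargetsEqualSketch(a, b []*SpreadTarget) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if !a[i].Equal(b[i]) {
			return false
		}
	}
	return true
}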