github.com/hashicorp/nomad/api@v0.0.0-20240306165712-3193ac204f65/tasks_test.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package api

import (
	"path/filepath"
	"testing"
	"time"

	"github.com/hashicorp/nomad/api/internal/testutil"
	"github.com/shoenig/test/must"
)

func TestTaskGroup_NewTaskGroup(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 2)
	expect := &TaskGroup{
		Name:  pointerOf("grp1"),
		Count: pointerOf(2),
	}
	must.Eq(t, expect, grp)
}

func TestTaskGroup_Constrain(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Add a constraint to the group
	out := grp.Constrain(NewConstraint("kernel.name", "=", "darwin"))
	must.Len(t, 1, grp.Constraints)

	// Check that the group was returned
	must.Eq(t, grp, out)

	// Add a second constraint
	grp.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
	expect := []*Constraint{
		{
			LTarget: "kernel.name",
			RTarget: "darwin",
			Operand: "=",
		},
		{
			LTarget: "memory.totalbytes",
			RTarget: "128000000",
			Operand: ">=",
		},
	}
	must.Eq(t, expect, grp.Constraints)
}

func TestTaskGroup_AddAffinity(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Add an affinity to the group
	out := grp.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
	must.Len(t, 1, grp.Affinities)

	// Check that the group was returned
	must.Eq(t, grp, out)

	// Add a second affinity
	grp.AddAffinity(NewAffinity("${node.affinity}", "=", "dc2", 50))
	expect := []*Affinity{
		{
			LTarget: "kernel.version",
			RTarget: "4.6",
			Operand: "=",
			Weight:  pointerOf(int8(100)),
		},
		{
			LTarget: "${node.affinity}",
			RTarget: "dc2",
			Operand: "=",
			Weight:  pointerOf(int8(50)),
		},
	}
	must.Eq(t, expect, grp.Affinities)
}

func TestTaskGroup_SetMeta(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Initializes an empty map
	out := grp.SetMeta("foo", "bar")
	must.NotNil(t, grp.Meta)

	// Check that we returned the group
	must.Eq(t, grp, out)

	// Add a second meta k/v
	grp.SetMeta("baz", "zip")
	expect := map[string]string{"foo": "bar", "baz": "zip"}
	must.Eq(t, expect, grp.Meta)
}

func TestTaskGroup_AddSpread(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Create and add spread
	spreadTarget := NewSpreadTarget("r1", 50)
	spread := NewSpread("${meta.rack}", 100, []*SpreadTarget{spreadTarget})

	out := grp.AddSpread(spread)
	must.Len(t, 1, grp.Spreads)

	// Check that the group was returned
	must.Eq(t, grp, out)

	// Add a second spread
	spreadTarget2 := NewSpreadTarget("dc1", 100)
	spread2 := NewSpread("${node.datacenter}", 100, []*SpreadTarget{spreadTarget2})

	grp.AddSpread(spread2)

	expect := []*Spread{
		{
			Attribute: "${meta.rack}",
			Weight:    pointerOf(int8(100)),
			SpreadTarget: []*SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
			},
		},
		{
			Attribute: "${node.datacenter}",
			Weight:    pointerOf(int8(100)),
			SpreadTarget: []*SpreadTarget{
				{
					Value:   "dc1",
					Percent: 100,
				},
			},
		},
	}
	must.Eq(t, expect, grp.Spreads)
}

func TestTaskGroup_AddTask(t *testing.T) {
	testutil.Parallel(t)

	grp := NewTaskGroup("grp1", 1)

	// Add the task to the task group
	out := grp.AddTask(NewTask("task1", "java"))
	must.Len(t, 1, out.Tasks)

	// Check that we returned the group
	must.Eq(t, grp, out)

	// Add a second task
	grp.AddTask(NewTask("task2", "exec"))
	expect := []*Task{
		{
			Name:   "task1",
			Driver: "java",
		},
		{
			Name:   "task2",
			Driver: "exec",
		},
	}
	must.Eq(t, expect, grp.Tasks)
}

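// Each chainable helper above returns its receiver (the tests assert
// must.Eq(t, grp, out)), so a group can be assembled fluently. The example
// below is a minimal sketch of that pattern using only constructors exercised
// in this file; the names and values are arbitrary illustrations, not
// recommended settings.
func ExampleNewTaskGroup_chaining() {
	target := NewSpreadTarget("dc1", 100)
	grp := NewTaskGroup("web", 3).
		Constrain(NewConstraint("kernel.name", "=", "linux")).
		SetMeta("owner", "platform").
		AddSpread(NewSpread("${node.datacenter}", 50, []*SpreadTarget{target})).
		AddTask(NewTask("server", "docker"))

	// grp now carries one constraint, one meta entry, one spread, and one task.
	_ = grp
}
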
func TestTask_NewTask(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")
	expect := &Task{
		Name:   "task1",
		Driver: "exec",
	}
	must.Eq(t, expect, task)
}

func TestTask_SetConfig(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Initializes an empty map
	out := task.SetConfig("foo", "bar")
	must.NotNil(t, task.Config)

	// Check that we returned the task
	must.Eq(t, task, out)

	// Set another config value
	task.SetConfig("baz", "zip")
	expect := map[string]interface{}{"foo": "bar", "baz": "zip"}
	must.Eq(t, expect, task.Config)
}

func TestTask_SetMeta(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Initializes an empty map
	out := task.SetMeta("foo", "bar")
	must.NotNil(t, out)

	// Check that we returned the task
	must.Eq(t, task, out)

	// Set another meta k/v
	task.SetMeta("baz", "zip")
	expect := map[string]string{"foo": "bar", "baz": "zip"}
	must.Eq(t, expect, task.Meta)
}

func TestTask_Require(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Create some require resources
	resources := &Resources{
		CPU:      pointerOf(1250),
		MemoryMB: pointerOf(128),
		DiskMB:   pointerOf(2048),
		Networks: []*NetworkResource{
			{
				CIDR:          "0.0.0.0/0",
				MBits:         pointerOf(100),
				ReservedPorts: []Port{{"", 80, 0, ""}, {"", 443, 0, ""}},
			},
		},
	}
	out := task.Require(resources)
	must.Eq(t, resources, task.Resources)

	// Check that we returned the task
	must.Eq(t, task, out)
}

func TestTask_Constrain(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Add a constraint to the task
	out := task.Constrain(NewConstraint("kernel.name", "=", "darwin"))
	must.Len(t, 1, task.Constraints)

	// Check that the task was returned
	must.Eq(t, task, out)

	// Add a second constraint
	task.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
	expect := []*Constraint{
		{
			LTarget: "kernel.name",
			RTarget: "darwin",
			Operand: "=",
		},
		{
			LTarget: "memory.totalbytes",
			RTarget: "128000000",
			Operand: ">=",
		},
	}
	must.Eq(t, expect, task.Constraints)
}

func TestTask_AddAffinity(t *testing.T) {
	testutil.Parallel(t)

	task := NewTask("task1", "exec")

	// Add an affinity to the task
	out := task.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
	must.Len(t, 1, out.Affinities)

	// Check that the task was returned
	must.Eq(t, task, out)

	// Add a second affinity
	task.AddAffinity(NewAffinity("${node.datacenter}", "=", "dc2", 50))
	expect := []*Affinity{
		{
			LTarget: "kernel.version",
			RTarget: "4.6",
			Operand: "=",
			Weight:  pointerOf(int8(100)),
		},
		{
			LTarget: "${node.datacenter}",
			RTarget: "dc2",
			Operand: "=",
			Weight:  pointerOf(int8(50)),
		},
	}
	must.Eq(t, expect, task.Affinities)
}

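// Task builders follow the same pattern: each setter returns the *Task it was
// called on, so the calls compose. A minimal sketch with illustrative values
// only, using helpers exercised by the tests above.
func ExampleNewTask_chaining() {
	task := NewTask("server", "exec").
		SetConfig("command", "/bin/server").
		SetMeta("owner", "platform").
		Require(&Resources{CPU: pointerOf(500), MemoryMB: pointerOf(256)}).
		Constrain(NewConstraint("kernel.name", "=", "linux"))

	// task now has driver config, metadata, resources, and a constraint attached.
	_ = task
}
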
func TestTask_Artifact(t *testing.T) {
	testutil.Parallel(t)

	a := TaskArtifact{
		GetterSource:  pointerOf("http://localhost/foo.txt"),
		GetterMode:    pointerOf("file"),
		GetterHeaders: make(map[string]string),
		GetterOptions: make(map[string]string),
	}
	a.Canonicalize()
	must.Eq(t, "file", *a.GetterMode)
	must.Eq(t, "local/foo.txt", filepath.ToSlash(*a.RelativeDest))
	must.Nil(t, a.GetterOptions)
	must.Nil(t, a.GetterHeaders)
}

func TestTask_VolumeMount(t *testing.T) {
	testutil.Parallel(t)

	vm := new(VolumeMount)
	vm.Canonicalize()
	must.NotNil(t, vm.PropagationMode)
	must.Eq(t, "private", *vm.PropagationMode)
}

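// TestTask_Canonicalize_TaskLifecycle asserts that an empty lifecycle block is
// canonicalized to nil.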
func TestTask_Canonicalize_TaskLifecycle(t *testing.T) {
	testutil.Parallel(t)

	testCases := []struct {
		name     string
		expected *TaskLifecycle
		task     *Task
	}{
		{
			name: "empty",
			task: &Task{
				Lifecycle: &TaskLifecycle{},
			},
			expected: nil,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tg := &TaskGroup{
				Name: pointerOf("foo"),
			}
			j := &Job{
				ID: pointerOf("test"),
			}
			tc.task.Canonicalize(tg, j)
			must.Eq(t, tc.expected, tc.task.Lifecycle)
		})
	}
}

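// TestTask_Template_WaitConfig_Canonicalize_and_Copy asserts that a template
// wait block round-trips through Copy and Canonicalize unchanged, with unset
// fields left nil.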
func TestTask_Template_WaitConfig_Canonicalize_and_Copy(t *testing.T) {
	testutil.Parallel(t)

	taskWithWait := func(wc *WaitConfig) *Task {
		return &Task{
			Templates: []*Template{
				{
					Wait: wc,
				},
			},
		}
	}

	testCases := []struct {
		name          string
		canonicalized *WaitConfig
		copied        *WaitConfig
		task          *Task
	}{
		{
			name: "all-fields",
			task: taskWithWait(&WaitConfig{
				Min: pointerOf(time.Duration(5)),
				Max: pointerOf(time.Duration(10)),
			}),
			canonicalized: &WaitConfig{
				Min: pointerOf(time.Duration(5)),
				Max: pointerOf(time.Duration(10)),
			},
			copied: &WaitConfig{
				Min: pointerOf(time.Duration(5)),
				Max: pointerOf(time.Duration(10)),
			},
		},
		{
			name: "no-fields",
			task: taskWithWait(&WaitConfig{}),
			canonicalized: &WaitConfig{
				Min: nil,
				Max: nil,
			},
			copied: &WaitConfig{
				Min: nil,
				Max: nil,
			},
		},
		{
			name: "min-only",
			task: taskWithWait(&WaitConfig{
				Min: pointerOf(time.Duration(5)),
			}),
			canonicalized: &WaitConfig{
				Min: pointerOf(time.Duration(5)),
			},
			copied: &WaitConfig{
				Min: pointerOf(time.Duration(5)),
			},
		},
		{
			name: "max-only",
			task: taskWithWait(&WaitConfig{
				Max: pointerOf(time.Duration(10)),
			}),
			canonicalized: &WaitConfig{
				Max: pointerOf(time.Duration(10)),
			},
			copied: &WaitConfig{
				Max: pointerOf(time.Duration(10)),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tg := &TaskGroup{
				Name: pointerOf("foo"),
			}
			j := &Job{
				ID: pointerOf("test"),
			}
			must.Eq(t, tc.copied, tc.task.Templates[0].Wait.Copy())
			tc.task.Canonicalize(tg, j)
			must.Eq(t, tc.canonicalized, tc.task.Templates[0].Wait)
		})
	}
}

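// TestTask_Canonicalize_Vault asserts the default values applied to an empty
// vault block.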
func TestTask_Canonicalize_Vault(t *testing.T) {
	testCases := []struct {
		name     string
		input    *Vault
		expected *Vault
	}{
		{
			name:  "empty",
			input: &Vault{},
			expected: &Vault{
				Env:                  pointerOf(true),
				DisableFile:          pointerOf(false),
				Namespace:            pointerOf(""),
				Cluster:              "default",
				ChangeMode:           pointerOf("restart"),
				ChangeSignal:         pointerOf("SIGHUP"),
				AllowTokenExpiration: pointerOf(false),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tc.input.Canonicalize()
			must.Eq(t, tc.expected, tc.input)
		})
	}
}

// Ensures no regression on https://github.com/hashicorp/nomad/issues/3132
func TestTaskGroup_Canonicalize_Update(t *testing.T) {
	testutil.Parallel(t)

	// Job with an Empty() Update
	job := &Job{
		ID: pointerOf("test"),
		Update: &UpdateStrategy{
			AutoRevert:       pointerOf(false),
			AutoPromote:      pointerOf(false),
			Canary:           pointerOf(0),
			HealthCheck:      pointerOf(""),
			HealthyDeadline:  pointerOf(time.Duration(0)),
			ProgressDeadline: pointerOf(time.Duration(0)),
			MaxParallel:      pointerOf(0),
			MinHealthyTime:   pointerOf(time.Duration(0)),
			Stagger:          pointerOf(time.Duration(0)),
		},
	}
	job.Canonicalize()
	tg := &TaskGroup{
		Name: pointerOf("foo"),
	}
	tg.Canonicalize(job)
	must.NotNil(t, job.Update)
	must.Nil(t, tg.Update)
}

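// TestTaskGroup_Canonicalize_Scaling asserts that a group's Count and
// Scaling.Min default from one another: whichever is unset inherits the other,
// and both default to 1 when neither is set.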
func TestTaskGroup_Canonicalize_Scaling(t *testing.T) {
	testutil.Parallel(t)

	job := &Job{
		ID: pointerOf("test"),
	}
	job.Canonicalize()
	tg := &TaskGroup{
		Name:  pointerOf("foo"),
		Count: nil,
		Scaling: &ScalingPolicy{
			Min:         nil,
			Max:         pointerOf(int64(10)),
			Policy:      nil,
			Enabled:     nil,
			CreateIndex: 0,
			ModifyIndex: 0,
		},
	}
	job.TaskGroups = []*TaskGroup{tg}

	// both nil => both == 1
	tg.Canonicalize(job)
	must.Positive(t, *tg.Count)
	must.NotNil(t, tg.Scaling.Min)
	must.Eq(t, 1, *tg.Count)
	must.Eq(t, int64(*tg.Count), *tg.Scaling.Min)

	// count == nil => count = Scaling.Min
	tg.Count = nil
	tg.Scaling.Min = pointerOf(int64(5))
	tg.Canonicalize(job)
	must.Positive(t, *tg.Count)
	must.NotNil(t, tg.Scaling.Min)
	must.Eq(t, 5, *tg.Count)
	must.Eq(t, int64(*tg.Count), *tg.Scaling.Min)

	// Scaling.Min == nil => Scaling.Min == count
	tg.Count = pointerOf(5)
	tg.Scaling.Min = nil
	tg.Canonicalize(job)
	must.Positive(t, *tg.Count)
	must.NotNil(t, tg.Scaling.Min)
	must.Eq(t, 5, *tg.Scaling.Min)
	must.Eq(t, int64(*tg.Count), *tg.Scaling.Min)

	// both present, both persisted
	tg.Count = pointerOf(5)
	tg.Scaling.Min = pointerOf(int64(1))
	tg.Canonicalize(job)
	must.Positive(t, *tg.Count)
	must.NotNil(t, tg.Scaling.Min)
	must.Eq(t, 1, *tg.Scaling.Min)
	must.Eq(t, 5, *tg.Count)
}

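// TestTaskGroup_Merge_Update asserts that a partial group update block is
// merged with the job's update block and backfilled with defaults.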
func TestTaskGroup_Merge_Update(t *testing.T) {
	testutil.Parallel(t)

	job := &Job{
		ID:     pointerOf("test"),
		Update: &UpdateStrategy{},
	}
	job.Canonicalize()

	// Merge and canonicalize part of an update block
	tg := &TaskGroup{
		Name: pointerOf("foo"),
		Update: &UpdateStrategy{
			AutoRevert:  pointerOf(true),
			Canary:      pointerOf(5),
			HealthCheck: pointerOf("foo"),
		},
	}

	tg.Canonicalize(job)
	must.Eq(t, &UpdateStrategy{
		AutoRevert:       pointerOf(true),
		AutoPromote:      pointerOf(false),
		Canary:           pointerOf(5),
		HealthCheck:      pointerOf("foo"),
		HealthyDeadline:  pointerOf(5 * time.Minute),
		ProgressDeadline: pointerOf(10 * time.Minute),
		MaxParallel:      pointerOf(1),
		MinHealthyTime:   pointerOf(10 * time.Second),
		Stagger:          pointerOf(30 * time.Second),
	}, tg.Update)
}

// Verifies that migrate strategy is merged correctly
func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
	testutil.Parallel(t)

	type testCase struct {
		desc        string
		jobType     string
		jobMigrate  *MigrateStrategy
		taskMigrate *MigrateStrategy
		expected    *MigrateStrategy
	}

	testCases := []testCase{
		{
			desc:        "Default batch",
			jobType:     "batch",
			jobMigrate:  nil,
			taskMigrate: nil,
			expected:    nil,
		},
		{
			desc:        "Default service",
			jobType:     "service",
			jobMigrate:  nil,
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(1),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(10 * time.Second),
				HealthyDeadline: pointerOf(5 * time.Minute),
			},
		},
		{
			desc:    "Empty job migrate strategy",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel:     pointerOf(0),
				HealthCheck:     pointerOf(""),
				MinHealthyTime:  pointerOf(time.Duration(0)),
				HealthyDeadline: pointerOf(time.Duration(0)),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(0),
				HealthCheck:     pointerOf(""),
				MinHealthyTime:  pointerOf(time.Duration(0)),
				HealthyDeadline: pointerOf(time.Duration(0)),
			},
		},
		{
			desc:    "Inherit from job",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel:     pointerOf(3),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(3),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
		},
		{
			desc:       "Set in task",
			jobType:    "service",
			jobMigrate: nil,
			taskMigrate: &MigrateStrategy{
				MaxParallel:     pointerOf(3),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(3),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
		},
		{
			desc:    "Merge from job",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: pointerOf(11),
			},
			taskMigrate: &MigrateStrategy{
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(11),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
		},
		{
			desc:    "Override from group",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: pointerOf(11),
			},
			taskMigrate: &MigrateStrategy{
				MaxParallel:     pointerOf(5),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(5),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(time.Duration(2)),
				HealthyDeadline: pointerOf(time.Duration(2)),
			},
		},
		{
			desc:    "Parallel from job, defaulting",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: pointerOf(5),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     pointerOf(5),
				HealthCheck:     pointerOf("checks"),
				MinHealthyTime:  pointerOf(10 * time.Second),
				HealthyDeadline: pointerOf(5 * time.Minute),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			job := &Job{
				ID:      pointerOf("test"),
				Migrate: tc.jobMigrate,
				Type:    pointerOf(tc.jobType),
			}
			job.Canonicalize()
			tg := &TaskGroup{
				Name:    pointerOf("foo"),
				Migrate: tc.taskMigrate,
			}
			tg.Canonicalize(job)
			must.Eq(t, tc.expected, tg.Migrate)
		})
	}
}

// TestSpread_Canonicalize asserts that the spread block is canonicalized correctly
func TestSpread_Canonicalize(t *testing.T) {
	testutil.Parallel(t)

	job := &Job{
		ID:   pointerOf("test"),
		Type: pointerOf("batch"),
	}
	job.Canonicalize()
	tg := &TaskGroup{
		Name: pointerOf("foo"),
	}
	type testCase struct {
		desc           string
		spread         *Spread
		expectedWeight int8
	}
	cases := []testCase{
		{
			"Nil spread",
			&Spread{
				Attribute: "test",
				Weight:    nil,
			},
			50,
		},
		{
			"Zero spread",
			&Spread{
				Attribute: "test",
				Weight:    pointerOf(int8(0)),
			},
			0,
		},
		{
			"Non Zero spread",
			&Spread{
				Attribute: "test",
				Weight:    pointerOf(int8(100)),
			},
			100,
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			tg.Spreads = []*Spread{tc.spread}
			tg.Canonicalize(job)
			for _, spr := range tg.Spreads {
				must.Eq(t, tc.expectedWeight, *spr.Weight)
			}
		})
	}
}

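// Test_NewDefaultReschedulePolicy asserts the default reschedule policy
// returned for each job type.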
func Test_NewDefaultReschedulePolicy(t *testing.T) {
	testutil.Parallel(t)

	testCases := []struct {
		desc         string
		inputJobType string
		expected     *ReschedulePolicy
	}{
		{
			desc:         "service job type",
			inputJobType: "service",
			expected: &ReschedulePolicy{
				Attempts:      pointerOf(0),
				Interval:      pointerOf(time.Duration(0)),
				Delay:         pointerOf(30 * time.Second),
				DelayFunction: pointerOf("exponential"),
				MaxDelay:      pointerOf(1 * time.Hour),
				Unlimited:     pointerOf(true),
			},
		},
		{
			desc:         "batch job type",
			inputJobType: "batch",
			expected: &ReschedulePolicy{
				Attempts:      pointerOf(1),
				Interval:      pointerOf(24 * time.Hour),
				Delay:         pointerOf(5 * time.Second),
				DelayFunction: pointerOf("constant"),
				MaxDelay:      pointerOf(time.Duration(0)),
				Unlimited:     pointerOf(false),
			},
		},
		{
			desc:         "system job type",
			inputJobType: "system",
			expected: &ReschedulePolicy{
				Attempts:      pointerOf(0),
				Interval:      pointerOf(time.Duration(0)),
				Delay:         pointerOf(time.Duration(0)),
				DelayFunction: pointerOf(""),
				MaxDelay:      pointerOf(time.Duration(0)),
				Unlimited:     pointerOf(false),
			},
		},
		{
			desc:         "unrecognised job type",
			inputJobType: "unrecognised",
			expected: &ReschedulePolicy{
				Attempts:      pointerOf(0),
				Interval:      pointerOf(time.Duration(0)),
				Delay:         pointerOf(time.Duration(0)),
				DelayFunction: pointerOf(""),
				MaxDelay:      pointerOf(time.Duration(0)),
				Unlimited:     pointerOf(false),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			actual := NewDefaultReschedulePolicy(tc.inputJobType)
			must.Eq(t, tc.expected, actual)
		})
	}
}

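// TestTaskGroup_Canonicalize_Consul asserts that a group inherits the job's
// Consul namespace unless it sets its own.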
func TestTaskGroup_Canonicalize_Consul(t *testing.T) {
	testutil.Parallel(t)

	t.Run("override job consul in group", func(t *testing.T) {
		job := &Job{
			ID:              pointerOf("job"),
			ConsulNamespace: pointerOf("ns1"),
		}
		job.Canonicalize()

		tg := &TaskGroup{
			Name:   pointerOf("group"),
			Consul: &Consul{Namespace: "ns2"},
		}
		tg.Canonicalize(job)

		must.Eq(t, "ns1", *job.ConsulNamespace)
		must.Eq(t, "ns2", tg.Consul.Namespace)
	})

	t.Run("inherit job consul in group", func(t *testing.T) {
		job := &Job{
			ID:              pointerOf("job"),
			ConsulNamespace: pointerOf("ns1"),
		}
		job.Canonicalize()

		tg := &TaskGroup{
			Name:   pointerOf("group"),
			Consul: nil, // not set, inherit from job
		}
		tg.Canonicalize(job)

		must.Eq(t, "ns1", *job.ConsulNamespace)
		must.Eq(t, "ns1", tg.Consul.Namespace)
	})

	t.Run("set in group only", func(t *testing.T) {
		job := &Job{
			ID:              pointerOf("job"),
			ConsulNamespace: nil,
		}
		job.Canonicalize()

		tg := &TaskGroup{
			Name:   pointerOf("group"),
			Consul: &Consul{Namespace: "ns2"},
		}
		tg.Canonicalize(job)

		must.Eq(t, "", *job.ConsulNamespace)
		must.Eq(t, "ns2", tg.Consul.Namespace)
	})
}