github.com/zhizhiboom/nomad@v0.8.5-0.20180907175415-f28fd3a1a056/api/tasks_test.go

package api

import (
	"reflect"
	"testing"
	"time"

	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestTaskGroup_NewTaskGroup(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 2)
	expect := &TaskGroup{
		Name:  helper.StringToPtr("grp1"),
		Count: helper.IntToPtr(2),
	}
	if !reflect.DeepEqual(grp, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp)
	}
}

func TestTaskGroup_Constrain(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Add a constraint to the group
	out := grp.Constrain(NewConstraint("kernel.name", "=", "darwin"))
	if n := len(grp.Constraints); n != 1 {
		t.Fatalf("expected 1 constraint, got: %d", n)
	}

	// Check that the group was returned
	if out != grp {
		t.Fatalf("expected: %#v, got: %#v", grp, out)
	}

	// Add a second constraint
	grp.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
	expect := []*Constraint{
		{
			LTarget: "kernel.name",
			RTarget: "darwin",
			Operand: "=",
		},
		{
			LTarget: "memory.totalbytes",
			RTarget: "128000000",
			Operand: ">=",
		},
	}
	if !reflect.DeepEqual(grp.Constraints, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Constraints)
	}
}

func TestTaskGroup_AddAffinity(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Add an affinity to the group
	out := grp.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
	if n := len(grp.Affinities); n != 1 {
		t.Fatalf("expected 1 affinity, got: %d", n)
	}

	// Check that the group was returned
	if out != grp {
		t.Fatalf("expected: %#v, got: %#v", grp, out)
	}

	// Add a second affinity
	grp.AddAffinity(NewAffinity("${node.affinity}", "=", "dc2", 50))
	expect := []*Affinity{
		{
			LTarget: "kernel.version",
			RTarget: "4.6",
			Operand: "=",
			Weight:  100,
		},
		{
			LTarget: "${node.affinity}",
			RTarget: "dc2",
			Operand: "=",
			Weight:  50,
		},
	}
	if !reflect.DeepEqual(grp.Affinities, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Affinities)
	}
}

func TestTaskGroup_SetMeta(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Initializes an empty map
	out := grp.SetMeta("foo", "bar")
	if grp.Meta == nil {
		t.Fatalf("should be initialized")
	}

	// Check that we returned the group
	if out != grp {
		t.Fatalf("expect: %#v, got: %#v", grp, out)
	}

	// Add a second meta k/v
	grp.SetMeta("baz", "zip")
	expect := map[string]string{"foo": "bar", "baz": "zip"}
	if !reflect.DeepEqual(grp.Meta, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Meta)
	}
}

func TestTaskGroup_AddSpread(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Create and add spread
	spreadTarget := NewSpreadTarget("r1", 50)
	spread := NewSpread("${meta.rack}", 100, []*SpreadTarget{spreadTarget})

	out := grp.AddSpread(spread)
	if n := len(grp.Spreads); n != 1 {
		t.Fatalf("expected 1 spread, got: %d", n)
	}

	// Check that the group was returned
	if out != grp {
		t.Fatalf("expected: %#v, got: %#v", grp, out)
	}

	// Add a second spread
	spreadTarget2 := NewSpreadTarget("dc1", 100)
	spread2 := NewSpread("${node.datacenter}", 100, []*SpreadTarget{spreadTarget2})

	grp.AddSpread(spread2)

	expect := []*Spread{
		{
			Attribute: "${meta.rack}",
			Weight:    100,
			SpreadTarget: []*SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
			},
		},
		{
			Attribute: "${node.datacenter}",
			Weight:    100,
			SpreadTarget: []*SpreadTarget{
				{
					Value:   "dc1",
					Percent: 100,
				},
			},
		},
	}
	if !reflect.DeepEqual(grp.Spreads, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Spreads)
	}
}

func TestTaskGroup_AddTask(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1)

	// Add the task to the task group
	out := grp.AddTask(NewTask("task1", "java"))
	if n := len(grp.Tasks); n != 1 {
		t.Fatalf("expected 1 task, got: %d", n)
	}

	// Check that we returned the group
	if out != grp {
		t.Fatalf("expect: %#v, got: %#v", grp, out)
	}

	// Add a second task
	grp.AddTask(NewTask("task2", "exec"))
	expect := []*Task{
		{
			Name:   "task1",
			Driver: "java",
		},
		{
			Name:   "task2",
			Driver: "exec",
		},
	}
	if !reflect.DeepEqual(grp.Tasks, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, grp.Tasks)
	}
}
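
// The TaskGroup setters exercised above (Constrain, AddAffinity, SetMeta,
// AddSpread, AddTask) each return the group they were called on, so a group
// can be assembled fluently. Illustrative sketch only, not part of the
// upstream suite; the attribute and meta values are arbitrary examples.
func TestTaskGroup_BuilderChaining(t *testing.T) {
	t.Parallel()
	grp := NewTaskGroup("grp1", 1).
		Constrain(NewConstraint("kernel.name", "=", "linux")).
		SetMeta("owner", "team-a").
		AddTask(NewTask("task1", "exec"))

	if len(grp.Constraints) != 1 || len(grp.Tasks) != 1 || grp.Meta["owner"] != "team-a" {
		t.Fatalf("unexpected group after chained setters: %#v", grp)
	}
}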

func TestTask_NewTask(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")
	expect := &Task{
		Name:   "task1",
		Driver: "exec",
	}
	if !reflect.DeepEqual(task, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task)
	}
}

func TestTask_SetConfig(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Initializes an empty map
	out := task.SetConfig("foo", "bar")
	if task.Config == nil {
		t.Fatalf("should be initialized")
	}

	// Check that we returned the task
	if out != task {
		t.Fatalf("expect: %#v, got: %#v", task, out)
	}

	// Set another config value
	task.SetConfig("baz", "zip")
	expect := map[string]interface{}{"foo": "bar", "baz": "zip"}
	if !reflect.DeepEqual(task.Config, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task.Config)
	}
}

func TestTask_SetMeta(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Initializes an empty map
	out := task.SetMeta("foo", "bar")
	if task.Meta == nil {
		t.Fatalf("should be initialized")
	}

	// Check that we returned the task
	if out != task {
		t.Fatalf("expect: %#v, got: %#v", task, out)
	}

	// Set another meta k/v
	task.SetMeta("baz", "zip")
	expect := map[string]string{"foo": "bar", "baz": "zip"}
	if !reflect.DeepEqual(task.Meta, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task.Meta)
	}
}

func TestTask_Require(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Create some require resources
	resources := &Resources{
		CPU:      helper.IntToPtr(1250),
		MemoryMB: helper.IntToPtr(128),
		DiskMB:   helper.IntToPtr(2048),
		IOPS:     helper.IntToPtr(500),
		Networks: []*NetworkResource{
			{
				CIDR:          "0.0.0.0/0",
				MBits:         helper.IntToPtr(100),
				ReservedPorts: []Port{{"", 80}, {"", 443}},
			},
		},
	}
	out := task.Require(resources)
	if !reflect.DeepEqual(task.Resources, resources) {
		t.Fatalf("expect: %#v, got: %#v", resources, task.Resources)
	}

	// Check that we returned the task
	if out != task {
		t.Fatalf("expect: %#v, got: %#v", task, out)
	}
}
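
// The Task setters mirror the group setters: Require, Constrain, SetConfig and
// SetMeta each return the task, so a task can be assembled fluently as well.
// Minimal sketch added for illustration, not part of the upstream tests; the
// config values and resource figures are placeholders.
func TestTask_BuilderChaining(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec").
		SetConfig("command", "/bin/sleep").
		SetMeta("owner", "team-a").
		Constrain(NewConstraint("kernel.name", "=", "linux")).
		Require(&Resources{
			CPU:      helper.IntToPtr(500),
			MemoryMB: helper.IntToPtr(256),
		})

	if task.Resources == nil || len(task.Constraints) != 1 || task.Config["command"] != "/bin/sleep" {
		t.Fatalf("unexpected task after chained setters: %#v", task)
	}
}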

func TestTask_Constrain(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Add a constraint to the task
	out := task.Constrain(NewConstraint("kernel.name", "=", "darwin"))
	if n := len(task.Constraints); n != 1 {
		t.Fatalf("expected 1 constraint, got: %d", n)
	}

	// Check that the task was returned
	if out != task {
		t.Fatalf("expected: %#v, got: %#v", task, out)
	}

	// Add a second constraint
	task.Constrain(NewConstraint("memory.totalbytes", ">=", "128000000"))
	expect := []*Constraint{
		{
			LTarget: "kernel.name",
			RTarget: "darwin",
			Operand: "=",
		},
		{
			LTarget: "memory.totalbytes",
			RTarget: "128000000",
			Operand: ">=",
		},
	}
	if !reflect.DeepEqual(task.Constraints, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task.Constraints)
	}
}

func TestTask_AddAffinity(t *testing.T) {
	t.Parallel()
	task := NewTask("task1", "exec")

	// Add an affinity to the task
	out := task.AddAffinity(NewAffinity("kernel.version", "=", "4.6", 100))
	require := require.New(t)
	require.Len(out.Affinities, 1)

	// Check that the task was returned
	if out != task {
		t.Fatalf("expected: %#v, got: %#v", task, out)
	}

	// Add a second affinity
	task.AddAffinity(NewAffinity("${node.datacenter}", "=", "dc2", 50))
	expect := []*Affinity{
		{
			LTarget: "kernel.version",
			RTarget: "4.6",
			Operand: "=",
			Weight:  100,
		},
		{
			LTarget: "${node.datacenter}",
			RTarget: "dc2",
			Operand: "=",
			Weight:  50,
		},
	}
	if !reflect.DeepEqual(task.Affinities, expect) {
		t.Fatalf("expect: %#v, got: %#v", expect, task.Affinities)
	}
}

func TestTask_Artifact(t *testing.T) {
	t.Parallel()
	a := TaskArtifact{
		GetterSource: helper.StringToPtr("http://localhost/foo.txt"),
		GetterMode:   helper.StringToPtr("file"),
	}
	a.Canonicalize()
	if *a.GetterMode != "file" {
		t.Errorf("expected file but found %q", *a.GetterMode)
	}
	if *a.RelativeDest != "local/foo.txt" {
		t.Errorf("expected local/foo.txt but found %q", *a.RelativeDest)
	}
}

// Ensures no regression on https://github.com/hashicorp/nomad/issues/3132
func TestTaskGroup_Canonicalize_Update(t *testing.T) {
	job := &Job{
		ID: helper.StringToPtr("test"),
		Update: &UpdateStrategy{
			AutoRevert:       helper.BoolToPtr(false),
			Canary:           helper.IntToPtr(0),
			HealthCheck:      helper.StringToPtr(""),
			HealthyDeadline:  helper.TimeToPtr(0),
			ProgressDeadline: helper.TimeToPtr(0),
			MaxParallel:      helper.IntToPtr(0),
			MinHealthyTime:   helper.TimeToPtr(0),
			Stagger:          helper.TimeToPtr(0),
		},
	}
	job.Canonicalize()
	tg := &TaskGroup{
		Name: helper.StringToPtr("foo"),
	}
	tg.Canonicalize(job)
	assert.Nil(t, tg.Update)
}

// Verifies that reschedule policy is merged correctly
func TestTaskGroup_Canonicalize_ReschedulePolicy(t *testing.T) {
	type testCase struct {
		desc                 string
		jobReschedulePolicy  *ReschedulePolicy
		taskReschedulePolicy *ReschedulePolicy
		expected             *ReschedulePolicy
	}

	testCases := []testCase{
		{
			desc:                 "Default",
			jobReschedulePolicy:  nil,
			taskReschedulePolicy: nil,
			expected: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(structs.DefaultBatchJobReschedulePolicy.Attempts),
				Interval:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
				Delay:         helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
				DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
				MaxDelay:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
				Unlimited:     helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
			},
		},
		{
			desc: "Empty job reschedule policy",
			jobReschedulePolicy: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(0),
				Interval:      helper.TimeToPtr(0),
				Delay:         helper.TimeToPtr(0),
				MaxDelay:      helper.TimeToPtr(0),
				DelayFunction: helper.StringToPtr(""),
				Unlimited:     helper.BoolToPtr(false),
			},
			taskReschedulePolicy: nil,
			expected: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(0),
				Interval:      helper.TimeToPtr(0),
				Delay:         helper.TimeToPtr(0),
				MaxDelay:      helper.TimeToPtr(0),
				DelayFunction: helper.StringToPtr(""),
				Unlimited:     helper.BoolToPtr(false),
			},
		},
		{
			desc: "Inherit from job",
			jobReschedulePolicy: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(1),
				Interval:      helper.TimeToPtr(20 * time.Second),
				Delay:         helper.TimeToPtr(20 * time.Second),
				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
				DelayFunction: helper.StringToPtr("constant"),
				Unlimited:     helper.BoolToPtr(false),
			},
			taskReschedulePolicy: nil,
			expected: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(1),
				Interval:      helper.TimeToPtr(20 * time.Second),
				Delay:         helper.TimeToPtr(20 * time.Second),
				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
				DelayFunction: helper.StringToPtr("constant"),
				Unlimited:     helper.BoolToPtr(false),
			},
		},
		{
			desc:                "Set in task",
			jobReschedulePolicy: nil,
			taskReschedulePolicy: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(5),
				Interval:      helper.TimeToPtr(2 * time.Minute),
				Delay:         helper.TimeToPtr(20 * time.Second),
				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
				DelayFunction: helper.StringToPtr("constant"),
				Unlimited:     helper.BoolToPtr(false),
			},
			expected: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(5),
				Interval:      helper.TimeToPtr(2 * time.Minute),
				Delay:         helper.TimeToPtr(20 * time.Second),
				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
				DelayFunction: helper.StringToPtr("constant"),
				Unlimited:     helper.BoolToPtr(false),
			},
		},
		{
			desc: "Merge from job",
			jobReschedulePolicy: &ReschedulePolicy{
				Attempts: helper.IntToPtr(1),
				Delay:    helper.TimeToPtr(20 * time.Second),
				MaxDelay: helper.TimeToPtr(10 * time.Minute),
			},
			taskReschedulePolicy: &ReschedulePolicy{
				Interval:      helper.TimeToPtr(5 * time.Minute),
				DelayFunction: helper.StringToPtr("constant"),
				Unlimited:     helper.BoolToPtr(false),
			},
			expected: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(1),
				Interval:      helper.TimeToPtr(5 * time.Minute),
				Delay:         helper.TimeToPtr(20 * time.Second),
				MaxDelay:      helper.TimeToPtr(10 * time.Minute),
				DelayFunction: helper.StringToPtr("constant"),
				Unlimited:     helper.BoolToPtr(false),
			},
		},
		{
			desc: "Override from group",
			jobReschedulePolicy: &ReschedulePolicy{
				Attempts: helper.IntToPtr(1),
				MaxDelay: helper.TimeToPtr(10 * time.Second),
			},
			taskReschedulePolicy: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(5),
				Delay:         helper.TimeToPtr(20 * time.Second),
				MaxDelay:      helper.TimeToPtr(20 * time.Minute),
				DelayFunction: helper.StringToPtr("constant"),
				Unlimited:     helper.BoolToPtr(false),
			},
			expected: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(5),
				Interval:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
				Delay:         helper.TimeToPtr(20 * time.Second),
				MaxDelay:      helper.TimeToPtr(20 * time.Minute),
				DelayFunction: helper.StringToPtr("constant"),
				Unlimited:     helper.BoolToPtr(false),
			},
		},
		{
			desc: "Attempts from job, default interval",
			jobReschedulePolicy: &ReschedulePolicy{
				Attempts: helper.IntToPtr(1),
			},
			taskReschedulePolicy: nil,
			expected: &ReschedulePolicy{
				Attempts:      helper.IntToPtr(1),
				Interval:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Interval),
				Delay:         helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.Delay),
				DelayFunction: helper.StringToPtr(structs.DefaultBatchJobReschedulePolicy.DelayFunction),
				MaxDelay:      helper.TimeToPtr(structs.DefaultBatchJobReschedulePolicy.MaxDelay),
				Unlimited:     helper.BoolToPtr(structs.DefaultBatchJobReschedulePolicy.Unlimited),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			job := &Job{
				ID:         helper.StringToPtr("test"),
				Reschedule: tc.jobReschedulePolicy,
				Type:       helper.StringToPtr(JobTypeBatch),
			}
			job.Canonicalize()
			tg := &TaskGroup{
				Name:             helper.StringToPtr("foo"),
				ReschedulePolicy: tc.taskReschedulePolicy,
			}
			tg.Canonicalize(job)
			assert.Equal(t, tc.expected, tg.ReschedulePolicy)
		})
	}
}

// Verifies that migrate strategy is merged correctly
func TestTaskGroup_Canonicalize_MigrateStrategy(t *testing.T) {
	type testCase struct {
		desc        string
		jobType     string
		jobMigrate  *MigrateStrategy
		taskMigrate *MigrateStrategy
		expected    *MigrateStrategy
	}

	testCases := []testCase{
		{
			desc:        "Default batch",
			jobType:     "batch",
			jobMigrate:  nil,
			taskMigrate: nil,
			expected:    nil,
		},
		{
			desc:        "Default service",
			jobType:     "service",
			jobMigrate:  nil,
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(1),
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(10 * time.Second),
				HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
			},
		},
		{
			desc:    "Empty job migrate strategy",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(0),
				HealthCheck:     helper.StringToPtr(""),
				MinHealthyTime:  helper.TimeToPtr(0),
				HealthyDeadline: helper.TimeToPtr(0),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(0),
				HealthCheck:     helper.StringToPtr(""),
				MinHealthyTime:  helper.TimeToPtr(0),
				HealthyDeadline: helper.TimeToPtr(0),
			},
		},
		{
			desc:    "Inherit from job",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(3),
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(2),
				HealthyDeadline: helper.TimeToPtr(2),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(3),
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(2),
				HealthyDeadline: helper.TimeToPtr(2),
			},
		},
		{
			desc:       "Set in task",
			jobType:    "service",
			jobMigrate: nil,
			taskMigrate: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(3),
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(2),
				HealthyDeadline: helper.TimeToPtr(2),
			},
			expected: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(3),
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(2),
				HealthyDeadline: helper.TimeToPtr(2),
			},
		},
		{
			desc:    "Merge from job",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: helper.IntToPtr(11),
			},
			taskMigrate: &MigrateStrategy{
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(2),
				HealthyDeadline: helper.TimeToPtr(2),
			},
			expected: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(11),
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(2),
				HealthyDeadline: helper.TimeToPtr(2),
			},
		},
		{
			desc:    "Override from group",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: helper.IntToPtr(11),
			},
			taskMigrate: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(5),
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(2),
				HealthyDeadline: helper.TimeToPtr(2),
			},
			expected: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(5),
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(2),
				HealthyDeadline: helper.TimeToPtr(2),
			},
		},
		{
			desc:    "Parallel from job, defaulting",
			jobType: "service",
			jobMigrate: &MigrateStrategy{
				MaxParallel: helper.IntToPtr(5),
			},
			taskMigrate: nil,
			expected: &MigrateStrategy{
				MaxParallel:     helper.IntToPtr(5),
				HealthCheck:     helper.StringToPtr("checks"),
				MinHealthyTime:  helper.TimeToPtr(10 * time.Second),
				HealthyDeadline: helper.TimeToPtr(5 * time.Minute),
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.desc, func(t *testing.T) {
			job := &Job{
				ID:      helper.StringToPtr("test"),
				Migrate: tc.jobMigrate,
				Type:    helper.StringToPtr(tc.jobType),
			}
			job.Canonicalize()
			tg := &TaskGroup{
				Name:    helper.StringToPtr("foo"),
				Migrate: tc.taskMigrate,
			}
			tg.Canonicalize(job)
			assert.Equal(t, tc.expected, tg.Migrate)
		})
	}
}

// TestService_CheckRestart asserts Service.CheckRestart settings are properly
// inherited by Checks.
func TestService_CheckRestart(t *testing.T) {
	job := &Job{Name: helper.StringToPtr("job")}
	tg := &TaskGroup{Name: helper.StringToPtr("group")}
	task := &Task{Name: "task"}
	service := &Service{
		CheckRestart: &CheckRestart{
			Limit:          11,
			Grace:          helper.TimeToPtr(11 * time.Second),
			IgnoreWarnings: true,
		},
		Checks: []ServiceCheck{
			{
				Name: "all-set",
				CheckRestart: &CheckRestart{
					Limit:          22,
					Grace:          helper.TimeToPtr(22 * time.Second),
					IgnoreWarnings: true,
				},
			},
			{
				Name: "some-set",
				CheckRestart: &CheckRestart{
					Limit: 33,
					Grace: helper.TimeToPtr(33 * time.Second),
				},
			},
			{
				Name: "unset",
			},
		},
	}

	service.Canonicalize(task, tg, job)
	assert.Equal(t, 22, service.Checks[0].CheckRestart.Limit)
	assert.Equal(t, 22*time.Second, *service.Checks[0].CheckRestart.Grace)
	assert.True(t, service.Checks[0].CheckRestart.IgnoreWarnings)

	assert.Equal(t, 33, service.Checks[1].CheckRestart.Limit)
	assert.Equal(t, 33*time.Second, *service.Checks[1].CheckRestart.Grace)
	assert.True(t, service.Checks[1].CheckRestart.IgnoreWarnings)

	assert.Equal(t, 11, service.Checks[2].CheckRestart.Limit)
	assert.Equal(t, 11*time.Second, *service.Checks[2].CheckRestart.Grace)
	assert.True(t, service.Checks[2].CheckRestart.IgnoreWarnings)
}