github.com/rohankumardubey/nomad@v0.11.8/scheduler/util_test.go

package scheduler

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/hashicorp/nomad/helper"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

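// TestMaterializeTaskGroups checks that a job's task groups are expanded
// into one entry per desired instance, keyed by the canonical
// "<job>.<group>[<index>]" allocation name.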
func TestMaterializeTaskGroups(t *testing.T) {
	job := mock.Job()
	index := materializeTaskGroups(job)
	require.Equal(t, 10, len(index))

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("my-job.web[%d]", i)
		require.Contains(t, index, name)
		require.Equal(t, job.TaskGroups[0], index[name])
	}
}

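// TestDiffSystemAllocsForNode exercises the per-node diff: allocs with an
// old job version are updated, current ones are ignored, surplus indexes
// are stopped, allocs on draining nodes are migrated, allocs on down nodes
// are lost, and missing instances are placed.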
func TestDiffSystemAllocsForNode(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	eligibleNode := mock.Node()
	eligibleNode.ID = "zip"

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		"dead":      deadNode,
		"drainNode": drainNode,
	}

	eligible := map[string]*structs.Node{
		eligibleNode.ID: eligibleNode,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		{
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		{
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Evict the 11th, which is beyond the desired count
		{
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
			Job:    oldJob,
		},

		// Migrate the 3rd
		{
			ID:     uuid.Generate(),
			NodeID: "drainNode",
			Name:   "my-job.web[2]",
			Job:    oldJob,
			DesiredTransition: structs.DesiredTransition{
				Migrate: helper.BoolToPtr(true),
			},
		},

		// Mark the 4th lost
		{
			ID:     uuid.Generate(),
			NodeID: "dead",
			Name:   "my-job.web[3]",
			Job:    oldJob,
		},
	}

	// Have three terminal allocs
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[4]": {
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[4]",
			Job:    job,
		},
		"my-job.web[5]": {
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[5]",
			Job:    job,
		},
		"my-job.web[6]": {
			ID:     uuid.Generate(),
			NodeID: "zip",
			Name:   "my-job.web[6]",
			Job:    job,
		},
	}

	diff := diffSystemAllocsForNode(job, "zip", eligible, tainted, required, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	require.True(t, len(update) == 1 && update[0].Alloc == allocs[0])

	// We should ignore the second alloc
	require.True(t, len(ignore) == 1 && ignore[0].Alloc == allocs[1])

	// We should stop the third alloc
	require.True(t, len(stop) == 1 && stop[0].Alloc == allocs[2])

	// We should migrate the fourth alloc
	require.True(t, len(migrate) == 1 && migrate[0].Alloc == allocs[3])

	// We should mark the fifth alloc as lost
	require.True(t, len(lost) == 1 && lost[0].Alloc == allocs[4])

	// We should place 6
	require.Equal(t, 6, len(place))

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for name, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if name == allocTuple.Name {
				require.True(t, reflect.DeepEqual(alloc, allocTuple.Alloc),
					"expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
			}
		}
	}
}

// Test the desired diff for an updated system job running on an
// ineligible node
func TestDiffSystemAllocsForNode_ExistingAllocIneligibleNode(t *testing.T) {
	job := mock.Job()
	job.TaskGroups[0].Count = 1
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	eligibleNode := mock.Node()
	ineligibleNode := mock.Node()
	ineligibleNode.SchedulingEligibility = structs.NodeSchedulingIneligible

	tainted := map[string]*structs.Node{}

	eligible := map[string]*structs.Node{
		eligibleNode.ID: eligibleNode,
	}

	allocs := []*structs.Allocation{
		// Update the TG alloc running on the eligible node
		{
			ID:     uuid.Generate(),
			NodeID: eligibleNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the TG alloc running on the ineligible node
		{
			ID:     uuid.Generate(),
			NodeID: ineligibleNode.ID,
			Name:   "my-job.web[0]",
			Job:    job,
		},
	}

	// No terminal allocs
	terminalAllocs := map[string]*structs.Allocation{}

	diff := diffSystemAllocsForNode(job, eligibleNode.ID, eligible, tainted, required, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	require.Len(t, place, 0)
	require.Len(t, update, 1)
	require.Len(t, migrate, 0)
	require.Len(t, stop, 0)
	require.Len(t, ignore, 1)
	require.Len(t, lost, 0)
}

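// TestDiffSystemAllocs runs the job-level system diff across a mixed set of
// nodes: an outdated alloc is updated, an up-to-date one is ignored, a
// draining node's alloc is migrated, a dead node's alloc is lost, and the
// remaining ready nodes get placements.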
func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	drainNode := mock.Node()
	drainNode.Drain = true

	deadNode := mock.Node()
	deadNode.Status = structs.NodeStatusDown

	tainted := map[string]*structs.Node{
		deadNode.ID:  deadNode,
		drainNode.ID: drainNode,
	}

	// Create four alive nodes, plus the draining and dead nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"},
		{ID: "pipe"}, {ID: drainNode.ID}, {ID: deadNode.ID}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	allocs := []*structs.Allocation{
		// Update allocation on baz
		{
			ID:     uuid.Generate(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		{
			ID:     uuid.Generate(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Migrate allocation on the draining node.
		{
			ID:     uuid.Generate(),
			NodeID: drainNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
			DesiredTransition: structs.DesiredTransition{
				Migrate: helper.BoolToPtr(true),
			},
		},

		// Mark as lost on a dead node
		{
			ID:     uuid.Generate(),
			NodeID: deadNode.ID,
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
	}

	// Have one terminal alloc
	terminalAllocs := map[string]*structs.Allocation{
		"my-job.web[0]": {
			ID:     uuid.Generate(),
			NodeID: "pipe",
			Name:   "my-job.web[0]",
			Job:    job,
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs, terminalAllocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore
	lost := diff.lost

	// We should update the first alloc
	require.True(t, len(update) == 1 && update[0].Alloc == allocs[0])

	// We should ignore the second alloc
	require.True(t, len(ignore) == 1 && ignore[0].Alloc == allocs[1])

	// There should be no stops
	require.Empty(t, stop)

	// We should migrate the third alloc
	require.True(t, len(migrate) == 1 && migrate[0].Alloc == allocs[2])

	// We should mark the fourth alloc as lost
	require.True(t, len(lost) == 1 && lost[0].Alloc == allocs[3])

	// We should place 2
	require.Equal(t, 2, len(place))

	// Ensure that the allocations which are replacements of terminal allocs are
	// annotated
	for _, alloc := range terminalAllocs {
		for _, allocTuple := range diff.place {
			if alloc.NodeID == allocTuple.Alloc.NodeID {
				require.True(t, reflect.DeepEqual(alloc, allocTuple.Alloc),
					"expected: %#v, actual: %#v", alloc, allocTuple.Alloc)
			}
		}
	}
}

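// TestReadyNodesInDCs verifies that readyNodesInDCs filters out down and
// draining nodes and reports a per-datacenter count of the ready ones.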
func TestReadyNodesInDCs(t *testing.T) {
	state := state.TestStateStore(t)
	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true

	require.NoError(t, state.UpsertNode(1000, node1))
	require.NoError(t, state.UpsertNode(1001, node2))
	require.NoError(t, state.UpsertNode(1002, node3))
	require.NoError(t, state.UpsertNode(1003, node4))

	nodes, dc, err := readyNodesInDCs(state, []string{"dc1", "dc2"})
	require.NoError(t, err)
	require.Equal(t, 2, len(nodes))
	require.True(t, nodes[0].ID != node3.ID && nodes[1].ID != node3.ID)

	require.Contains(t, dc, "dc1")
	require.Equal(t, 1, dc["dc1"])
	require.Contains(t, dc, "dc2")
	require.Equal(t, 1, dc["dc2"])
}

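// TestRetryMax covers the retryMax helper: a callback that never succeeds
// exhausts the retry budget, a reset function that returns true restores
// the budget once, and a successful callback short-circuits immediately.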
func TestRetryMax(t *testing.T) {
	calls := 0
	bad := func() (bool, error) {
		calls += 1
		return false, nil
	}
	err := retryMax(3, bad, nil)
	require.Error(t, err)
	require.Equal(t, 3, calls, "mismatch")

	calls = 0
	first := true
	reset := func() bool {
		if calls == 3 && first {
			first = false
			return true
		}
		return false
	}
	err = retryMax(3, bad, reset)
	require.Error(t, err)
	require.Equal(t, 6, calls, "mismatch")

	calls = 0
	good := func() (bool, error) {
		calls += 1
		return true, nil
	}
	err = retryMax(3, good, nil)
	require.NoError(t, err)
	require.Equal(t, 1, calls, "mismatch")
}

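// TestTaintedNodes checks that taintedNodes returns the down and draining
// nodes referenced by the given allocs, and maps a node ID that is no
// longer in state to a nil entry.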
func TestTaintedNodes(t *testing.T) {
	state := state.TestStateStore(t)
	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true
	require.NoError(t, state.UpsertNode(1000, node1))
	require.NoError(t, state.UpsertNode(1001, node2))
	require.NoError(t, state.UpsertNode(1002, node3))
	require.NoError(t, state.UpsertNode(1003, node4))

	allocs := []*structs.Allocation{
		{NodeID: node1.ID},
		{NodeID: node2.ID},
		{NodeID: node3.ID},
		{NodeID: node4.ID},
		{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
	}
	tainted, err := taintedNodes(state, allocs)
	require.NoError(t, err)
	require.Equal(t, 3, len(tainted))
	require.NotContains(t, tainted, node1.ID)
	require.NotContains(t, tainted, node2.ID)

	require.Contains(t, tainted, node3.ID)
	require.NotNil(t, tainted[node3.ID])

	require.Contains(t, tainted, node4.ID)
	require.NotNil(t, tainted[node4.ID])

	require.Contains(t, tainted, "12345678-abcd-efab-cdef-123456789abc")
	require.Nil(t, tainted["12345678-abcd-efab-cdef-123456789abc"])
}

func TestShuffleNodes(t *testing.T) {
	// Use a large number of nodes to make the probability of shuffling to the
	// original order very low.
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	orig := make([]*structs.Node, len(nodes))
	copy(orig, nodes)
	shuffleNodes(nodes)
	require.False(t, reflect.DeepEqual(nodes, orig))
}

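// TestTaskUpdatedAffinity checks that adding an affinity at the group or
// task level marks the task group as updated, while moving an identical
// affinity between the job and group levels does not.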
func TestTaskUpdatedAffinity(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name

	require.False(t, tasksUpdated(j1, j2, name))

	// TaskGroup Affinity
	j2.TaskGroups[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}
	require.True(t, tasksUpdated(j1, j2, name))

	// TaskGroup Task Affinity
	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}

	require.True(t, tasksUpdated(j1, j3, name))

	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}

	require.True(t, tasksUpdated(j1, j4, name))

	// Check the same affinity expressed at different levels
	j5 := mock.Job()
	j5.Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}

	j6 := mock.Job()
	j6.Affinities = make([]*structs.Affinity, 0)
	j6.TaskGroups[0].Affinities = []*structs.Affinity{
		{
			LTarget: "node.datacenter",
			RTarget: "dc1",
			Operand: "=",
			Weight:  100,
		},
	}

	require.False(t, tasksUpdated(j5, j6, name))
}

func TestTaskUpdatedSpread(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name

	require.False(t, tasksUpdated(j1, j2, name))

	// TaskGroup Spread
	j2.TaskGroups[0].Spreads = []*structs.Spread{
		{
			Attribute: "node.datacenter",
			Weight:    100,
			SpreadTarget: []*structs.SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
				{
					Value:   "r2",
					Percent: 50,
				},
			},
		},
	}
	require.True(t, tasksUpdated(j1, j2, name))

	// Check the same spread expressed at different levels
	j5 := mock.Job()
	j5.Spreads = []*structs.Spread{
		{
			Attribute: "node.datacenter",
			Weight:    100,
			SpreadTarget: []*structs.SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
				{
					Value:   "r2",
					Percent: 50,
				},
			},
		},
	}

	j6 := mock.Job()
	j6.TaskGroups[0].Spreads = []*structs.Spread{
		{
			Attribute: "node.datacenter",
			Weight:    100,
			SpreadTarget: []*structs.SpreadTarget{
				{
					Value:   "r1",
					Percent: 50,
				},
				{
					Value:   "r2",
					Percent: 50,
				},
			},
		},
	}

	require.False(t, tasksUpdated(j5, j6, name))
}

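// TestTasksUpdated walks the fields that make a task group destructively
// updated: task config, name, driver, and count, ports, env, user,
// artifacts, meta, resources (CPU, devices, bandwidth, port labels), Vault
// policies, ephemeral disk, group and job meta, and group network mode.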
func TestTasksUpdated(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()
	name := j1.TaskGroups[0].Name
	require.False(t, tasksUpdated(j1, j2, name))

	j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	require.True(t, tasksUpdated(j1, j2, name))

	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Name = "foo"
	require.True(t, tasksUpdated(j1, j3, name))

	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Driver = "foo"
	require.True(t, tasksUpdated(j1, j4, name))

	j5 := mock.Job()
	j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks,
		j5.TaskGroups[0].Tasks[0])
	require.True(t, tasksUpdated(j1, j5, name))

	j6 := mock.Job()
	j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{
		{Label: "http", Value: 0},
		{Label: "https", Value: 0},
		{Label: "admin", Value: 0},
	}
	require.True(t, tasksUpdated(j1, j6, name))

	j7 := mock.Job()
	j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE"
	require.True(t, tasksUpdated(j1, j7, name))

	j8 := mock.Job()
	j8.TaskGroups[0].Tasks[0].User = "foo"
	require.True(t, tasksUpdated(j1, j8, name))

	j9 := mock.Job()
	j9.TaskGroups[0].Tasks[0].Artifacts = []*structs.TaskArtifact{
		{
			GetterSource: "http://foo.com/bar",
		},
	}
	require.True(t, tasksUpdated(j1, j9, name))

	j10 := mock.Job()
	j10.TaskGroups[0].Tasks[0].Meta["baz"] = "boom"
	require.True(t, tasksUpdated(j1, j10, name))

	j11 := mock.Job()
	j11.TaskGroups[0].Tasks[0].Resources.CPU = 1337
	require.True(t, tasksUpdated(j1, j11, name))

	j11d1 := mock.Job()
	j11d1.TaskGroups[0].Tasks[0].Resources.Devices = structs.ResourceDevices{
		&structs.RequestedDevice{
			Name:  "gpu",
			Count: 1,
		},
	}
	j11d2 := mock.Job()
	j11d2.TaskGroups[0].Tasks[0].Resources.Devices = structs.ResourceDevices{
		&structs.RequestedDevice{
			Name:  "gpu",
			Count: 2,
		},
	}
	require.True(t, tasksUpdated(j11d1, j11d2, name))

	j12 := mock.Job()
	j12.TaskGroups[0].Tasks[0].Resources.Networks[0].MBits = 100
	require.True(t, tasksUpdated(j1, j12, name))

	j13 := mock.Job()
	j13.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts[0].Label = "foobar"
	require.True(t, tasksUpdated(j1, j13, name))

	j14 := mock.Job()
	j14.TaskGroups[0].Tasks[0].Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "foo", Value: 1312}}
	require.True(t, tasksUpdated(j1, j14, name))

	j15 := mock.Job()
	j15.TaskGroups[0].Tasks[0].Vault = &structs.Vault{Policies: []string{"foo"}}
	require.True(t, tasksUpdated(j1, j15, name))

	j16 := mock.Job()
	j16.TaskGroups[0].EphemeralDisk.Sticky = true
	require.True(t, tasksUpdated(j1, j16, name))

	// Change group meta
	j17 := mock.Job()
	j17.TaskGroups[0].Meta["j17_test"] = "roll_baby_roll"
	require.True(t, tasksUpdated(j1, j17, name))

	// Change job meta
	j18 := mock.Job()
	j18.Meta["j18_test"] = "roll_baby_roll"
	require.True(t, tasksUpdated(j1, j18, name))

	// Change network mode
	j19 := mock.Job()
	j19.TaskGroups[0].Networks = j19.TaskGroups[0].Tasks[0].Resources.Networks
	j19.TaskGroups[0].Tasks[0].Resources.Networks = nil

	j20 := mock.Job()
	j20.TaskGroups[0].Networks = j20.TaskGroups[0].Tasks[0].Resources.Networks
	j20.TaskGroups[0].Tasks[0].Resources.Networks = nil

	require.False(t, tasksUpdated(j19, j20, name))

	j20.TaskGroups[0].Networks[0].Mode = "bridge"
	require.True(t, tasksUpdated(j19, j20, name))
}

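// The TestEvictAndPlace_* tests cover the evictAndPlace update limit in
// three regimes: a limit below the number of allocs (limited, budget
// exhausted), a limit equal to it, and a limit above it (the leftover
// budget is preserved; see TestEvictAndPlace_LimitGreaterThanAllocs below).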
func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
	}
	diff := &diffResult{}

	limit := 2
	require.True(t, evictAndPlace(ctx, diff, allocs, "", &limit), "evictAndPlace() should have returned true")
	require.Zero(t, limit, "evictAndPlace() should have decremented limit; got %v; want 0", limit)
	require.Equal(t, 2, len(diff.place), "evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
}

func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
	}
	diff := &diffResult{}

	limit := 4
	require.False(t, evictAndPlace(ctx, diff, allocs, "", &limit), "evictAndPlace() should have returned false")
	require.Zero(t, limit, "evictAndPlace() should have decremented limit; got %v; want 0", limit)
	require.Equal(t, 4, len(diff.place), "evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
}

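// TestSetStatus verifies that setStatus submits exactly one updated eval
// and threads through each optional field: next eval, blocked eval,
// failed-TG metrics, queued allocations, and deployment ID.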
func TestSetStatus(t *testing.T) {
	h := NewHarness(t)
	logger := testlog.HCLogger(t)
	eval := mock.Eval()
	status := "a"
	desc := "b"
	require.NoError(t, setStatus(logger, h, eval, nil, nil, nil, status, desc, nil, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval := h.Evals[0]
	require.True(t, newEval.ID == eval.ID && newEval.Status == status && newEval.StatusDescription == desc,
		"setStatus() submitted invalid eval: %v", newEval)

	// Test next evals
	h = NewHarness(t)
	next := mock.Eval()
	require.NoError(t, setStatus(logger, h, eval, next, nil, nil, status, desc, nil, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.Equal(t, next.ID, newEval.NextEval, "setStatus() didn't set nextEval correctly: %v", newEval)

	// Test blocked evals
	h = NewHarness(t)
	blocked := mock.Eval()
	require.NoError(t, setStatus(logger, h, eval, nil, blocked, nil, status, desc, nil, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.Equal(t, blocked.ID, newEval.BlockedEval, "setStatus() didn't set BlockedEval correctly: %v", newEval)

	// Test metrics
	h = NewHarness(t)
	metrics := map[string]*structs.AllocMetric{"foo": nil}
	require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, nil, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.True(t, reflect.DeepEqual(newEval.FailedTGAllocs, metrics),
		"setStatus() didn't set failed task group metrics correctly: %v", newEval)

	// Test queued allocations
	h = NewHarness(t)
	queuedAllocs := map[string]int{"web": 1}

	require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, ""))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.True(t, reflect.DeepEqual(newEval.QueuedAllocations, queuedAllocs), "setStatus() didn't set queued allocations correctly: %v", newEval)

	// Test deployment ID
	h = NewHarness(t)
	dID := uuid.Generate()
	require.NoError(t, setStatus(logger, h, eval, nil, nil, metrics, status, desc, queuedAllocs, dID))
	require.Equal(t, 1, len(h.Evals), "setStatus() didn't update plan: %v", h.Evals)

	newEval = h.Evals[0]
	require.Equal(t, dID, newEval.DeploymentID, "setStatus() didn't set deployment id correctly: %v", newEval)
}

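// TestInplaceUpdate_ChangedTaskGroup ensures that replacing a group's tasks
// forces a destructive update: the alloc comes back unplaced and nothing is
// added to the plan in place.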
func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	require.NoError(t, state.UpsertNode(900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 2048,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 2048,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that prevents in-place updates.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	task := &structs.Task{
		Name:      "FOO",
		Resources: &structs.Resources{},
	}
	tg.Tasks = nil
	tg.Tasks = append(tg.Tasks, task)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update")
	require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
}

func TestInplaceUpdate_NoMatch(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	require.NoError(t, state.UpsertNode(900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 2048,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 2048,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
		TaskGroup:     "web",
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(1000, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that requires too many resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 9999}
	tg.Tasks[0].Resources = resource

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	require.True(t, len(unplaced) == 1 && len(inplace) == 0, "inplaceUpdate incorrectly did an inplace update")
	require.Empty(t, ctx.plan.NodeAllocation, "inplaceUpdate incorrectly did an inplace update")
}

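// TestInplaceUpdate_Success ensures a compatible change (reduced resources,
// updated service set) is applied in place: the plan gains the alloc and
// the returned alloc carries all three expected services.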
func TestInplaceUpdate_Success(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	require.NoError(t, state.UpsertNode(900, node))

	// Register an alloc
	alloc := &structs.Allocation{
		Namespace: structs.DefaultNamespace,
		ID:        uuid.Generate(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		TaskGroup: job.TaskGroups[0].Name,
		AllocatedResources: &structs.AllocatedResources{
			Tasks: map[string]*structs.AllocatedTaskResources{
				"web": {
					Cpu: structs.AllocatedCpuResources{
						CpuShares: 2048,
					},
					Memory: structs.AllocatedMemoryResources{
						MemoryMB: 2048,
					},
				},
			},
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc.JobID)))
	require.NoError(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that updates the resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 737}
	tg.Tasks[0].Resources = resource
	newServices := []*structs.Service{
		{
			Name:      "dummy-service",
			PortLabel: "http",
		},
		{
			Name:      "dummy-service2",
			PortLabel: "http",
		},
	}

	// Delete service 2
	tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

	// Add the new services
	tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)
	stack.SetJob(job)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	require.True(t, len(unplaced) == 0 && len(inplace) == 1, "inplaceUpdate did not do an inplace update")
	require.Equal(t, 1, len(ctx.plan.NodeAllocation), "inplaceUpdate did not do an inplace update")
	require.Equal(t, alloc.ID, inplace[0].Alloc.ID, "inplaceUpdate returned the wrong, inplace updated alloc: %#v", inplace)

	// Get the alloc we inserted.
	a := inplace[0].Alloc // TODO(sean@): Verify this is correct vs: ctx.plan.NodeAllocation[alloc.NodeID][0]
	require.NotNil(t, a.Job)
	require.Equal(t, 1, len(a.Job.TaskGroups))
	require.Equal(t, 1, len(a.Job.TaskGroups[0].Tasks))
	require.Equal(t, 3, len(a.Job.TaskGroups[0].Tasks[0].Services),
		"Expected number of services: %v, Actual: %v", 3, len(a.Job.TaskGroups[0].Tasks[0].Services))

	serviceNames := make(map[string]struct{}, 3)
	for _, consulService := range a.Job.TaskGroups[0].Tasks[0].Services {
		serviceNames[consulService.Name] = struct{}{}
	}
	require.Equal(t, 3, len(serviceNames))

	for _, name := range []string{"dummy-service", "dummy-service2", "web-frontend"} {
		if _, found := serviceNames[name]; !found {
			t.Errorf("Expected consul service name missing: %v", name)
		}
	}
}

func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
		{Alloc: &structs.Allocation{ID: uuid.Generate()}},
	}
	diff := &diffResult{}

	limit := 6
	require.False(t, evictAndPlace(ctx, diff, allocs, "", &limit))
	require.Equal(t, 2, limit, "evictAndPlace() should have decremented limit")
	require.Equal(t, 4, len(diff.place), "evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
}

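// TestTaskGroupConstraints checks that taskGroupConstraints merges the
// group-level constraints with each task's constraints and collects the
// set of drivers the group requires.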
func TestTaskGroupConstraints(t *testing.T) {
	constr := &structs.Constraint{RTarget: "bar"}
	constr2 := &structs.Constraint{LTarget: "foo"}
	constr3 := &structs.Constraint{Operand: "<"}

	tg := &structs.TaskGroup{
		Name:          "web",
		Count:         10,
		Constraints:   []*structs.Constraint{constr},
		EphemeralDisk: &structs.EphemeralDisk{},
		Tasks: []*structs.Task{
			{
				Driver: "exec",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr2},
			},
			{
				Driver: "docker",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr3},
			},
		},
	}

	// Build the expected values.
	expConstr := []*structs.Constraint{constr, constr2, constr3}
	expDrivers := map[string]struct{}{"exec": {}, "docker": {}}

	actConstraints := taskGroupConstraints(tg)
	require.True(t, reflect.DeepEqual(actConstraints.constraints, expConstr),
		"taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.constraints, expConstr)
	require.True(t, reflect.DeepEqual(actConstraints.drivers, expDrivers),
		"taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.drivers, expDrivers)
}

func TestProgressMade(t *testing.T) {
	noopPlan := &structs.PlanResult{}
	require.False(t, progressMade(nil) || progressMade(noopPlan), "no progress plan marked as making progress")

	m := map[string][]*structs.Allocation{
		"foo": {mock.Alloc()},
	}
	both := &structs.PlanResult{
		NodeAllocation: m,
		NodeUpdate:     m,
	}
	update := &structs.PlanResult{NodeUpdate: m}
	alloc := &structs.PlanResult{NodeAllocation: m}
	deployment := &structs.PlanResult{Deployment: mock.Deployment()}
	deploymentUpdates := &structs.PlanResult{
		DeploymentUpdates: []*structs.DeploymentStatusUpdate{
			{DeploymentID: uuid.Generate()},
		},
	}

	require.True(t, progressMade(both) && progressMade(update) && progressMade(alloc) &&
		progressMade(deployment) && progressMade(deploymentUpdates))
}

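// TestDesiredUpdates checks that desiredUpdates aggregates the diff result
// plus the in-place and destructive tuples into per-task-group counts.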
func TestDesiredUpdates(t *testing.T) {
	tg1 := &structs.TaskGroup{Name: "foo"}
	tg2 := &structs.TaskGroup{Name: "bar"}
	a2 := &structs.Allocation{TaskGroup: "bar"}

	place := []allocTuple{
		{TaskGroup: tg1},
		{TaskGroup: tg1},
		{TaskGroup: tg1},
		{TaskGroup: tg2},
	}
	stop := []allocTuple{
		{TaskGroup: tg2, Alloc: a2},
		{TaskGroup: tg2, Alloc: a2},
	}
	ignore := []allocTuple{
		{TaskGroup: tg1},
	}
	migrate := []allocTuple{
		{TaskGroup: tg2},
	}
	inplace := []allocTuple{
		{TaskGroup: tg1},
		{TaskGroup: tg1},
	}
	destructive := []allocTuple{
		{TaskGroup: tg1},
		{TaskGroup: tg2},
		{TaskGroup: tg2},
	}
	diff := &diffResult{
		place:   place,
		stop:    stop,
		ignore:  ignore,
		migrate: migrate,
	}

	expected := map[string]*structs.DesiredUpdates{
		"foo": {
			Place:             3,
			Ignore:            1,
			InPlaceUpdate:     2,
			DestructiveUpdate: 1,
		},
		"bar": {
			Place:             1,
			Stop:              2,
			Migrate:           1,
			DestructiveUpdate: 2,
		},
	}

	desired := desiredUpdates(diff, inplace, destructive)
	require.True(t, reflect.DeepEqual(desired, expected), "desiredUpdates() returned %#v; want %#v", desired, expected)
}

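// TestUtil_AdjustQueuedAllocations checks that the queued count for a task
// group is reduced only for allocations the plan newly created; here only
// alloc2, whose create and modify indexes match, lowers "web" from 2 to 1.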
func TestUtil_AdjustQueuedAllocations(t *testing.T) {
	logger := testlog.HCLogger(t)
	alloc1 := mock.Alloc()
	alloc2 := mock.Alloc()
	alloc2.CreateIndex = 4
	alloc2.ModifyIndex = 4
	alloc3 := mock.Alloc()
	alloc3.CreateIndex = 3
	alloc3.ModifyIndex = 5
	alloc4 := mock.Alloc()
	alloc4.CreateIndex = 6
	alloc4.ModifyIndex = 8

	planResult := structs.PlanResult{
		NodeUpdate: map[string][]*structs.Allocation{
			"node-1": {alloc1},
		},
		NodeAllocation: map[string][]*structs.Allocation{
			"node-1": {
				alloc2,
			},
			"node-2": {
				alloc3, alloc4,
			},
		},
		RefreshIndex: 3,
		AllocIndex:   16, // Should not be considered
	}

	queuedAllocs := map[string]int{"web": 2}
	adjustQueuedAllocations(logger, &planResult, queuedAllocs)

	require.Equal(t, 1, queuedAllocs["web"])
}

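// TestUtil_UpdateNonTerminalAllocsToLost checks that only allocs on a down
// node that are desired-stop but not yet client-terminal are rewritten to
// lost, and that nothing is marked lost once the node is ready again.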
func TestUtil_UpdateNonTerminalAllocsToLost(t *testing.T) {
	node := mock.Node()
	node.Status = structs.NodeStatusDown
	alloc1 := mock.Alloc()
	alloc1.NodeID = node.ID
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop

	alloc2 := mock.Alloc()
	alloc2.NodeID = node.ID
	alloc2.DesiredStatus = structs.AllocDesiredStatusStop
	alloc2.ClientStatus = structs.AllocClientStatusRunning

	alloc3 := mock.Alloc()
	alloc3.NodeID = node.ID
	alloc3.DesiredStatus = structs.AllocDesiredStatusStop
	alloc3.ClientStatus = structs.AllocClientStatusComplete

	alloc4 := mock.Alloc()
	alloc4.NodeID = node.ID
	alloc4.DesiredStatus = structs.AllocDesiredStatusStop
	alloc4.ClientStatus = structs.AllocClientStatusFailed

	allocs := []*structs.Allocation{alloc1, alloc2, alloc3, alloc4}
	plan := structs.Plan{
		NodeUpdate: make(map[string][]*structs.Allocation),
	}
	tainted := map[string]*structs.Node{node.ID: node}

	updateNonTerminalAllocsToLost(&plan, tainted, allocs)

	allocsLost := make([]string, 0, 2)
	for _, alloc := range plan.NodeUpdate[node.ID] {
		allocsLost = append(allocsLost, alloc.ID)
	}
	expected := []string{alloc1.ID, alloc2.ID}
	require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected)

	// Update the node status to ready and try again
	plan = structs.Plan{
		NodeUpdate: make(map[string][]*structs.Allocation),
	}
	node.Status = structs.NodeStatusReady
	updateNonTerminalAllocsToLost(&plan, tainted, allocs)

	allocsLost = make([]string, 0, 2)
	for _, alloc := range plan.NodeUpdate[node.ID] {
		allocsLost = append(allocsLost, alloc.ID)
	}
	expected = []string{}
	require.True(t, reflect.DeepEqual(allocsLost, expected), "actual: %v, expected: %v", allocsLost, expected)
}