github.com/hhrutter/nomad@v0.6.0-rc2.0.20170723054333-80c4b03f0705/scheduler/reconcile_test.go

     1  package scheduler
     2  
     3  import (
     4  	"fmt"
     5  	"log"
     6  	"os"
     7  	"reflect"
     8  	"regexp"
     9  	"strconv"
    10  	"testing"
    11  	"time"
    12  
    13  	"github.com/hashicorp/nomad/helper"
    14  	"github.com/hashicorp/nomad/nomad/mock"
    15  	"github.com/hashicorp/nomad/nomad/structs"
    16  	"github.com/kr/pretty"
    17  )
    18  
    19  /*
    20  Basic Tests:
    21  √  Place when there is nothing in the cluster
    22  √  Place remainder when there is some in the cluster
    23  √  Scale down from n to n-m where n != m
    24  √  Scale down from n to zero
    25  √  Inplace upgrade test
    26  √  Inplace upgrade and scale up test
    27  √  Inplace upgrade and scale down test
    28  √  Destructive upgrade
    29  √  Destructive upgrade and scale up test
    30  √  Destructive upgrade and scale down test
    31  √  Handle lost nodes
    32  √  Handle lost nodes and scale up
    33  √  Handle lost nodes and scale down
    34  √  Handle draining nodes
    35  √  Handle draining nodes and scale up
    36  √  Handle draining nodes and scale down
    37  √  Handle task group being removed
     38  √  Handle job being stopped, both via .Stop and as a nil job
     39  √  Place more than one group
    40  
    41  Update stanza Tests:
    42  √  Stopped job cancels any active deployment
    43  √  Stopped job doesn't cancel terminal deployment
    44  √  JobIndex change cancels any active deployment
     45  √  JobIndex change doesn't cancel a terminal deployment
     46  √  Destructive changes create a deployment and get rolled out via max_parallel
    47  √  Don't create a deployment if there are no changes
    48  √  Deployment created by all inplace updates
    49  √  Paused or failed deployment doesn't create any more canaries
    50  √  Paused or failed deployment doesn't do any placements
    51  √  Paused or failed deployment doesn't do destructive updates
     52  √  Paused deployment still does migrations
    53  √  Failed deployment doesn't do migrations
    54  √  Canary that is on a draining node
    55  √  Canary that is on a lost node
    56  √  Stop old canaries
    57  √  Create new canaries on job change
    58  √  Create new canaries on job change while scaling up
    59  √  Create new canaries on job change while scaling down
    60  √  Fill canaries if partial placement
    61  √  Promote canaries unblocks max_parallel
    62  √  Promote canaries when canaries == count
    63  √  Only place as many as are healthy in deployment
    64  √  Limit calculation accounts for healthy allocs on migrating/lost nodes
    65  √  Failed deployment should not place anything
    66  √  Run after canaries have been promoted, new allocs have been rolled out and there is no deployment
    67  √  Failed deployment cancels non-promoted task groups
    68  √  Failed deployment and updated job works
    69  √  Finished deployment gets marked as complete
    70  √  The stagger is correctly calculated when it is applied across multiple task groups.
     71  √  Handle job change while scaling up
    72  */
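// Each test below follows the same pattern: build a mock job and a set of
// existing allocations, construct the reconciler with NewAllocReconciler,
// call Compute, and compare the resulting reconcileResults against a
// resultExpectation via assertResults. The constructor arguments are, in
// order, a logger, an alloc update function, a boolean flag (false
// throughout, which appears to mark batch jobs), the job ID, the job, an
// optional deployment, the existing allocations, and any tainted nodes.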
    73  
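// Shared update stanzas for the deployment tests below: canaryUpdate requires
// two canaries and allows two parallel updates, while noCanaryUpdate rolls
// allocations directly with a max_parallel of four. Both use check-based
// health with a 10 second minimum healthy time and a 10 minute deadline.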
    74  var (
    75  	canaryUpdate = &structs.UpdateStrategy{
    76  		Canary:          2,
    77  		MaxParallel:     2,
    78  		HealthCheck:     structs.UpdateStrategyHealthCheck_Checks,
    79  		MinHealthyTime:  10 * time.Second,
    80  		HealthyDeadline: 10 * time.Minute,
    81  		Stagger:         31 * time.Second,
    82  	}
    83  
    84  	noCanaryUpdate = &structs.UpdateStrategy{
    85  		MaxParallel:     4,
    86  		HealthCheck:     structs.UpdateStrategyHealthCheck_Checks,
    87  		MinHealthyTime:  10 * time.Second,
    88  		HealthyDeadline: 10 * time.Minute,
    89  		Stagger:         31 * time.Second,
    90  	}
    91  )
    92  
    93  func testLogger() *log.Logger {
    94  	return log.New(os.Stderr, "", log.LstdFlags)
    95  }
    96  
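// allocUpdateFnIgnore, allocUpdateFnDestructive, and allocUpdateFnInplace are
// canned allocUpdateType implementations. As used in this file the returned
// tuple reads as (ignore, destructive, in-place updated alloc): Ignore leaves
// the allocation untouched, Destructive forces a stop-and-replace, and Inplace
// returns a copy of the allocation rewritten to the new task group's task
// resources while keeping the existing network assignments.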
    97  func allocUpdateFnIgnore(*structs.Allocation, *structs.Job, *structs.TaskGroup) (bool, bool, *structs.Allocation) {
    98  	return true, false, nil
    99  }
   100  
   101  func allocUpdateFnDestructive(*structs.Allocation, *structs.Job, *structs.TaskGroup) (bool, bool, *structs.Allocation) {
   102  	return false, true, nil
   103  }
   104  
   105  func allocUpdateFnInplace(existing *structs.Allocation, _ *structs.Job, newTG *structs.TaskGroup) (bool, bool, *structs.Allocation) {
   106  	// Create a shallow copy
   107  	newAlloc := new(structs.Allocation)
   108  	*newAlloc = *existing
   109  	newAlloc.TaskResources = make(map[string]*structs.Resources)
   110  
   111  	// Use the new task resources but keep the network from the old
   112  	for _, task := range newTG.Tasks {
   113  		r := task.Resources.Copy()
   114  		r.Networks = existing.TaskResources[task.Name].Networks
   115  		newAlloc.TaskResources[task.Name] = r
   116  	}
   117  
   118  	return false, false, newAlloc
   119  }
   120  
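// allocUpdateFnMock builds an allocUpdateType that dispatches on allocation
// ID: allocations listed in handled use their specific update function and
// everything else falls through to unhandled. Tests use it to treat canaries
// or already-updated allocations differently from the rest of the group.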
   121  func allocUpdateFnMock(handled map[string]allocUpdateType, unhandled allocUpdateType) allocUpdateType {
   122  	return func(existing *structs.Allocation, newJob *structs.Job, newTG *structs.TaskGroup) (bool, bool, *structs.Allocation) {
   123  		if fn, ok := handled[existing.ID]; ok {
   124  			return fn(existing, newJob, newTG)
   125  		}
   126  
   127  		return unhandled(existing, newJob, newTG)
   128  	}
   129  }
   130  
   131  var (
    132  	// allocationIndexRegex is a regular expression to find the allocation index.
    133  	allocationIndexRegex = regexp.MustCompile(`.+\[(\d+)\]$`)
   134  )
   135  
   136  // allocNameToIndex returns the index of the allocation.
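// For example, a name such as "example.cache[3]" yields 3, while a name with
// no trailing "[n]" suffix (or a malformed index) falls back to 0.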
   137  func allocNameToIndex(name string) uint {
   138  	matches := allocationIndexRegex.FindStringSubmatch(name)
   139  	if len(matches) != 2 {
   140  		return 0
   141  	}
   142  
   143  	index, err := strconv.Atoi(matches[1])
   144  	if err != nil {
   145  		return 0
   146  	}
   147  
   148  	return uint(index)
   149  }
   150  
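// assertNamesHaveIndexes verifies that the given allocation names carry
// exactly the expected indexes. Indexes are treated as a multiset, so a
// repeated index must appear the corresponding number of times and no extra
// or missing indexes are tolerated.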
   151  func assertNamesHaveIndexes(t *testing.T, indexes []int, names []string) {
   152  	m := make(map[uint]int)
   153  	for _, i := range indexes {
   154  		m[uint(i)] += 1
   155  	}
   156  
   157  	for _, n := range names {
   158  		index := allocNameToIndex(n)
   159  		val, contained := m[index]
   160  		if !contained {
   161  			t.Fatalf("Unexpected index %d from name %s\nAll names: %v", index, n, names)
   162  		}
   163  
   164  		val--
   165  		if val < 0 {
   166  			t.Fatalf("Index %d repeated too many times\nAll names: %v", index, names)
   167  		}
   168  		m[index] = val
   169  	}
   170  
   171  	for k, remainder := range m {
   172  		if remainder != 0 {
    173  			t.Fatalf("Index %d has %d expected uses remaining\nAll names: %v", k, remainder, names)
   174  		}
   175  	}
   176  }
   177  
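// assertNoCanariesStopped fails the test if any allocation recorded as a
// placed canary in the deployment shows up in the stop results.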
   178  func assertNoCanariesStopped(t *testing.T, d *structs.Deployment, stop []allocStopResult) {
   179  	canaryIndex := make(map[string]struct{})
   180  	for _, state := range d.TaskGroups {
   181  		for _, c := range state.PlacedCanaries {
   182  			canaryIndex[c] = struct{}{}
   183  		}
   184  	}
   185  
   186  	for _, s := range stop {
   187  		if _, ok := canaryIndex[s.alloc.ID]; ok {
   188  			t.Fatalf("Stopping canary alloc %q %q", s.alloc.ID, s.alloc.Name)
   189  		}
   190  	}
   191  }
   192  
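// assertPlaceResultsHavePreviousAllocs checks that exactly numPrevious of the
// placements are linked to a previous allocation (i.e. are migrations), that
// placement names are unique, and that each linked placement reuses its
// previous allocation's name.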
   193  func assertPlaceResultsHavePreviousAllocs(t *testing.T, numPrevious int, place []allocPlaceResult) {
   194  	names := make(map[string]struct{}, numPrevious)
   195  
   196  	found := 0
   197  	for _, p := range place {
   198  		if _, ok := names[p.name]; ok {
   199  			t.Fatalf("Name %q already placed", p.name)
   200  		}
   201  		names[p.name] = struct{}{}
   202  
   203  		if p.previousAlloc == nil {
   204  			continue
   205  		}
   206  
   207  		if act := p.previousAlloc.Name; p.name != act {
   208  			t.Fatalf("Name mismatch on previous alloc; got %q; want %q", act, p.name)
   209  		}
   210  		found++
   211  	}
   212  	if numPrevious != found {
   213  		t.Fatalf("wanted %d; got %d placements with previous allocs", numPrevious, found)
   214  	}
   215  }
   216  
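// intRange expands pairs of inclusive bounds into a flat slice of indexes;
// for example, intRange(0, 2, 5, 6) yields [0 1 2 5 6]. It is used with
// assertNamesHaveIndexes to describe which allocation indexes are expected.
// An odd number of arguments is invalid and returns nil.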
   217  func intRange(pairs ...int) []int {
   218  	if len(pairs)%2 != 0 {
   219  		return nil
   220  	}
   221  
   222  	var r []int
   223  	for i := 0; i < len(pairs); i += 2 {
   224  		for j := pairs[i]; j <= pairs[i+1]; j++ {
   225  			r = append(r, j)
   226  		}
   227  	}
   228  	return r
   229  }
   230  
   231  func placeResultsToNames(place []allocPlaceResult) []string {
   232  	names := make([]string, 0, len(place))
   233  	for _, p := range place {
   234  		names = append(names, p.name)
   235  	}
   236  	return names
   237  }
   238  
   239  func destructiveResultsToNames(destructive []allocDestructiveResult) []string {
   240  	names := make([]string, 0, len(destructive))
   241  	for _, d := range destructive {
   242  		names = append(names, d.placeName)
   243  	}
   244  	return names
   245  }
   246  
   247  func stopResultsToNames(stop []allocStopResult) []string {
   248  	names := make([]string, 0, len(stop))
   249  	for _, s := range stop {
   250  		names = append(names, s.alloc.Name)
   251  	}
   252  	return names
   253  }
   254  
   255  func allocsToNames(allocs []*structs.Allocation) []string {
   256  	names := make([]string, 0, len(allocs))
   257  	for _, a := range allocs {
   258  		names = append(names, a.Name)
   259  	}
   260  	return names
   261  }
   262  
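// resultExpectation captures the expected outcome of a single reconciler run:
// the deployment it should create, status updates to existing deployments,
// the number of place/destructive/inplace/stop actions, the per-task-group
// DesiredUpdates annotations, and the follow-up evaluation wait time.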
   263  type resultExpectation struct {
   264  	createDeployment  *structs.Deployment
   265  	deploymentUpdates []*structs.DeploymentStatusUpdate
   266  	place             int
   267  	destructive       int
   268  	inplace           int
   269  	stop              int
   270  	desiredTGUpdates  map[string]*structs.DesiredUpdates
   271  	followupEvalWait  time.Duration
   272  }
   273  
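// assertResults compares a reconcileResults against a resultExpectation,
// failing the test on the first mismatch. Deployment IDs are cleared before
// comparison since they are generated at runtime.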
   274  func assertResults(t *testing.T, r *reconcileResults, exp *resultExpectation) {
   275  
   276  	if exp.createDeployment != nil && r.deployment == nil {
    277  		t.Fatalf("Expected a created deployment; got none")
    278  	} else if exp.createDeployment == nil && r.deployment != nil {
    279  		t.Fatalf("Expected no created deployment; got %#v", r.deployment)
   280  	} else if exp.createDeployment != nil && r.deployment != nil {
   281  		// Clear the deployment ID
   282  		r.deployment.ID, exp.createDeployment.ID = "", ""
   283  		if !reflect.DeepEqual(r.deployment, exp.createDeployment) {
    284  			t.Fatalf("Unexpected created deployment; got\n%#v\nwant\n%#v\nDiff: %v",
   285  				r.deployment, exp.createDeployment, pretty.Diff(r.deployment, exp.createDeployment))
   286  		}
   287  	}
   288  
   289  	if !reflect.DeepEqual(r.deploymentUpdates, exp.deploymentUpdates) {
   290  		t.Fatalf("Unexpected deploymentUpdates: %v", pretty.Diff(r.deploymentUpdates, exp.deploymentUpdates))
   291  	}
   292  	if l := len(r.place); l != exp.place {
   293  		t.Fatalf("Expected %d placements; got %d", exp.place, l)
   294  	}
   295  	if l := len(r.destructiveUpdate); l != exp.destructive {
   296  		t.Fatalf("Expected %d destructive; got %d", exp.destructive, l)
   297  	}
   298  	if l := len(r.inplaceUpdate); l != exp.inplace {
   299  		t.Fatalf("Expected %d inplaceUpdate; got %d", exp.inplace, l)
   300  	}
   301  	if l := len(r.stop); l != exp.stop {
   302  		t.Fatalf("Expected %d stops; got %d", exp.stop, l)
   303  	}
   304  	if l := len(r.desiredTGUpdates); l != len(exp.desiredTGUpdates) {
    305  		t.Fatalf("Expected desired updates for %d task groups; got %d", len(exp.desiredTGUpdates), l)
   306  	}
   307  	if r.followupEvalWait != exp.followupEvalWait {
   308  		t.Fatalf("Unexpected followup eval wait time. Got %v; want %v", r.followupEvalWait, exp.followupEvalWait)
   309  	}
   310  
   311  	// Check the desired updates happened
   312  	for group, desired := range exp.desiredTGUpdates {
   313  		act, ok := r.desiredTGUpdates[group]
   314  		if !ok {
   315  			t.Fatalf("Expected desired updates for group %q", group)
   316  		}
   317  
   318  		if !reflect.DeepEqual(act, desired) {
   319  			t.Fatalf("Unexpected annotations for group %q: %v", group, pretty.Diff(act, desired))
   320  		}
   321  	}
   322  }
   323  
   324  // Tests the reconciler properly handles placements for a job that has no
   325  // existing allocations
   326  func TestReconciler_Place_NoExisting(t *testing.T) {
   327  	job := mock.Job()
   328  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, nil, nil)
   329  	r := reconciler.Compute()
   330  
   331  	// Assert the correct results
   332  	assertResults(t, r, &resultExpectation{
   333  		createDeployment:  nil,
   334  		deploymentUpdates: nil,
   335  		place:             10,
   336  		inplace:           0,
   337  		stop:              0,
   338  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   339  			job.TaskGroups[0].Name: {
   340  				Place: 10,
   341  			},
   342  		},
   343  	})
   344  
   345  	assertNamesHaveIndexes(t, intRange(0, 9), placeResultsToNames(r.place))
   346  }
   347  
   348  // Tests the reconciler properly handles placements for a job that has some
   349  // existing allocations
   350  func TestReconciler_Place_Existing(t *testing.T) {
   351  	job := mock.Job()
   352  
    353  	// Create 5 existing allocations
   354  	var allocs []*structs.Allocation
   355  	for i := 0; i < 5; i++ {
   356  		alloc := mock.Alloc()
   357  		alloc.Job = job
   358  		alloc.JobID = job.ID
   359  		alloc.NodeID = structs.GenerateUUID()
   360  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   361  		allocs = append(allocs, alloc)
   362  	}
   363  
   364  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil)
   365  	r := reconciler.Compute()
   366  
   367  	// Assert the correct results
   368  	assertResults(t, r, &resultExpectation{
   369  		createDeployment:  nil,
   370  		deploymentUpdates: nil,
   371  		place:             5,
   372  		inplace:           0,
   373  		stop:              0,
   374  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   375  			job.TaskGroups[0].Name: {
   376  				Place:  5,
   377  				Ignore: 5,
   378  			},
   379  		},
   380  	})
   381  
   382  	assertNamesHaveIndexes(t, intRange(5, 9), placeResultsToNames(r.place))
   383  }
   384  
   385  // Tests the reconciler properly handles stopping allocations for a job that has
   386  // scaled down
   387  func TestReconciler_ScaleDown_Partial(t *testing.T) {
   388  	// Has desired 10
   389  	job := mock.Job()
   390  
   391  	// Create 20 existing allocations
   392  	var allocs []*structs.Allocation
   393  	for i := 0; i < 20; i++ {
   394  		alloc := mock.Alloc()
   395  		alloc.Job = job
   396  		alloc.JobID = job.ID
   397  		alloc.NodeID = structs.GenerateUUID()
   398  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   399  		allocs = append(allocs, alloc)
   400  	}
   401  
   402  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil)
   403  	r := reconciler.Compute()
   404  
   405  	// Assert the correct results
   406  	assertResults(t, r, &resultExpectation{
   407  		createDeployment:  nil,
   408  		deploymentUpdates: nil,
   409  		place:             0,
   410  		inplace:           0,
   411  		stop:              10,
   412  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   413  			job.TaskGroups[0].Name: {
   414  				Ignore: 10,
   415  				Stop:   10,
   416  			},
   417  		},
   418  	})
   419  
   420  	assertNamesHaveIndexes(t, intRange(10, 19), stopResultsToNames(r.stop))
   421  }
   422  
   423  // Tests the reconciler properly handles stopping allocations for a job that has
   424  // scaled down to zero desired
   425  func TestReconciler_ScaleDown_Zero(t *testing.T) {
   426  	// Set desired 0
   427  	job := mock.Job()
   428  	job.TaskGroups[0].Count = 0
   429  
   430  	// Create 20 existing allocations
   431  	var allocs []*structs.Allocation
   432  	for i := 0; i < 20; i++ {
   433  		alloc := mock.Alloc()
   434  		alloc.Job = job
   435  		alloc.JobID = job.ID
   436  		alloc.NodeID = structs.GenerateUUID()
   437  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   438  		allocs = append(allocs, alloc)
   439  	}
   440  
   441  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil)
   442  	r := reconciler.Compute()
   443  
   444  	// Assert the correct results
   445  	assertResults(t, r, &resultExpectation{
   446  		createDeployment:  nil,
   447  		deploymentUpdates: nil,
   448  		place:             0,
   449  		inplace:           0,
   450  		stop:              20,
   451  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   452  			job.TaskGroups[0].Name: {
   453  				Stop: 20,
   454  			},
   455  		},
   456  	})
   457  
   458  	assertNamesHaveIndexes(t, intRange(0, 19), stopResultsToNames(r.stop))
   459  }
   460  
   461  // Tests the reconciler properly handles inplace upgrading allocations
   462  func TestReconciler_Inplace(t *testing.T) {
   463  	job := mock.Job()
   464  
   465  	// Create 10 existing allocations
   466  	var allocs []*structs.Allocation
   467  	for i := 0; i < 10; i++ {
   468  		alloc := mock.Alloc()
   469  		alloc.Job = job
   470  		alloc.JobID = job.ID
   471  		alloc.NodeID = structs.GenerateUUID()
   472  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   473  		allocs = append(allocs, alloc)
   474  	}
   475  
   476  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnInplace, false, job.ID, job, nil, allocs, nil)
   477  	r := reconciler.Compute()
   478  
   479  	// Assert the correct results
   480  	assertResults(t, r, &resultExpectation{
   481  		createDeployment:  nil,
   482  		deploymentUpdates: nil,
   483  		place:             0,
   484  		inplace:           10,
   485  		stop:              0,
   486  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   487  			job.TaskGroups[0].Name: {
   488  				InPlaceUpdate: 10,
   489  			},
   490  		},
   491  	})
   492  
   493  	assertNamesHaveIndexes(t, intRange(0, 9), allocsToNames(r.inplaceUpdate))
   494  }
   495  
   496  // Tests the reconciler properly handles inplace upgrading allocations while
   497  // scaling up
   498  func TestReconciler_Inplace_ScaleUp(t *testing.T) {
   499  	// Set desired 15
   500  	job := mock.Job()
   501  	job.TaskGroups[0].Count = 15
   502  
   503  	// Create 10 existing allocations
   504  	var allocs []*structs.Allocation
   505  	for i := 0; i < 10; i++ {
   506  		alloc := mock.Alloc()
   507  		alloc.Job = job
   508  		alloc.JobID = job.ID
   509  		alloc.NodeID = structs.GenerateUUID()
   510  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   511  		allocs = append(allocs, alloc)
   512  	}
   513  
   514  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnInplace, false, job.ID, job, nil, allocs, nil)
   515  	r := reconciler.Compute()
   516  
   517  	// Assert the correct results
   518  	assertResults(t, r, &resultExpectation{
   519  		createDeployment:  nil,
   520  		deploymentUpdates: nil,
   521  		place:             5,
   522  		inplace:           10,
   523  		stop:              0,
   524  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   525  			job.TaskGroups[0].Name: {
   526  				Place:         5,
   527  				InPlaceUpdate: 10,
   528  			},
   529  		},
   530  	})
   531  
   532  	assertNamesHaveIndexes(t, intRange(0, 9), allocsToNames(r.inplaceUpdate))
   533  	assertNamesHaveIndexes(t, intRange(10, 14), placeResultsToNames(r.place))
   534  }
   535  
   536  // Tests the reconciler properly handles inplace upgrading allocations while
   537  // scaling down
   538  func TestReconciler_Inplace_ScaleDown(t *testing.T) {
   539  	// Set desired 5
   540  	job := mock.Job()
   541  	job.TaskGroups[0].Count = 5
   542  
   543  	// Create 10 existing allocations
   544  	var allocs []*structs.Allocation
   545  	for i := 0; i < 10; i++ {
   546  		alloc := mock.Alloc()
   547  		alloc.Job = job
   548  		alloc.JobID = job.ID
   549  		alloc.NodeID = structs.GenerateUUID()
   550  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   551  		allocs = append(allocs, alloc)
   552  	}
   553  
   554  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnInplace, false, job.ID, job, nil, allocs, nil)
   555  	r := reconciler.Compute()
   556  
   557  	// Assert the correct results
   558  	assertResults(t, r, &resultExpectation{
   559  		createDeployment:  nil,
   560  		deploymentUpdates: nil,
   561  		place:             0,
   562  		inplace:           5,
   563  		stop:              5,
   564  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   565  			job.TaskGroups[0].Name: {
   566  				Stop:          5,
   567  				InPlaceUpdate: 5,
   568  			},
   569  		},
   570  	})
   571  
   572  	assertNamesHaveIndexes(t, intRange(0, 4), allocsToNames(r.inplaceUpdate))
   573  	assertNamesHaveIndexes(t, intRange(5, 9), stopResultsToNames(r.stop))
   574  }
   575  
   576  // Tests the reconciler properly handles destructive upgrading allocations
   577  func TestReconciler_Destructive(t *testing.T) {
   578  	job := mock.Job()
   579  
   580  	// Create 10 existing allocations
   581  	var allocs []*structs.Allocation
   582  	for i := 0; i < 10; i++ {
   583  		alloc := mock.Alloc()
   584  		alloc.Job = job
   585  		alloc.JobID = job.ID
   586  		alloc.NodeID = structs.GenerateUUID()
   587  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   588  		allocs = append(allocs, alloc)
   589  	}
   590  
   591  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil)
   592  	r := reconciler.Compute()
   593  
   594  	// Assert the correct results
   595  	assertResults(t, r, &resultExpectation{
   596  		createDeployment:  nil,
   597  		deploymentUpdates: nil,
   598  		destructive:       10,
   599  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   600  			job.TaskGroups[0].Name: {
   601  				DestructiveUpdate: 10,
   602  			},
   603  		},
   604  	})
   605  
   606  	assertNamesHaveIndexes(t, intRange(0, 9), destructiveResultsToNames(r.destructiveUpdate))
   607  }
   608  
   609  // Tests the reconciler properly handles destructive upgrading allocations while
   610  // scaling up
   611  func TestReconciler_Destructive_ScaleUp(t *testing.T) {
   612  	// Set desired 15
   613  	job := mock.Job()
   614  	job.TaskGroups[0].Count = 15
   615  
   616  	// Create 10 existing allocations
   617  	var allocs []*structs.Allocation
   618  	for i := 0; i < 10; i++ {
   619  		alloc := mock.Alloc()
   620  		alloc.Job = job
   621  		alloc.JobID = job.ID
   622  		alloc.NodeID = structs.GenerateUUID()
   623  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   624  		allocs = append(allocs, alloc)
   625  	}
   626  
   627  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil)
   628  	r := reconciler.Compute()
   629  
   630  	// Assert the correct results
   631  	assertResults(t, r, &resultExpectation{
   632  		createDeployment:  nil,
   633  		deploymentUpdates: nil,
   634  		place:             5,
   635  		destructive:       10,
   636  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   637  			job.TaskGroups[0].Name: {
   638  				Place:             5,
   639  				DestructiveUpdate: 10,
   640  			},
   641  		},
   642  	})
   643  
   644  	assertNamesHaveIndexes(t, intRange(0, 9), destructiveResultsToNames(r.destructiveUpdate))
   645  	assertNamesHaveIndexes(t, intRange(10, 14), placeResultsToNames(r.place))
   646  }
   647  
   648  // Tests the reconciler properly handles destructive upgrading allocations while
   649  // scaling down
   650  func TestReconciler_Destructive_ScaleDown(t *testing.T) {
   651  	// Set desired 5
   652  	job := mock.Job()
   653  	job.TaskGroups[0].Count = 5
   654  
   655  	// Create 10 existing allocations
   656  	var allocs []*structs.Allocation
   657  	for i := 0; i < 10; i++ {
   658  		alloc := mock.Alloc()
   659  		alloc.Job = job
   660  		alloc.JobID = job.ID
   661  		alloc.NodeID = structs.GenerateUUID()
   662  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   663  		allocs = append(allocs, alloc)
   664  	}
   665  
   666  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil)
   667  	r := reconciler.Compute()
   668  
   669  	// Assert the correct results
   670  	assertResults(t, r, &resultExpectation{
   671  		createDeployment:  nil,
   672  		deploymentUpdates: nil,
   673  		destructive:       5,
   674  		stop:              5,
   675  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   676  			job.TaskGroups[0].Name: {
   677  				Stop:              5,
   678  				DestructiveUpdate: 5,
   679  			},
   680  		},
   681  	})
   682  
   683  	assertNamesHaveIndexes(t, intRange(5, 9), stopResultsToNames(r.stop))
   684  	assertNamesHaveIndexes(t, intRange(0, 4), destructiveResultsToNames(r.destructiveUpdate))
   685  }
   686  
   687  // Tests the reconciler properly handles lost nodes with allocations
   688  func TestReconciler_LostNode(t *testing.T) {
   689  	job := mock.Job()
   690  
   691  	// Create 10 existing allocations
   692  	var allocs []*structs.Allocation
   693  	for i := 0; i < 10; i++ {
   694  		alloc := mock.Alloc()
   695  		alloc.Job = job
   696  		alloc.JobID = job.ID
   697  		alloc.NodeID = structs.GenerateUUID()
   698  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   699  		allocs = append(allocs, alloc)
   700  	}
   701  
   702  	// Build a map of tainted nodes
   703  	tainted := make(map[string]*structs.Node, 2)
   704  	for i := 0; i < 2; i++ {
   705  		n := mock.Node()
   706  		n.ID = allocs[i].NodeID
   707  		n.Status = structs.NodeStatusDown
   708  		tainted[n.ID] = n
   709  	}
   710  
   711  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted)
   712  	r := reconciler.Compute()
   713  
   714  	// Assert the correct results
   715  	assertResults(t, r, &resultExpectation{
   716  		createDeployment:  nil,
   717  		deploymentUpdates: nil,
   718  		place:             2,
   719  		inplace:           0,
   720  		stop:              2,
   721  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   722  			job.TaskGroups[0].Name: {
   723  				Place:  2,
   724  				Stop:   2,
   725  				Ignore: 8,
   726  			},
   727  		},
   728  	})
   729  
   730  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
   731  	assertNamesHaveIndexes(t, intRange(0, 1), placeResultsToNames(r.place))
   732  }
   733  
   734  // Tests the reconciler properly handles lost nodes with allocations while
   735  // scaling up
   736  func TestReconciler_LostNode_ScaleUp(t *testing.T) {
   737  	// Set desired 15
   738  	job := mock.Job()
   739  	job.TaskGroups[0].Count = 15
   740  
   741  	// Create 10 existing allocations
   742  	var allocs []*structs.Allocation
   743  	for i := 0; i < 10; i++ {
   744  		alloc := mock.Alloc()
   745  		alloc.Job = job
   746  		alloc.JobID = job.ID
   747  		alloc.NodeID = structs.GenerateUUID()
   748  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   749  		allocs = append(allocs, alloc)
   750  	}
   751  
   752  	// Build a map of tainted nodes
   753  	tainted := make(map[string]*structs.Node, 2)
   754  	for i := 0; i < 2; i++ {
   755  		n := mock.Node()
   756  		n.ID = allocs[i].NodeID
   757  		n.Status = structs.NodeStatusDown
   758  		tainted[n.ID] = n
   759  	}
   760  
   761  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted)
   762  	r := reconciler.Compute()
   763  
   764  	// Assert the correct results
   765  	assertResults(t, r, &resultExpectation{
   766  		createDeployment:  nil,
   767  		deploymentUpdates: nil,
   768  		place:             7,
   769  		inplace:           0,
   770  		stop:              2,
   771  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   772  			job.TaskGroups[0].Name: {
   773  				Place:  7,
   774  				Stop:   2,
   775  				Ignore: 8,
   776  			},
   777  		},
   778  	})
   779  
   780  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
   781  	assertNamesHaveIndexes(t, intRange(0, 1, 10, 14), placeResultsToNames(r.place))
   782  }
   783  
   784  // Tests the reconciler properly handles lost nodes with allocations while
   785  // scaling down
   786  func TestReconciler_LostNode_ScaleDown(t *testing.T) {
   787  	// Set desired 5
   788  	job := mock.Job()
   789  	job.TaskGroups[0].Count = 5
   790  
   791  	// Create 10 existing allocations
   792  	var allocs []*structs.Allocation
   793  	for i := 0; i < 10; i++ {
   794  		alloc := mock.Alloc()
   795  		alloc.Job = job
   796  		alloc.JobID = job.ID
   797  		alloc.NodeID = structs.GenerateUUID()
   798  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   799  		allocs = append(allocs, alloc)
   800  	}
   801  
   802  	// Build a map of tainted nodes
   803  	tainted := make(map[string]*structs.Node, 2)
   804  	for i := 0; i < 2; i++ {
   805  		n := mock.Node()
   806  		n.ID = allocs[i].NodeID
   807  		n.Status = structs.NodeStatusDown
   808  		tainted[n.ID] = n
   809  	}
   810  
   811  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted)
   812  	r := reconciler.Compute()
   813  
   814  	// Assert the correct results
   815  	assertResults(t, r, &resultExpectation{
   816  		createDeployment:  nil,
   817  		deploymentUpdates: nil,
   818  		place:             0,
   819  		inplace:           0,
   820  		stop:              5,
   821  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   822  			job.TaskGroups[0].Name: {
   823  				Stop:   5,
   824  				Ignore: 5,
   825  			},
   826  		},
   827  	})
   828  
   829  	assertNamesHaveIndexes(t, intRange(0, 1, 7, 9), stopResultsToNames(r.stop))
   830  }
   831  
   832  // Tests the reconciler properly handles draining nodes with allocations
   833  func TestReconciler_DrainNode(t *testing.T) {
   834  	job := mock.Job()
   835  
   836  	// Create 10 existing allocations
   837  	var allocs []*structs.Allocation
   838  	for i := 0; i < 10; i++ {
   839  		alloc := mock.Alloc()
   840  		alloc.Job = job
   841  		alloc.JobID = job.ID
   842  		alloc.NodeID = structs.GenerateUUID()
   843  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   844  		allocs = append(allocs, alloc)
   845  	}
   846  
   847  	// Build a map of tainted nodes
   848  	tainted := make(map[string]*structs.Node, 2)
   849  	for i := 0; i < 2; i++ {
   850  		n := mock.Node()
   851  		n.ID = allocs[i].NodeID
   852  		n.Drain = true
   853  		tainted[n.ID] = n
   854  	}
   855  
   856  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted)
   857  	r := reconciler.Compute()
   858  
   859  	// Assert the correct results
   860  	assertResults(t, r, &resultExpectation{
   861  		createDeployment:  nil,
   862  		deploymentUpdates: nil,
   863  		place:             2,
   864  		inplace:           0,
   865  		stop:              2,
   866  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   867  			job.TaskGroups[0].Name: {
   868  				Migrate: 2,
   869  				Ignore:  8,
   870  			},
   871  		},
   872  	})
   873  
   874  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
   875  	assertNamesHaveIndexes(t, intRange(0, 1), placeResultsToNames(r.place))
   876  	assertPlaceResultsHavePreviousAllocs(t, 2, r.place)
   877  }
   878  
   879  // Tests the reconciler properly handles draining nodes with allocations while
   880  // scaling up
   881  func TestReconciler_DrainNode_ScaleUp(t *testing.T) {
   882  	// Set desired 15
   883  	job := mock.Job()
   884  	job.TaskGroups[0].Count = 15
   885  
   886  	// Create 10 existing allocations
   887  	var allocs []*structs.Allocation
   888  	for i := 0; i < 10; i++ {
   889  		alloc := mock.Alloc()
   890  		alloc.Job = job
   891  		alloc.JobID = job.ID
   892  		alloc.NodeID = structs.GenerateUUID()
   893  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   894  		allocs = append(allocs, alloc)
   895  	}
   896  
   897  	// Build a map of tainted nodes
   898  	tainted := make(map[string]*structs.Node, 2)
   899  	for i := 0; i < 2; i++ {
   900  		n := mock.Node()
   901  		n.ID = allocs[i].NodeID
   902  		n.Drain = true
   903  		tainted[n.ID] = n
   904  	}
   905  
   906  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted)
   907  	r := reconciler.Compute()
   908  
   909  	// Assert the correct results
   910  	assertResults(t, r, &resultExpectation{
   911  		createDeployment:  nil,
   912  		deploymentUpdates: nil,
   913  		place:             7,
   914  		inplace:           0,
   915  		stop:              2,
   916  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   917  			job.TaskGroups[0].Name: {
   918  				Place:   5,
   919  				Migrate: 2,
   920  				Ignore:  8,
   921  			},
   922  		},
   923  	})
   924  
   925  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
   926  	assertNamesHaveIndexes(t, intRange(0, 1, 10, 14), placeResultsToNames(r.place))
   927  	assertPlaceResultsHavePreviousAllocs(t, 2, r.place)
   928  }
   929  
   930  // Tests the reconciler properly handles draining nodes with allocations while
   931  // scaling down
   932  func TestReconciler_DrainNode_ScaleDown(t *testing.T) {
   933  	// Set desired 8
   934  	job := mock.Job()
   935  	job.TaskGroups[0].Count = 8
   936  
   937  	// Create 10 existing allocations
   938  	var allocs []*structs.Allocation
   939  	for i := 0; i < 10; i++ {
   940  		alloc := mock.Alloc()
   941  		alloc.Job = job
   942  		alloc.JobID = job.ID
   943  		alloc.NodeID = structs.GenerateUUID()
   944  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   945  		allocs = append(allocs, alloc)
   946  	}
   947  
   948  	// Build a map of tainted nodes
   949  	tainted := make(map[string]*structs.Node, 3)
   950  	for i := 0; i < 3; i++ {
   951  		n := mock.Node()
   952  		n.ID = allocs[i].NodeID
   953  		n.Drain = true
   954  		tainted[n.ID] = n
   955  	}
   956  
   957  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted)
   958  	r := reconciler.Compute()
   959  
   960  	// Assert the correct results
   961  	assertResults(t, r, &resultExpectation{
   962  		createDeployment:  nil,
   963  		deploymentUpdates: nil,
   964  		place:             1,
   965  		inplace:           0,
   966  		stop:              3,
   967  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
   968  			job.TaskGroups[0].Name: {
   969  				Migrate: 1,
   970  				Stop:    2,
   971  				Ignore:  7,
   972  			},
   973  		},
   974  	})
   975  
   976  	assertNamesHaveIndexes(t, intRange(0, 2), stopResultsToNames(r.stop))
   977  	assertNamesHaveIndexes(t, intRange(0, 0), placeResultsToNames(r.place))
   978  	assertPlaceResultsHavePreviousAllocs(t, 1, r.place)
   979  }
   980  
   981  // Tests the reconciler properly handles a task group being removed
   982  func TestReconciler_RemovedTG(t *testing.T) {
   983  	job := mock.Job()
   984  
   985  	// Create 10 allocations for a tg that no longer exists
   986  	var allocs []*structs.Allocation
   987  	for i := 0; i < 10; i++ {
   988  		alloc := mock.Alloc()
   989  		alloc.Job = job
   990  		alloc.JobID = job.ID
   991  		alloc.NodeID = structs.GenerateUUID()
   992  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
   993  		allocs = append(allocs, alloc)
   994  	}
   995  
   996  	oldName := job.TaskGroups[0].Name
   997  	newName := "different"
   998  	job.TaskGroups[0].Name = newName
   999  
  1000  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil)
  1001  	r := reconciler.Compute()
  1002  
  1003  	// Assert the correct results
  1004  	assertResults(t, r, &resultExpectation{
  1005  		createDeployment:  nil,
  1006  		deploymentUpdates: nil,
  1007  		place:             10,
  1008  		inplace:           0,
  1009  		stop:              10,
  1010  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1011  			oldName: {
  1012  				Stop: 10,
  1013  			},
  1014  			newName: {
  1015  				Place: 10,
  1016  			},
  1017  		},
  1018  	})
  1019  
  1020  	assertNamesHaveIndexes(t, intRange(0, 9), stopResultsToNames(r.stop))
  1021  	assertNamesHaveIndexes(t, intRange(0, 9), placeResultsToNames(r.place))
  1022  }
  1023  
   1024  // Tests the reconciler properly handles a stopped job, both explicitly stopped and nil
  1025  func TestReconciler_JobStopped(t *testing.T) {
  1026  	job := mock.Job()
  1027  	job.Stop = true
  1028  
  1029  	cases := []struct {
  1030  		name             string
  1031  		job              *structs.Job
  1032  		jobID, taskGroup string
  1033  	}{
  1034  		{
  1035  			name:      "stopped job",
  1036  			job:       job,
  1037  			jobID:     job.ID,
  1038  			taskGroup: job.TaskGroups[0].Name,
  1039  		},
  1040  		{
  1041  			name:      "nil job",
  1042  			job:       nil,
  1043  			jobID:     "foo",
  1044  			taskGroup: "bar",
  1045  		},
  1046  	}
  1047  
  1048  	for _, c := range cases {
  1049  		t.Run(c.name, func(t *testing.T) {
  1050  			// Create 10 allocations
  1051  			var allocs []*structs.Allocation
  1052  			for i := 0; i < 10; i++ {
  1053  				alloc := mock.Alloc()
  1054  				alloc.Job = c.job
  1055  				alloc.JobID = c.jobID
  1056  				alloc.NodeID = structs.GenerateUUID()
  1057  				alloc.Name = structs.AllocName(c.jobID, c.taskGroup, uint(i))
  1058  				alloc.TaskGroup = c.taskGroup
  1059  				allocs = append(allocs, alloc)
  1060  			}
  1061  
  1062  			reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, c.jobID, c.job, nil, allocs, nil)
  1063  			r := reconciler.Compute()
  1064  
  1065  			// Assert the correct results
  1066  			assertResults(t, r, &resultExpectation{
  1067  				createDeployment:  nil,
  1068  				deploymentUpdates: nil,
  1069  				place:             0,
  1070  				inplace:           0,
  1071  				stop:              10,
  1072  				desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1073  					c.taskGroup: {
  1074  						Stop: 10,
  1075  					},
  1076  				},
  1077  			})
  1078  
  1079  			assertNamesHaveIndexes(t, intRange(0, 9), stopResultsToNames(r.stop))
  1080  		})
  1081  	}
  1082  }
  1083  
  1084  // Tests the reconciler properly handles jobs with multiple task groups
  1085  func TestReconciler_MultiTG(t *testing.T) {
  1086  	job := mock.Job()
  1087  	tg2 := job.TaskGroups[0].Copy()
  1088  	tg2.Name = "foo"
  1089  	job.TaskGroups = append(job.TaskGroups, tg2)
  1090  
  1091  	// Create 2 existing allocations for the first tg
  1092  	var allocs []*structs.Allocation
  1093  	for i := 0; i < 2; i++ {
  1094  		alloc := mock.Alloc()
  1095  		alloc.Job = job
  1096  		alloc.JobID = job.ID
  1097  		alloc.NodeID = structs.GenerateUUID()
  1098  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1099  		allocs = append(allocs, alloc)
  1100  	}
  1101  
  1102  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil)
  1103  	r := reconciler.Compute()
  1104  
  1105  	// Assert the correct results
  1106  	assertResults(t, r, &resultExpectation{
  1107  		createDeployment:  nil,
  1108  		deploymentUpdates: nil,
  1109  		place:             18,
  1110  		inplace:           0,
  1111  		stop:              0,
  1112  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1113  			job.TaskGroups[0].Name: {
  1114  				Place:  8,
  1115  				Ignore: 2,
  1116  			},
  1117  			tg2.Name: {
  1118  				Place: 10,
  1119  			},
  1120  		},
  1121  	})
  1122  
  1123  	assertNamesHaveIndexes(t, intRange(2, 9, 0, 9), placeResultsToNames(r.place))
  1124  }
  1125  
  1126  // Tests the reconciler cancels an old deployment when the job is being stopped
  1127  func TestReconciler_CancelDeployment_JobStop(t *testing.T) {
  1128  	job := mock.Job()
  1129  	job.Stop = true
  1130  
  1131  	running := structs.NewDeployment(job)
  1132  	failed := structs.NewDeployment(job)
  1133  	failed.Status = structs.DeploymentStatusFailed
  1134  
  1135  	cases := []struct {
  1136  		name             string
  1137  		job              *structs.Job
  1138  		jobID, taskGroup string
  1139  		deployment       *structs.Deployment
  1140  		cancel           bool
  1141  	}{
  1142  		{
  1143  			name:       "stopped job, running deployment",
  1144  			job:        job,
  1145  			jobID:      job.ID,
  1146  			taskGroup:  job.TaskGroups[0].Name,
  1147  			deployment: running,
  1148  			cancel:     true,
  1149  		},
  1150  		{
  1151  			name:       "nil job, running deployment",
  1152  			job:        nil,
  1153  			jobID:      "foo",
  1154  			taskGroup:  "bar",
  1155  			deployment: running,
  1156  			cancel:     true,
  1157  		},
  1158  		{
  1159  			name:       "stopped job, failed deployment",
  1160  			job:        job,
  1161  			jobID:      job.ID,
  1162  			taskGroup:  job.TaskGroups[0].Name,
  1163  			deployment: failed,
  1164  			cancel:     false,
  1165  		},
  1166  		{
  1167  			name:       "nil job, failed deployment",
  1168  			job:        nil,
  1169  			jobID:      "foo",
  1170  			taskGroup:  "bar",
  1171  			deployment: failed,
  1172  			cancel:     false,
  1173  		},
  1174  	}
  1175  
  1176  	for _, c := range cases {
  1177  		t.Run(c.name, func(t *testing.T) {
  1178  			// Create 10 allocations
  1179  			var allocs []*structs.Allocation
  1180  			for i := 0; i < 10; i++ {
  1181  				alloc := mock.Alloc()
  1182  				alloc.Job = c.job
  1183  				alloc.JobID = c.jobID
  1184  				alloc.NodeID = structs.GenerateUUID()
  1185  				alloc.Name = structs.AllocName(c.jobID, c.taskGroup, uint(i))
  1186  				alloc.TaskGroup = c.taskGroup
  1187  				allocs = append(allocs, alloc)
  1188  			}
  1189  
  1190  			reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, c.jobID, c.job, c.deployment, allocs, nil)
  1191  			r := reconciler.Compute()
  1192  
  1193  			var updates []*structs.DeploymentStatusUpdate
  1194  			if c.cancel {
  1195  				updates = []*structs.DeploymentStatusUpdate{
  1196  					{
  1197  						DeploymentID:      c.deployment.ID,
  1198  						Status:            structs.DeploymentStatusCancelled,
  1199  						StatusDescription: structs.DeploymentStatusDescriptionStoppedJob,
  1200  					},
  1201  				}
  1202  			}
  1203  
  1204  			// Assert the correct results
  1205  			assertResults(t, r, &resultExpectation{
  1206  				createDeployment:  nil,
  1207  				deploymentUpdates: updates,
  1208  				place:             0,
  1209  				inplace:           0,
  1210  				stop:              10,
  1211  				desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1212  					c.taskGroup: {
  1213  						Stop: 10,
  1214  					},
  1215  				},
  1216  			})
  1217  
  1218  			assertNamesHaveIndexes(t, intRange(0, 9), stopResultsToNames(r.stop))
  1219  		})
  1220  	}
  1221  }
  1222  
  1223  // Tests the reconciler cancels an old deployment when the job is updated
  1224  func TestReconciler_CancelDeployment_JobUpdate(t *testing.T) {
  1225  	// Create a base job
  1226  	job := mock.Job()
  1227  
  1228  	// Create two deployments
  1229  	running := structs.NewDeployment(job)
  1230  	failed := structs.NewDeployment(job)
  1231  	failed.Status = structs.DeploymentStatusFailed
  1232  
  1233  	// Make the job newer than the deployment
  1234  	job.Version += 10
  1235  
  1236  	cases := []struct {
  1237  		name       string
  1238  		deployment *structs.Deployment
  1239  		cancel     bool
  1240  	}{
  1241  		{
  1242  			name:       "running deployment",
  1243  			deployment: running,
  1244  			cancel:     true,
  1245  		},
  1246  		{
  1247  			name:       "failed deployment",
  1248  			deployment: failed,
  1249  			cancel:     false,
  1250  		},
  1251  	}
  1252  
  1253  	for _, c := range cases {
  1254  		t.Run(c.name, func(t *testing.T) {
  1255  			// Create 10 allocations
  1256  			var allocs []*structs.Allocation
  1257  			for i := 0; i < 10; i++ {
  1258  				alloc := mock.Alloc()
  1259  				alloc.Job = job
  1260  				alloc.JobID = job.ID
  1261  				alloc.NodeID = structs.GenerateUUID()
  1262  				alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1263  				alloc.TaskGroup = job.TaskGroups[0].Name
  1264  				allocs = append(allocs, alloc)
  1265  			}
  1266  
  1267  			reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, c.deployment, allocs, nil)
  1268  			r := reconciler.Compute()
  1269  
  1270  			var updates []*structs.DeploymentStatusUpdate
  1271  			if c.cancel {
  1272  				updates = []*structs.DeploymentStatusUpdate{
  1273  					{
  1274  						DeploymentID:      c.deployment.ID,
  1275  						Status:            structs.DeploymentStatusCancelled,
  1276  						StatusDescription: structs.DeploymentStatusDescriptionNewerJob,
  1277  					},
  1278  				}
  1279  			}
  1280  
  1281  			// Assert the correct results
  1282  			assertResults(t, r, &resultExpectation{
  1283  				createDeployment:  nil,
  1284  				deploymentUpdates: updates,
  1285  				place:             0,
  1286  				inplace:           0,
  1287  				stop:              0,
  1288  				desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1289  					job.TaskGroups[0].Name: {
  1290  						Ignore: 10,
  1291  					},
  1292  				},
  1293  			})
  1294  		})
  1295  	}
  1296  }
  1297  
  1298  // Tests the reconciler creates a deployment and does a rolling upgrade with
  1299  // destructive changes
  1300  func TestReconciler_CreateDeployment_RollingUpgrade_Destructive(t *testing.T) {
  1301  	job := mock.Job()
  1302  	job.TaskGroups[0].Update = noCanaryUpdate
  1303  
  1304  	// Create 10 allocations from the old job
  1305  	var allocs []*structs.Allocation
  1306  	for i := 0; i < 10; i++ {
  1307  		alloc := mock.Alloc()
  1308  		alloc.Job = job
  1309  		alloc.JobID = job.ID
  1310  		alloc.NodeID = structs.GenerateUUID()
  1311  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1312  		alloc.TaskGroup = job.TaskGroups[0].Name
  1313  		allocs = append(allocs, alloc)
  1314  	}
  1315  
  1316  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil)
  1317  	r := reconciler.Compute()
  1318  
  1319  	d := structs.NewDeployment(job)
  1320  	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  1321  		DesiredTotal: 10,
  1322  	}
  1323  
  1324  	// Assert the correct results
  1325  	assertResults(t, r, &resultExpectation{
  1326  		createDeployment:  d,
  1327  		deploymentUpdates: nil,
  1328  		destructive:       4,
  1329  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1330  			job.TaskGroups[0].Name: {
  1331  				DestructiveUpdate: 4,
  1332  				Ignore:            6,
  1333  			},
  1334  		},
  1335  	})
  1336  
  1337  	assertNamesHaveIndexes(t, intRange(0, 3), destructiveResultsToNames(r.destructiveUpdate))
  1338  }
  1339  
  1340  // Tests the reconciler creates a deployment for inplace updates
  1341  func TestReconciler_CreateDeployment_RollingUpgrade_Inplace(t *testing.T) {
  1342  	job := mock.Job()
  1343  	job.TaskGroups[0].Update = noCanaryUpdate
  1344  
  1345  	// Create 10 allocations from the old job
  1346  	var allocs []*structs.Allocation
  1347  	for i := 0; i < 10; i++ {
  1348  		alloc := mock.Alloc()
  1349  		alloc.Job = job
  1350  		alloc.JobID = job.ID
  1351  		alloc.NodeID = structs.GenerateUUID()
  1352  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1353  		alloc.TaskGroup = job.TaskGroups[0].Name
  1354  		allocs = append(allocs, alloc)
  1355  	}
  1356  
  1357  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnInplace, false, job.ID, job, nil, allocs, nil)
  1358  	r := reconciler.Compute()
  1359  
  1360  	d := structs.NewDeployment(job)
  1361  	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  1362  		DesiredTotal: 10,
  1363  	}
  1364  
  1365  	// Assert the correct results
  1366  	assertResults(t, r, &resultExpectation{
  1367  		createDeployment:  d,
  1368  		deploymentUpdates: nil,
  1369  		place:             0,
  1370  		inplace:           10,
  1371  		stop:              0,
  1372  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1373  			job.TaskGroups[0].Name: {
  1374  				InPlaceUpdate: 10,
  1375  			},
  1376  		},
  1377  	})
  1378  }
  1379  
   1380  // Tests the reconciler doesn't create a deployment if there are no changes
  1381  func TestReconciler_DontCreateDeployment_NoChanges(t *testing.T) {
  1382  	job := mock.Job()
  1383  	job.TaskGroups[0].Update = noCanaryUpdate
  1384  
  1385  	// Create 10 allocations from the job
  1386  	var allocs []*structs.Allocation
  1387  	for i := 0; i < 10; i++ {
  1388  		alloc := mock.Alloc()
  1389  		alloc.Job = job
  1390  		alloc.JobID = job.ID
  1391  		alloc.NodeID = structs.GenerateUUID()
  1392  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1393  		alloc.TaskGroup = job.TaskGroups[0].Name
  1394  		allocs = append(allocs, alloc)
  1395  	}
  1396  
  1397  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, nil)
  1398  	r := reconciler.Compute()
  1399  
  1400  	// Assert the correct results
  1401  	assertResults(t, r, &resultExpectation{
  1402  		createDeployment:  nil,
  1403  		deploymentUpdates: nil,
  1404  		place:             0,
  1405  		inplace:           0,
  1406  		stop:              0,
  1407  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1408  			job.TaskGroups[0].Name: {
  1409  				DestructiveUpdate: 0,
  1410  				Ignore:            10,
  1411  			},
  1412  		},
  1413  	})
  1414  }
  1415  
  1416  // Tests the reconciler doesn't place any more canaries when the deployment is
  1417  // paused or failed
  1418  func TestReconciler_PausedOrFailedDeployment_NoMoreCanaries(t *testing.T) {
  1419  	job := mock.Job()
  1420  	job.TaskGroups[0].Update = canaryUpdate
  1421  
  1422  	cases := []struct {
  1423  		name             string
  1424  		deploymentStatus string
  1425  		stop             uint64
  1426  	}{
  1427  		{
  1428  			name:             "paused deployment",
  1429  			deploymentStatus: structs.DeploymentStatusPaused,
  1430  			stop:             0,
  1431  		},
  1432  		{
  1433  			name:             "failed deployment",
  1434  			deploymentStatus: structs.DeploymentStatusFailed,
  1435  			stop:             1,
  1436  		},
  1437  	}
  1438  
  1439  	for _, c := range cases {
  1440  		t.Run(c.name, func(t *testing.T) {
  1441  			// Create a deployment that is paused/failed and has placed some canaries
  1442  			d := structs.NewDeployment(job)
  1443  			d.Status = c.deploymentStatus
  1444  			d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  1445  				Promoted:        false,
  1446  				DesiredCanaries: 2,
  1447  				DesiredTotal:    10,
  1448  				PlacedAllocs:    1,
  1449  			}
  1450  
  1451  			// Create 10 allocations for the original job
  1452  			var allocs []*structs.Allocation
  1453  			for i := 0; i < 10; i++ {
  1454  				alloc := mock.Alloc()
  1455  				alloc.Job = job
  1456  				alloc.JobID = job.ID
  1457  				alloc.NodeID = structs.GenerateUUID()
  1458  				alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1459  				alloc.TaskGroup = job.TaskGroups[0].Name
  1460  				allocs = append(allocs, alloc)
  1461  			}
  1462  
  1463  			// Create one canary
  1464  			canary := mock.Alloc()
  1465  			canary.Job = job
  1466  			canary.JobID = job.ID
  1467  			canary.NodeID = structs.GenerateUUID()
  1468  			canary.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, 0)
  1469  			canary.TaskGroup = job.TaskGroups[0].Name
  1470  			canary.DeploymentID = d.ID
  1471  			allocs = append(allocs, canary)
  1472  			d.TaskGroups[canary.TaskGroup].PlacedCanaries = []string{canary.ID}
  1473  
  1474  			mockUpdateFn := allocUpdateFnMock(map[string]allocUpdateType{canary.ID: allocUpdateFnIgnore}, allocUpdateFnDestructive)
  1475  			reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, nil)
  1476  			r := reconciler.Compute()
  1477  
  1478  			// Assert the correct results
  1479  			assertResults(t, r, &resultExpectation{
  1480  				createDeployment:  nil,
  1481  				deploymentUpdates: nil,
  1482  				place:             0,
  1483  				inplace:           0,
  1484  				stop:              int(c.stop),
  1485  				desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1486  					job.TaskGroups[0].Name: {
  1487  						Ignore: 11 - c.stop,
  1488  						Stop:   c.stop,
  1489  					},
  1490  				},
  1491  			})
  1492  		})
  1493  	}
  1494  }
  1495  
  1496  // Tests the reconciler doesn't place any more allocs when the deployment is
  1497  // paused or failed
  1498  func TestReconciler_PausedOrFailedDeployment_NoMorePlacements(t *testing.T) {
  1499  	job := mock.Job()
  1500  	job.TaskGroups[0].Update = noCanaryUpdate
  1501  	job.TaskGroups[0].Count = 15
  1502  
  1503  	cases := []struct {
  1504  		name             string
  1505  		deploymentStatus string
  1506  	}{
  1507  		{
  1508  			name:             "paused deployment",
  1509  			deploymentStatus: structs.DeploymentStatusPaused,
  1510  		},
  1511  		{
  1512  			name:             "failed deployment",
  1513  			deploymentStatus: structs.DeploymentStatusFailed,
  1514  		},
  1515  	}
  1516  
  1517  	for _, c := range cases {
  1518  		t.Run(c.name, func(t *testing.T) {
   1519  			// Create a deployment that is paused/failed and has placed some allocs
  1520  			d := structs.NewDeployment(job)
  1521  			d.Status = c.deploymentStatus
  1522  			d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  1523  				Promoted:     false,
  1524  				DesiredTotal: 15,
  1525  				PlacedAllocs: 10,
  1526  			}
  1527  
  1528  			// Create 10 allocations for the new job
  1529  			var allocs []*structs.Allocation
  1530  			for i := 0; i < 10; i++ {
  1531  				alloc := mock.Alloc()
  1532  				alloc.Job = job
  1533  				alloc.JobID = job.ID
  1534  				alloc.NodeID = structs.GenerateUUID()
  1535  				alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1536  				alloc.TaskGroup = job.TaskGroups[0].Name
  1537  				allocs = append(allocs, alloc)
  1538  			}
  1539  
  1540  			reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, d, allocs, nil)
  1541  			r := reconciler.Compute()
  1542  
  1543  			// Assert the correct results
  1544  			assertResults(t, r, &resultExpectation{
  1545  				createDeployment:  nil,
  1546  				deploymentUpdates: nil,
  1547  				place:             0,
  1548  				inplace:           0,
  1549  				stop:              0,
  1550  				desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1551  					job.TaskGroups[0].Name: {
  1552  						Ignore: 10,
  1553  					},
  1554  				},
  1555  			})
  1556  		})
  1557  	}
  1558  }
  1559  
  1560  // Tests the reconciler doesn't do any more destructive updates when the
  1561  // deployment is paused or failed
  1562  func TestReconciler_PausedOrFailedDeployment_NoMoreDestructiveUpdates(t *testing.T) {
  1563  	job := mock.Job()
  1564  	job.TaskGroups[0].Update = noCanaryUpdate
  1565  
  1566  	cases := []struct {
  1567  		name             string
  1568  		deploymentStatus string
  1569  	}{
  1570  		{
  1571  			name:             "paused deployment",
  1572  			deploymentStatus: structs.DeploymentStatusPaused,
  1573  		},
  1574  		{
  1575  			name:             "failed deployment",
  1576  			deploymentStatus: structs.DeploymentStatusFailed,
  1577  		},
  1578  	}
  1579  
  1580  	for _, c := range cases {
  1581  		t.Run(c.name, func(t *testing.T) {
  1582  			// Create a deployment with the given status that has placed one alloc
  1583  			d := structs.NewDeployment(job)
  1584  			d.Status = c.deploymentStatus
  1585  			d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  1586  				Promoted:     false,
  1587  				DesiredTotal: 10,
  1588  				PlacedAllocs: 1,
  1589  			}
  1590  
  1591  			// Create 9 allocations for the original job
  1592  			var allocs []*structs.Allocation
  1593  			for i := 1; i < 10; i++ {
  1594  				alloc := mock.Alloc()
  1595  				alloc.Job = job
  1596  				alloc.JobID = job.ID
  1597  				alloc.NodeID = structs.GenerateUUID()
  1598  				alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1599  				alloc.TaskGroup = job.TaskGroups[0].Name
  1600  				allocs = append(allocs, alloc)
  1601  			}
  1602  
  1603  			// Create one for the new job
  1604  			newAlloc := mock.Alloc()
  1605  			newAlloc.Job = job
  1606  			newAlloc.JobID = job.ID
  1607  			newAlloc.NodeID = structs.GenerateUUID()
  1608  			newAlloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, 0)
  1609  			newAlloc.TaskGroup = job.TaskGroups[0].Name
  1610  			newAlloc.DeploymentID = d.ID
  1611  			allocs = append(allocs, newAlloc)
  1612  
  1613  			mockUpdateFn := allocUpdateFnMock(map[string]allocUpdateType{newAlloc.ID: allocUpdateFnIgnore}, allocUpdateFnDestructive)
  1614  			reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, nil)
  1615  			r := reconciler.Compute()
  1616  
  1617  			// Assert the correct results
  1618  			assertResults(t, r, &resultExpectation{
  1619  				createDeployment:  nil,
  1620  				deploymentUpdates: nil,
  1621  				place:             0,
  1622  				inplace:           0,
  1623  				stop:              0,
  1624  				desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1625  					job.TaskGroups[0].Name: {
  1626  						Ignore: 10,
  1627  					},
  1628  				},
  1629  			})
  1630  		})
  1631  	}
  1632  }
  1633  
  1634  // Tests the reconciler handles migrations correctly when a deployment is paused
  1635  // or failed
  1636  func TestReconciler_PausedOrFailedDeployment_Migrations(t *testing.T) {
  1637  	job := mock.Job()
  1638  	job.TaskGroups[0].Update = noCanaryUpdate
  1639  
  1640  	cases := []struct {
  1641  		name              string
  1642  		deploymentStatus  string
  1643  		place             int
  1644  		stop              int
  1645  		ignoreAnnotation  uint64
  1646  		migrateAnnotation uint64
  1647  		stopAnnotation    uint64
  1648  	}{
  1649  		{
  1650  			name:             "paused deployment",
  1651  			deploymentStatus: structs.DeploymentStatusPaused,
  1652  			place:            0,
  1653  			stop:             3,
  1654  			ignoreAnnotation: 5,
  1655  			stopAnnotation:   3,
  1656  		},
  1657  		{
  1658  			name:              "failed deployment",
  1659  			deploymentStatus:  structs.DeploymentStatusFailed,
  1660  			place:             0,
  1661  			stop:              3,
  1662  			ignoreAnnotation:  5,
  1663  			migrateAnnotation: 0,
  1664  			stopAnnotation:    3,
  1665  		},
  1666  	}
  1667  
  1668  	for _, c := range cases {
  1669  		t.Run(c.name, func(t *testing.T) {
  1670  			// Create a deployment with the given status that has placed some allocs
  1671  			d := structs.NewDeployment(job)
  1672  			d.Status = c.deploymentStatus
  1673  			d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  1674  				Promoted:     false,
  1675  				DesiredTotal: 10,
  1676  				PlacedAllocs: 8,
  1677  			}
  1678  
  1679  			// Create 8 allocations in the deployment
  1680  			var allocs []*structs.Allocation
  1681  			for i := 0; i < 8; i++ {
  1682  				alloc := mock.Alloc()
  1683  				alloc.Job = job
  1684  				alloc.JobID = job.ID
  1685  				alloc.NodeID = structs.GenerateUUID()
  1686  				alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1687  				alloc.TaskGroup = job.TaskGroups[0].Name
  1688  				alloc.DeploymentID = d.ID
  1689  				allocs = append(allocs, alloc)
  1690  			}
  1691  
  1692  			// Build a map of tainted nodes
  1693  			tainted := make(map[string]*structs.Node, 3)
  1694  			for i := 0; i < 3; i++ {
  1695  				n := mock.Node()
  1696  				n.ID = allocs[i].NodeID
  1697  				n.Drain = true
  1698  				tainted[n.ID] = n
  1699  			}
  1700  
  1701  			reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, d, allocs, tainted)
  1702  			r := reconciler.Compute()
  1703  
  1704  			// Assert the correct results
  1705  			assertResults(t, r, &resultExpectation{
  1706  				createDeployment:  nil,
  1707  				deploymentUpdates: nil,
  1708  				place:             c.place,
  1709  				inplace:           0,
  1710  				stop:              c.stop,
  1711  				desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1712  					job.TaskGroups[0].Name: {
  1713  						Migrate: c.migrateAnnotation,
  1714  						Ignore:  c.ignoreAnnotation,
  1715  						Stop:    c.stopAnnotation,
  1716  					},
  1717  				},
  1718  			})
  1719  		})
  1720  	}
  1721  }
  1722  
  1723  // Tests the reconciler handles migrating a canary correctly on a draining node
  1724  func TestReconciler_DrainNode_Canary(t *testing.T) {
  1725  	job := mock.Job()
  1726  	job.TaskGroups[0].Update = canaryUpdate
  1727  
  1728  	// Create a deployment that has placed some canaries
  1729  	d := structs.NewDeployment(job)
  1730  	s := &structs.DeploymentState{
  1731  		Promoted:        false,
  1732  		DesiredTotal:    10,
  1733  		DesiredCanaries: 2,
  1734  		PlacedAllocs:    2,
  1735  	}
  1736  	d.TaskGroups[job.TaskGroups[0].Name] = s
  1737  
  1738  	// Create 10 allocations from the old job
  1739  	var allocs []*structs.Allocation
  1740  	for i := 0; i < 10; i++ {
  1741  		alloc := mock.Alloc()
  1742  		alloc.Job = job
  1743  		alloc.JobID = job.ID
  1744  		alloc.NodeID = structs.GenerateUUID()
  1745  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1746  		alloc.TaskGroup = job.TaskGroups[0].Name
  1747  		allocs = append(allocs, alloc)
  1748  	}
  1749  
  1750  	// Create two canaries for the new job
  1751  	handled := make(map[string]allocUpdateType)
  1752  	for i := 0; i < 2; i++ {
  1753  		// Create one canary
  1754  		canary := mock.Alloc()
  1755  		canary.Job = job
  1756  		canary.JobID = job.ID
  1757  		canary.NodeID = structs.GenerateUUID()
  1758  		canary.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1759  		canary.TaskGroup = job.TaskGroups[0].Name
  1760  		canary.DeploymentID = d.ID
  1761  		s.PlacedCanaries = append(s.PlacedCanaries, canary.ID)
  1762  		allocs = append(allocs, canary)
  1763  		handled[canary.ID] = allocUpdateFnIgnore
  1764  	}
  1765  
  1766  	// Build a map of tainted nodes that contains the last canary
  1767  	tainted := make(map[string]*structs.Node, 1)
  1768  	n := mock.Node()
  1769  	n.ID = allocs[11].NodeID
  1770  	n.Drain = true
  1771  	tainted[n.ID] = n
  1772  
  1773  	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
  1774  	reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, tainted)
  1775  	r := reconciler.Compute()
  1776  
  1777  	// Assert the correct results
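        	// The canary on the draining node should be stopped and a replacement
        	// canary placed; everything else is ignored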
  1778  	assertResults(t, r, &resultExpectation{
  1779  		createDeployment:  nil,
  1780  		deploymentUpdates: nil,
  1781  		place:             1,
  1782  		inplace:           0,
  1783  		stop:              1,
  1784  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1785  			job.TaskGroups[0].Name: {
  1786  				Canary: 1,
  1787  				Ignore: 11,
  1788  			},
  1789  		},
  1790  	})
  1791  	assertNamesHaveIndexes(t, intRange(1, 1), stopResultsToNames(r.stop))
  1792  	assertNamesHaveIndexes(t, intRange(1, 1), placeResultsToNames(r.place))
  1793  }
  1794  
  1795  // Tests the reconciler handles migrating a canary correctly on a lost node
  1796  func TestReconciler_LostNode_Canary(t *testing.T) {
  1797  	job := mock.Job()
  1798  	job.TaskGroups[0].Update = canaryUpdate
  1799  
  1800  	// Create a deployment that has placed some canaries
  1801  	d := structs.NewDeployment(job)
  1802  	s := &structs.DeploymentState{
  1803  		Promoted:        false,
  1804  		DesiredTotal:    10,
  1805  		DesiredCanaries: 2,
  1806  		PlacedAllocs:    2,
  1807  	}
  1808  	d.TaskGroups[job.TaskGroups[0].Name] = s
  1809  
  1810  	// Create 10 allocations from the old job
  1811  	var allocs []*structs.Allocation
  1812  	for i := 0; i < 10; i++ {
  1813  		alloc := mock.Alloc()
  1814  		alloc.Job = job
  1815  		alloc.JobID = job.ID
  1816  		alloc.NodeID = structs.GenerateUUID()
  1817  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1818  		alloc.TaskGroup = job.TaskGroups[0].Name
  1819  		allocs = append(allocs, alloc)
  1820  	}
  1821  
  1822  	// Create two canaries for the new job
  1823  	handled := make(map[string]allocUpdateType)
  1824  	for i := 0; i < 2; i++ {
  1825  		// Create one canary
  1826  		canary := mock.Alloc()
  1827  		canary.Job = job
  1828  		canary.JobID = job.ID
  1829  		canary.NodeID = structs.GenerateUUID()
  1830  		canary.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1831  		canary.TaskGroup = job.TaskGroups[0].Name
  1832  		s.PlacedCanaries = append(s.PlacedCanaries, canary.ID)
  1833  		canary.DeploymentID = d.ID
  1834  		allocs = append(allocs, canary)
  1835  		handled[canary.ID] = allocUpdateFnIgnore
  1836  	}
  1837  
  1838  	// Build a map of tainted nodes that contains the last canary
  1839  	tainted := make(map[string]*structs.Node, 1)
  1840  	n := mock.Node()
  1841  	n.ID = allocs[11].NodeID
  1842  	n.Status = structs.NodeStatusDown
  1843  	tainted[n.ID] = n
  1844  
  1845  	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
  1846  	reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, tainted)
  1847  	r := reconciler.Compute()
  1848  
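        	// The canary on the down node should be stopped and a replacement canary
        	// placed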
  1849  	// Assert the correct results
  1850  	assertResults(t, r, &resultExpectation{
  1851  		createDeployment:  nil,
  1852  		deploymentUpdates: nil,
  1853  		place:             1,
  1854  		inplace:           0,
  1855  		stop:              1,
  1856  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1857  			job.TaskGroups[0].Name: {
  1858  				Canary: 1,
  1859  				Ignore: 11,
  1860  			},
  1861  		},
  1862  	})
  1863  
  1864  	assertNamesHaveIndexes(t, intRange(1, 1), stopResultsToNames(r.stop))
  1865  	assertNamesHaveIndexes(t, intRange(1, 1), placeResultsToNames(r.place))
  1866  }
  1867  
  1868  // Tests the reconciler handles stopping canaries from older deployments
  1869  func TestReconciler_StopOldCanaries(t *testing.T) {
  1870  	job := mock.Job()
  1871  	job.TaskGroups[0].Update = canaryUpdate
  1872  
  1873  	// Create an old deployment that has placed some canaries
  1874  	d := structs.NewDeployment(job)
  1875  	s := &structs.DeploymentState{
  1876  		Promoted:        false,
  1877  		DesiredTotal:    10,
  1878  		DesiredCanaries: 2,
  1879  		PlacedAllocs:    2,
  1880  	}
  1881  	d.TaskGroups[job.TaskGroups[0].Name] = s
  1882  
  1883  	// Update the job
  1884  	job.Version += 10
  1885  
  1886  	// Create 10 allocations from the old job
  1887  	var allocs []*structs.Allocation
  1888  	for i := 0; i < 10; i++ {
  1889  		alloc := mock.Alloc()
  1890  		alloc.Job = job
  1891  		alloc.JobID = job.ID
  1892  		alloc.NodeID = structs.GenerateUUID()
  1893  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1894  		alloc.TaskGroup = job.TaskGroups[0].Name
  1895  		allocs = append(allocs, alloc)
  1896  	}
  1897  
  1898  	// Create canaries
  1899  	for i := 0; i < 2; i++ {
  1900  		// Create one canary
  1901  		canary := mock.Alloc()
  1902  		canary.Job = job
  1903  		canary.JobID = job.ID
  1904  		canary.NodeID = structs.GenerateUUID()
  1905  		canary.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1906  		canary.TaskGroup = job.TaskGroups[0].Name
  1907  		s.PlacedCanaries = append(s.PlacedCanaries, canary.ID)
  1908  		canary.DeploymentID = d.ID
  1909  		allocs = append(allocs, canary)
  1910  	}
  1911  
  1912  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, d, allocs, nil)
  1913  	r := reconciler.Compute()
  1914  
  1915  	newD := structs.NewDeployment(job)
  1916  	newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
  1917  	newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  1918  		DesiredCanaries: 2,
  1919  		DesiredTotal:    10,
  1920  	}
  1921  
  1922  	// Assert the correct results
  1923  	assertResults(t, r, &resultExpectation{
  1924  		createDeployment: newD,
  1925  		deploymentUpdates: []*structs.DeploymentStatusUpdate{
  1926  			{
  1927  				DeploymentID:      d.ID,
  1928  				Status:            structs.DeploymentStatusCancelled,
  1929  				StatusDescription: structs.DeploymentStatusDescriptionNewerJob,
  1930  			},
  1931  		},
  1932  		place:   2,
  1933  		inplace: 0,
  1934  		stop:    2,
  1935  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1936  			job.TaskGroups[0].Name: {
  1937  				Canary: 2,
  1938  				Stop:   2,
  1939  				Ignore: 10,
  1940  			},
  1941  		},
  1942  	})
  1943  
  1944  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
  1945  	assertNamesHaveIndexes(t, intRange(0, 1), placeResultsToNames(r.place))
  1946  }
  1947  
  1948  // Tests the reconciler creates new canaries when the job changes
  1949  func TestReconciler_NewCanaries(t *testing.T) {
  1950  	job := mock.Job()
  1951  	job.TaskGroups[0].Update = canaryUpdate
  1952  
  1953  	// Create 10 allocations from the old job
  1954  	var allocs []*structs.Allocation
  1955  	for i := 0; i < 10; i++ {
  1956  		alloc := mock.Alloc()
  1957  		alloc.Job = job
  1958  		alloc.JobID = job.ID
  1959  		alloc.NodeID = structs.GenerateUUID()
  1960  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  1961  		alloc.TaskGroup = job.TaskGroups[0].Name
  1962  		allocs = append(allocs, alloc)
  1963  	}
  1964  
  1965  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil)
  1966  	r := reconciler.Compute()
  1967  
  1968  	newD := structs.NewDeployment(job)
  1969  	newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
  1970  	newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  1971  		DesiredCanaries: 2,
  1972  		DesiredTotal:    10,
  1973  	}
  1974  
  1975  	// Assert the correct results
  1976  	assertResults(t, r, &resultExpectation{
  1977  		createDeployment:  newD,
  1978  		deploymentUpdates: nil,
  1979  		place:             2,
  1980  		inplace:           0,
  1981  		stop:              0,
  1982  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  1983  			job.TaskGroups[0].Name: {
  1984  				Canary: 2,
  1985  				Ignore: 10,
  1986  			},
  1987  		},
  1988  	})
  1989  
  1990  	assertNamesHaveIndexes(t, intRange(0, 1), placeResultsToNames(r.place))
  1991  }
  1992  
  1993  // Tests the reconciler creates new canaries when the job changes and scales up
  1994  func TestReconciler_NewCanaries_ScaleUp(t *testing.T) {
  1995  	// Scale the job up to 15
  1996  	job := mock.Job()
  1997  	job.TaskGroups[0].Update = canaryUpdate
  1998  	job.TaskGroups[0].Count = 15
  1999  
  2000  	// Create 10 allocations from the old job
  2001  	var allocs []*structs.Allocation
  2002  	for i := 0; i < 10; i++ {
  2003  		alloc := mock.Alloc()
  2004  		alloc.Job = job
  2005  		alloc.JobID = job.ID
  2006  		alloc.NodeID = structs.GenerateUUID()
  2007  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2008  		alloc.TaskGroup = job.TaskGroups[0].Name
  2009  		allocs = append(allocs, alloc)
  2010  	}
  2011  
  2012  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil)
  2013  	r := reconciler.Compute()
  2014  
  2015  	newD := structs.NewDeployment(job)
  2016  	newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
  2017  	newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2018  		DesiredCanaries: 2,
  2019  		DesiredTotal:    15,
  2020  	}
  2021  
  2022  	// Assert the correct results
  2023  	assertResults(t, r, &resultExpectation{
  2024  		createDeployment:  newD,
  2025  		deploymentUpdates: nil,
  2026  		place:             2,
  2027  		inplace:           0,
  2028  		stop:              0,
  2029  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2030  			job.TaskGroups[0].Name: {
  2031  				Canary: 2,
  2032  				Ignore: 10,
  2033  			},
  2034  		},
  2035  	})
  2036  
  2037  	assertNamesHaveIndexes(t, intRange(0, 1), placeResultsToNames(r.place))
  2038  }
  2039  
  2040  // Tests the reconciler creates new canaries when the job changes and scales
  2041  // down
  2042  func TestReconciler_NewCanaries_ScaleDown(t *testing.T) {
  2043  	// Scale the job down to 5
  2044  	job := mock.Job()
  2045  	job.TaskGroups[0].Update = canaryUpdate
  2046  	job.TaskGroups[0].Count = 5
  2047  
  2048  	// Create 10 allocations from the old job
  2049  	var allocs []*structs.Allocation
  2050  	for i := 0; i < 10; i++ {
  2051  		alloc := mock.Alloc()
  2052  		alloc.Job = job
  2053  		alloc.JobID = job.ID
  2054  		alloc.NodeID = structs.GenerateUUID()
  2055  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2056  		alloc.TaskGroup = job.TaskGroups[0].Name
  2057  		allocs = append(allocs, alloc)
  2058  	}
  2059  
  2060  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, nil, allocs, nil)
  2061  	r := reconciler.Compute()
  2062  
  2063  	newD := structs.NewDeployment(job)
  2064  	newD.StatusDescription = structs.DeploymentStatusDescriptionRunningNeedsPromotion
  2065  	newD.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2066  		DesiredCanaries: 2,
  2067  		DesiredTotal:    5,
  2068  	}
  2069  
  2070  	// Assert the correct results
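        	// Scaling down from 10 to 5 stops the five highest-indexed old allocs
        	// while the two canaries are still placed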
  2071  	assertResults(t, r, &resultExpectation{
  2072  		createDeployment:  newD,
  2073  		deploymentUpdates: nil,
  2074  		place:             2,
  2075  		inplace:           0,
  2076  		stop:              5,
  2077  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2078  			job.TaskGroups[0].Name: {
  2079  				Canary: 2,
  2080  				Stop:   5,
  2081  				Ignore: 5,
  2082  			},
  2083  		},
  2084  	})
  2085  
  2086  	assertNamesHaveIndexes(t, intRange(0, 1), placeResultsToNames(r.place))
  2087  	assertNamesHaveIndexes(t, intRange(5, 9), stopResultsToNames(r.stop))
  2088  }
  2089  
  2090  // Tests the reconciler handles filling the names of partially placed canaries
  2091  func TestReconciler_NewCanaries_FillNames(t *testing.T) {
  2092  	job := mock.Job()
  2093  	job.TaskGroups[0].Update = &structs.UpdateStrategy{
  2094  		Canary:          4,
  2095  		MaxParallel:     2,
  2096  		HealthCheck:     structs.UpdateStrategyHealthCheck_Checks,
  2097  		MinHealthyTime:  10 * time.Second,
  2098  		HealthyDeadline: 10 * time.Minute,
  2099  	}
  2100  
  2101  	// Create an existing deployment that has placed some canaries
  2102  	d := structs.NewDeployment(job)
  2103  	s := &structs.DeploymentState{
  2104  		Promoted:        false,
  2105  		DesiredTotal:    10,
  2106  		DesiredCanaries: 4,
  2107  		PlacedAllocs:    2,
  2108  	}
  2109  	d.TaskGroups[job.TaskGroups[0].Name] = s
  2110  
  2111  	// Create 10 allocations from the old job
  2112  	var allocs []*structs.Allocation
  2113  	for i := 0; i < 10; i++ {
  2114  		alloc := mock.Alloc()
  2115  		alloc.Job = job
  2116  		alloc.JobID = job.ID
  2117  		alloc.NodeID = structs.GenerateUUID()
  2118  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2119  		alloc.TaskGroup = job.TaskGroups[0].Name
  2120  		allocs = append(allocs, alloc)
  2121  	}
  2122  
  2123  	// Create canaries but pick names at the ends
  2124  	for i := 0; i < 4; i += 3 {
  2125  		// Create one canary
  2126  		canary := mock.Alloc()
  2127  		canary.Job = job
  2128  		canary.JobID = job.ID
  2129  		canary.NodeID = structs.GenerateUUID()
  2130  		canary.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2131  		canary.TaskGroup = job.TaskGroups[0].Name
  2132  		s.PlacedCanaries = append(s.PlacedCanaries, canary.ID)
  2133  		canary.DeploymentID = d.ID
  2134  		allocs = append(allocs, canary)
  2135  	}
  2136  
  2137  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, job, d, allocs, nil)
  2138  	r := reconciler.Compute()
  2139  
  2140  	// Assert the correct results
  2141  	assertResults(t, r, &resultExpectation{
  2142  		createDeployment:  nil,
  2143  		deploymentUpdates: nil,
  2144  		place:             2,
  2145  		inplace:           0,
  2146  		stop:              0,
  2147  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2148  			job.TaskGroups[0].Name: {
  2149  				Canary: 2,
  2150  				Ignore: 12,
  2151  			},
  2152  		},
  2153  	})
  2154  
  2155  	assertNamesHaveIndexes(t, intRange(1, 2), placeResultsToNames(r.place))
  2156  }
  2157  
  2158  // Tests the reconciler handles canary promotion by unblocking max_parallel
  2159  func TestReconciler_PromoteCanaries_Unblock(t *testing.T) {
  2160  	job := mock.Job()
  2161  	job.TaskGroups[0].Update = canaryUpdate
  2162  
  2163  	// Create an existing deployment that has placed some canaries and mark them
  2164  	// promoted
  2165  	d := structs.NewDeployment(job)
  2166  	s := &structs.DeploymentState{
  2167  		Promoted:        true,
  2168  		DesiredTotal:    10,
  2169  		DesiredCanaries: 2,
  2170  		PlacedAllocs:    2,
  2171  	}
  2172  	d.TaskGroups[job.TaskGroups[0].Name] = s
  2173  
  2174  	// Create 10 allocations from the old job
  2175  	var allocs []*structs.Allocation
  2176  	for i := 0; i < 10; i++ {
  2177  		alloc := mock.Alloc()
  2178  		alloc.Job = job
  2179  		alloc.JobID = job.ID
  2180  		alloc.NodeID = structs.GenerateUUID()
  2181  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2182  		alloc.TaskGroup = job.TaskGroups[0].Name
  2183  		allocs = append(allocs, alloc)
  2184  	}
  2185  
  2186  	// Create the canaries
  2187  	handled := make(map[string]allocUpdateType)
  2188  	for i := 0; i < 2; i++ {
  2189  		// Create one canary
  2190  		canary := mock.Alloc()
  2191  		canary.Job = job
  2192  		canary.JobID = job.ID
  2193  		canary.NodeID = structs.GenerateUUID()
  2194  		canary.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2195  		canary.TaskGroup = job.TaskGroups[0].Name
  2196  		s.PlacedCanaries = append(s.PlacedCanaries, canary.ID)
  2197  		canary.DeploymentID = d.ID
  2198  		canary.DeploymentStatus = &structs.AllocDeploymentStatus{
  2199  			Healthy: helper.BoolToPtr(true),
  2200  		}
  2201  		allocs = append(allocs, canary)
  2202  		handled[canary.ID] = allocUpdateFnIgnore
  2203  	}
  2204  
  2205  	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
  2206  	reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, nil)
  2207  	r := reconciler.Compute()
  2208  
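        	// With the canaries promoted and healthy, the two old allocs they replace
        	// are stopped and max_parallel (2) destructive updates may proceed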
  2209  	// Assert the correct results
  2210  	assertResults(t, r, &resultExpectation{
  2211  		createDeployment:  nil,
  2212  		deploymentUpdates: nil,
  2213  		destructive:       2,
  2214  		stop:              2,
  2215  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2216  			job.TaskGroups[0].Name: {
  2217  				Stop:              2,
  2218  				DestructiveUpdate: 2,
  2219  				Ignore:            8,
  2220  			},
  2221  		},
  2222  	})
  2223  
  2224  	assertNoCanariesStopped(t, d, r.stop)
  2225  	assertNamesHaveIndexes(t, intRange(2, 3), destructiveResultsToNames(r.destructiveUpdate))
  2226  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
  2227  }
  2228  
  2229  // Tests the reconciler correctly handles canary promotion when the canary
  2230  // count equals the total
  2231  func TestReconciler_PromoteCanaries_CanariesEqualCount(t *testing.T) {
  2232  	job := mock.Job()
  2233  	job.TaskGroups[0].Update = canaryUpdate
  2234  	job.TaskGroups[0].Count = 2
  2235  
  2236  	// Create an existing deployment that has placed some canaries and mark them
  2237  	// promoted
  2238  	d := structs.NewDeployment(job)
  2239  	s := &structs.DeploymentState{
  2240  		Promoted:        true,
  2241  		DesiredTotal:    2,
  2242  		DesiredCanaries: 2,
  2243  		PlacedAllocs:    2,
  2244  	}
  2245  	d.TaskGroups[job.TaskGroups[0].Name] = s
  2246  
  2247  	// Create 2 allocations from the old job
  2248  	var allocs []*structs.Allocation
  2249  	for i := 0; i < 2; i++ {
  2250  		alloc := mock.Alloc()
  2251  		alloc.Job = job
  2252  		alloc.JobID = job.ID
  2253  		alloc.NodeID = structs.GenerateUUID()
  2254  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2255  		alloc.TaskGroup = job.TaskGroups[0].Name
  2256  		allocs = append(allocs, alloc)
  2257  	}
  2258  
  2259  	// Create the canaries
  2260  	handled := make(map[string]allocUpdateType)
  2261  	for i := 0; i < 2; i++ {
  2262  		// Create one canary
  2263  		canary := mock.Alloc()
  2264  		canary.Job = job
  2265  		canary.JobID = job.ID
  2266  		canary.NodeID = structs.GenerateUUID()
  2267  		canary.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2268  		canary.TaskGroup = job.TaskGroups[0].Name
  2269  		s.PlacedCanaries = append(s.PlacedCanaries, canary.ID)
  2270  		canary.DeploymentID = d.ID
  2271  		canary.DeploymentStatus = &structs.AllocDeploymentStatus{
  2272  			Healthy: helper.BoolToPtr(true),
  2273  		}
  2274  		allocs = append(allocs, canary)
  2275  		handled[canary.ID] = allocUpdateFnIgnore
  2276  	}
  2277  
  2278  	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
  2279  	reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, nil)
  2280  	r := reconciler.Compute()
  2281  
  2282  	updates := []*structs.DeploymentStatusUpdate{
  2283  		{
  2284  			DeploymentID:      d.ID,
  2285  			Status:            structs.DeploymentStatusSuccessful,
  2286  			StatusDescription: structs.DeploymentStatusDescriptionSuccessful,
  2287  		},
  2288  	}
  2289  
  2290  	// Assert the correct results
  2291  	assertResults(t, r, &resultExpectation{
  2292  		createDeployment:  nil,
  2293  		deploymentUpdates: updates,
  2294  		place:             0,
  2295  		inplace:           0,
  2296  		stop:              2,
  2297  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2298  			job.TaskGroups[0].Name: {
  2299  				Stop:   2,
  2300  				Ignore: 2,
  2301  			},
  2302  		},
  2303  	})
  2304  
  2305  	assertNoCanariesStopped(t, d, r.stop)
  2306  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
  2307  }
  2308  
  2309  // Tests the reconciler checks the health of placed allocs to determine the
  2310  // limit
  2311  func TestReconciler_DeploymentLimit_HealthAccounting(t *testing.T) {
  2312  	job := mock.Job()
  2313  	job.TaskGroups[0].Update = noCanaryUpdate
  2314  
  2315  	cases := []struct {
  2316  		healthy int
  2317  	}{
  2318  		{
  2319  			healthy: 0,
  2320  		},
  2321  		{
  2322  			healthy: 1,
  2323  		},
  2324  		{
  2325  			healthy: 2,
  2326  		},
  2327  		{
  2328  			healthy: 3,
  2329  		},
  2330  		{
  2331  			healthy: 4,
  2332  		},
  2333  	}
  2334  
  2335  	for _, c := range cases {
  2336  		t.Run(fmt.Sprintf("%d healthy", c.healthy), func(t *testing.T) {
  2337  			// Create an existing deployment that has placed some allocs and mark it
  2338  			// promoted
  2339  			d := structs.NewDeployment(job)
  2340  			d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2341  				Promoted:     true,
  2342  				DesiredTotal: 10,
  2343  				PlacedAllocs: 4,
  2344  			}
  2345  
  2346  			// Create 6 allocations from the old job
  2347  			var allocs []*structs.Allocation
  2348  			for i := 4; i < 10; i++ {
  2349  				alloc := mock.Alloc()
  2350  				alloc.Job = job
  2351  				alloc.JobID = job.ID
  2352  				alloc.NodeID = structs.GenerateUUID()
  2353  				alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2354  				alloc.TaskGroup = job.TaskGroups[0].Name
  2355  				allocs = append(allocs, alloc)
  2356  			}
  2357  
  2358  			// Create the new allocs
  2359  			handled := make(map[string]allocUpdateType)
  2360  			for i := 0; i < 4; i++ {
  2361  				new := mock.Alloc()
  2362  				new.Job = job
  2363  				new.JobID = job.ID
  2364  				new.NodeID = structs.GenerateUUID()
  2365  				new.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2366  				new.TaskGroup = job.TaskGroups[0].Name
  2367  				new.DeploymentID = d.ID
  2368  				if i < c.healthy {
  2369  					new.DeploymentStatus = &structs.AllocDeploymentStatus{
  2370  						Healthy: helper.BoolToPtr(true),
  2371  					}
  2372  				}
  2373  				allocs = append(allocs, new)
  2374  				handled[new.ID] = allocUpdateFnIgnore
  2375  			}
  2376  
  2377  			mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
  2378  			reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, nil)
  2379  			r := reconciler.Compute()
  2380  
  2381  			// Assert the correct results
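        			// The rolling-update limit is unblocked one-for-one by healthy placed
        			// allocs, so exactly c.healthy destructive updates are expected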
  2382  			assertResults(t, r, &resultExpectation{
  2383  				createDeployment:  nil,
  2384  				deploymentUpdates: nil,
  2385  				destructive:       c.healthy,
  2386  				desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2387  					job.TaskGroups[0].Name: {
  2388  						DestructiveUpdate: uint64(c.healthy),
  2389  						Ignore:            uint64(10 - c.healthy),
  2390  					},
  2391  				},
  2392  			})
  2393  
  2394  			if c.healthy != 0 {
  2395  				assertNamesHaveIndexes(t, intRange(4, 3+c.healthy), destructiveResultsToNames(r.destructiveUpdate))
  2396  			}
  2397  		})
  2398  	}
  2399  }
  2400  
  2401  // Tests the reconciler handles an alloc on a tainted node during a rolling
  2402  // update
  2403  func TestReconciler_TaintedNode_RollingUpgrade(t *testing.T) {
  2404  	job := mock.Job()
  2405  	job.TaskGroups[0].Update = noCanaryUpdate
  2406  
  2407  	// Create an existing deployment that has some placed allocs
  2408  	d := structs.NewDeployment(job)
  2409  	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2410  		Promoted:     true,
  2411  		DesiredTotal: 10,
  2412  		PlacedAllocs: 7,
  2413  	}
  2414  
  2415  	// Create 3 allocations from the old job
  2416  	var allocs []*structs.Allocation
  2417  	for i := 7; i < 10; i++ {
  2418  		alloc := mock.Alloc()
  2419  		alloc.Job = job
  2420  		alloc.JobID = job.ID
  2421  		alloc.NodeID = structs.GenerateUUID()
  2422  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2423  		alloc.TaskGroup = job.TaskGroups[0].Name
  2424  		allocs = append(allocs, alloc)
  2425  	}
  2426  
  2427  	// Create the healthy replacements
  2428  	handled := make(map[string]allocUpdateType)
  2429  	for i := 0; i < 7; i++ {
  2430  		new := mock.Alloc()
  2431  		new.Job = job
  2432  		new.JobID = job.ID
  2433  		new.NodeID = structs.GenerateUUID()
  2434  		new.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2435  		new.TaskGroup = job.TaskGroups[0].Name
  2436  		new.DeploymentID = d.ID
  2437  		new.DeploymentStatus = &structs.AllocDeploymentStatus{
  2438  			Healthy: helper.BoolToPtr(true),
  2439  		}
  2440  		allocs = append(allocs, new)
  2441  		handled[new.ID] = allocUpdateFnIgnore
  2442  	}
  2443  
  2444  	// Build a map of tainted nodes
  2445  	tainted := make(map[string]*structs.Node, 3)
  2446  	for i := 0; i < 3; i++ {
  2447  		n := mock.Node()
  2448  		n.ID = allocs[3+i].NodeID
  2449  		if i == 0 {
  2450  			n.Status = structs.NodeStatusDown
  2451  		} else {
  2452  			n.Drain = true
  2453  		}
  2454  		tainted[n.ID] = n
  2455  	}
  2456  
  2457  	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
  2458  	reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, tainted)
  2459  	r := reconciler.Compute()
  2460  
  2461  	// Assert the correct results
  2462  	assertResults(t, r, &resultExpectation{
  2463  		createDeployment:  nil,
  2464  		deploymentUpdates: nil,
  2465  		place:             2,
  2466  		destructive:       3,
  2467  		stop:              2,
  2468  		followupEvalWait:  31 * time.Second,
  2469  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2470  			job.TaskGroups[0].Name: {
  2471  				Place:             1, // Place the lost
  2472  				Stop:              1, // Stop the lost
  2473  				Migrate:           1, // Migrate the tainted
  2474  				DestructiveUpdate: 3,
  2475  				Ignore:            5,
  2476  			},
  2477  		},
  2478  	})
  2479  
  2480  	assertNamesHaveIndexes(t, intRange(7, 9), destructiveResultsToNames(r.destructiveUpdate))
  2481  	assertNamesHaveIndexes(t, intRange(0, 1), placeResultsToNames(r.place))
  2482  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
  2483  }
  2484  
  2485  // Tests the reconciler handles a failed deployment and does no placements
  2486  func TestReconciler_FailedDeployment_NoPlacements(t *testing.T) {
  2487  	job := mock.Job()
  2488  	job.TaskGroups[0].Update = noCanaryUpdate
  2489  
  2490  	// Create an existing failed deployment that has some placed allocs
  2491  	d := structs.NewDeployment(job)
  2492  	d.Status = structs.DeploymentStatusFailed
  2493  	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2494  		Promoted:     true,
  2495  		DesiredTotal: 10,
  2496  		PlacedAllocs: 4,
  2497  	}
  2498  
  2499  	// Create 6 allocations from the old job
  2500  	var allocs []*structs.Allocation
  2501  	for i := 4; i < 10; i++ {
  2502  		alloc := mock.Alloc()
  2503  		alloc.Job = job
  2504  		alloc.JobID = job.ID
  2505  		alloc.NodeID = structs.GenerateUUID()
  2506  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2507  		alloc.TaskGroup = job.TaskGroups[0].Name
  2508  		allocs = append(allocs, alloc)
  2509  	}
  2510  
  2511  	// Create the healthy replacements
  2512  	handled := make(map[string]allocUpdateType)
  2513  	for i := 0; i < 4; i++ {
  2514  		new := mock.Alloc()
  2515  		new.Job = job
  2516  		new.JobID = job.ID
  2517  		new.NodeID = structs.GenerateUUID()
  2518  		new.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2519  		new.TaskGroup = job.TaskGroups[0].Name
  2520  		new.DeploymentID = d.ID
  2521  		new.DeploymentStatus = &structs.AllocDeploymentStatus{
  2522  			Healthy: helper.BoolToPtr(true),
  2523  		}
  2524  		allocs = append(allocs, new)
  2525  		handled[new.ID] = allocUpdateFnIgnore
  2526  	}
  2527  
  2528  	// Build a map of tainted nodes
  2529  	tainted := make(map[string]*structs.Node, 2)
  2530  	for i := 0; i < 2; i++ {
  2531  		n := mock.Node()
  2532  		n.ID = allocs[6+i].NodeID
  2533  		if i == 0 {
  2534  			n.Status = structs.NodeStatusDown
  2535  		} else {
  2536  			n.Drain = true
  2537  		}
  2538  		tainted[n.ID] = n
  2539  	}
  2540  
  2541  	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
  2542  	reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, tainted)
  2543  	r := reconciler.Compute()
  2544  
  2545  	// Assert the correct results
  2546  	assertResults(t, r, &resultExpectation{
  2547  		createDeployment:  nil,
  2548  		deploymentUpdates: nil,
  2549  		place:             0,
  2550  		inplace:           0,
  2551  		stop:              2,
  2552  		followupEvalWait:  0, // Since the deployment is failed, there should be no followup
  2553  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2554  			job.TaskGroups[0].Name: {
  2555  				Stop:   2,
  2556  				Ignore: 8,
  2557  			},
  2558  		},
  2559  	})
  2560  
  2561  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
  2562  }
  2563  
  2564  // Tests the reconciler handles a run after a deployment has completed
  2565  // successfully.
  2566  func TestReconciler_CompleteDeployment(t *testing.T) {
  2567  	job := mock.Job()
  2568  	job.TaskGroups[0].Update = canaryUpdate
  2569  
  2570  	d := structs.NewDeployment(job)
  2571  	d.Status = structs.DeploymentStatusSuccessful
  2572  	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2573  		Promoted:        true,
  2574  		DesiredTotal:    10,
  2575  		DesiredCanaries: 2,
  2576  		PlacedAllocs:    10,
  2577  		HealthyAllocs:   10,
  2578  	}
  2579  
  2580  	// Create healthy allocations that are part of the successful deployment
  2581  	var allocs []*structs.Allocation
  2582  	for i := 0; i < 10; i++ {
  2583  		alloc := mock.Alloc()
  2584  		alloc.Job = job
  2585  		alloc.JobID = job.ID
  2586  		alloc.NodeID = structs.GenerateUUID()
  2587  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2588  		alloc.TaskGroup = job.TaskGroups[0].Name
  2589  		alloc.DeploymentID = d.ID
  2590  		alloc.DeploymentStatus = &structs.AllocDeploymentStatus{
  2591  			Healthy: helper.BoolToPtr(true),
  2592  		}
  2593  		allocs = append(allocs, alloc)
  2594  	}
  2595  
  2596  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, d, allocs, nil)
  2597  	r := reconciler.Compute()
  2598  
  2599  	// Assert the correct results
  2600  	assertResults(t, r, &resultExpectation{
  2601  		createDeployment:  nil,
  2602  		deploymentUpdates: nil,
  2603  		place:             0,
  2604  		inplace:           0,
  2605  		stop:              0,
  2606  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2607  			job.TaskGroups[0].Name: {
  2608  				Ignore: 10,
  2609  			},
  2610  		},
  2611  	})
  2612  }
  2613  
  2614  // Test that a failed deployment cancels non-promoted canaries
  2615  func TestReconciler_FailedDeployment_CancelCanaries(t *testing.T) {
  2616  	// Create a job with two task groups
  2617  	job := mock.Job()
  2618  	job.TaskGroups[0].Update = canaryUpdate
  2619  	job.TaskGroups = append(job.TaskGroups, job.TaskGroups[0].Copy())
  2620  	job.TaskGroups[1].Name = "two"
  2621  
  2622  	// Create an existing failed deployment that has promoted one task group
  2623  	d := structs.NewDeployment(job)
  2624  	d.Status = structs.DeploymentStatusFailed
  2625  	s0 := &structs.DeploymentState{
  2626  		Promoted:        true,
  2627  		DesiredTotal:    10,
  2628  		DesiredCanaries: 2,
  2629  		PlacedAllocs:    4,
  2630  	}
  2631  	s1 := &structs.DeploymentState{
  2632  		Promoted:        false,
  2633  		DesiredTotal:    10,
  2634  		DesiredCanaries: 2,
  2635  		PlacedAllocs:    2,
  2636  	}
  2637  	d.TaskGroups[job.TaskGroups[0].Name] = s0
  2638  	d.TaskGroups[job.TaskGroups[1].Name] = s1
  2639  
  2640  	// Create the allocations for both task groups
  2641  	var allocs []*structs.Allocation
  2642  	handled := make(map[string]allocUpdateType)
  2643  	for _, group := range []int{0, 1} {
  2644  		replacements := 4
  2645  		state := s0
  2646  		if group == 1 {
  2647  			replacements = 2
  2648  			state = s1
  2649  		}
  2650  
  2651  		// Create the healthy replacements
  2652  		for i := 0; i < replacements; i++ {
  2653  			new := mock.Alloc()
  2654  			new.Job = job
  2655  			new.JobID = job.ID
  2656  			new.NodeID = structs.GenerateUUID()
  2657  			new.Name = structs.AllocName(job.ID, job.TaskGroups[group].Name, uint(i))
  2658  			new.TaskGroup = job.TaskGroups[group].Name
  2659  			new.DeploymentID = d.ID
  2660  			new.DeploymentStatus = &structs.AllocDeploymentStatus{
  2661  				Healthy: helper.BoolToPtr(true),
  2662  			}
  2663  			allocs = append(allocs, new)
  2664  			handled[new.ID] = allocUpdateFnIgnore
  2665  
  2666  			// Add the alloc to the canary list
  2667  			if i < 2 {
  2668  				state.PlacedCanaries = append(state.PlacedCanaries, new.ID)
  2669  			}
  2670  		}
  2671  		for i := replacements; i < 10; i++ {
  2672  			alloc := mock.Alloc()
  2673  			alloc.Job = job
  2674  			alloc.JobID = job.ID
  2675  			alloc.NodeID = structs.GenerateUUID()
  2676  			alloc.Name = structs.AllocName(job.ID, job.TaskGroups[group].Name, uint(i))
  2677  			alloc.TaskGroup = job.TaskGroups[group].Name
  2678  			allocs = append(allocs, alloc)
  2679  		}
  2680  	}
  2681  
  2682  	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
  2683  	reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, nil)
  2684  	r := reconciler.Compute()
  2685  
  2686  	// Assert the correct results
  2687  	assertResults(t, r, &resultExpectation{
  2688  		createDeployment:  nil,
  2689  		deploymentUpdates: nil,
  2690  		place:             0,
  2691  		inplace:           0,
  2692  		stop:              2,
  2693  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2694  			job.TaskGroups[0].Name: {
  2695  				Ignore: 10,
  2696  			},
  2697  			job.TaskGroups[1].Name: {
  2698  				Stop:   2,
  2699  				Ignore: 8,
  2700  			},
  2701  		},
  2702  	})
  2703  
  2704  	assertNamesHaveIndexes(t, intRange(0, 1), stopResultsToNames(r.stop))
  2705  }
  2706  
  2707  // Test that a failed deployment and updated job works
  2708  func TestReconciler_FailedDeployment_NewJob(t *testing.T) {
  2709  	job := mock.Job()
  2710  	job.TaskGroups[0].Update = noCanaryUpdate
  2711  
  2712  	// Create an existing failed deployment that has some placed allocs
  2713  	d := structs.NewDeployment(job)
  2714  	d.Status = structs.DeploymentStatusFailed
  2715  	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2716  		Promoted:     true,
  2717  		DesiredTotal: 10,
  2718  		PlacedAllocs: 4,
  2719  	}
  2720  
  2721  	// Create 6 allocations from the old job
  2722  	var allocs []*structs.Allocation
  2723  	for i := 4; i < 10; i++ {
  2724  		alloc := mock.Alloc()
  2725  		alloc.Job = job
  2726  		alloc.JobID = job.ID
  2727  		alloc.NodeID = structs.GenerateUUID()
  2728  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2729  		alloc.TaskGroup = job.TaskGroups[0].Name
  2730  		allocs = append(allocs, alloc)
  2731  	}
  2732  
  2733  	// Create the healthy replacements
  2734  	for i := 0; i < 4; i++ {
  2735  		new := mock.Alloc()
  2736  		new.Job = job
  2737  		new.JobID = job.ID
  2738  		new.NodeID = structs.GenerateUUID()
  2739  		new.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2740  		new.TaskGroup = job.TaskGroups[0].Name
  2741  		new.DeploymentID = d.ID
  2742  		new.DeploymentStatus = &structs.AllocDeploymentStatus{
  2743  			Healthy: helper.BoolToPtr(true),
  2744  		}
  2745  		allocs = append(allocs, new)
  2746  	}
  2747  
  2748  	// Up the job version
  2749  	jobNew := job.Copy()
  2750  	jobNew.Version += 100
  2751  
  2752  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnDestructive, false, job.ID, jobNew, d, allocs, nil)
  2753  	r := reconciler.Compute()
  2754  
  2755  	dnew := structs.NewDeployment(jobNew)
  2756  	dnew.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2757  		DesiredTotal: 10,
  2758  	}
  2759  
  2760  	// Assert the correct results
  2761  	assertResults(t, r, &resultExpectation{
  2762  		createDeployment:  dnew,
  2763  		deploymentUpdates: nil,
  2764  		destructive:       4,
  2765  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2766  			job.TaskGroups[0].Name: {
  2767  				DestructiveUpdate: 4,
  2768  				Ignore:            6,
  2769  			},
  2770  		},
  2771  	})
  2772  
  2773  	assertNamesHaveIndexes(t, intRange(0, 3), destructiveResultsToNames(r.destructiveUpdate))
  2774  }
  2775  
  2776  // Tests the reconciler marks a deployment as complete
  2777  func TestReconciler_MarkDeploymentComplete(t *testing.T) {
  2778  	job := mock.Job()
  2779  	job.TaskGroups[0].Update = noCanaryUpdate
  2780  
  2781  	d := structs.NewDeployment(job)
  2782  	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2783  		Promoted:      true,
  2784  		DesiredTotal:  10,
  2785  		PlacedAllocs:  10,
  2786  		HealthyAllocs: 10,
  2787  	}
  2788  
  2789  	// Create healthy allocations that are part of the deployment
  2790  	var allocs []*structs.Allocation
  2791  	for i := 0; i < 10; i++ {
  2792  		alloc := mock.Alloc()
  2793  		alloc.Job = job
  2794  		alloc.JobID = job.ID
  2795  		alloc.NodeID = structs.GenerateUUID()
  2796  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2797  		alloc.TaskGroup = job.TaskGroups[0].Name
  2798  		alloc.DeploymentID = d.ID
  2799  		alloc.DeploymentStatus = &structs.AllocDeploymentStatus{
  2800  			Healthy: helper.BoolToPtr(true),
  2801  		}
  2802  		allocs = append(allocs, alloc)
  2803  	}
  2804  
  2805  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, d, allocs, nil)
  2806  	r := reconciler.Compute()
  2807  
  2808  	updates := []*structs.DeploymentStatusUpdate{
  2809  		{
  2810  			DeploymentID:      d.ID,
  2811  			Status:            structs.DeploymentStatusSuccessful,
  2812  			StatusDescription: structs.DeploymentStatusDescriptionSuccessful,
  2813  		},
  2814  	}
  2815  
  2816  	// Assert the correct results
  2817  	assertResults(t, r, &resultExpectation{
  2818  		createDeployment:  nil,
  2819  		deploymentUpdates: updates,
  2820  		place:             0,
  2821  		inplace:           0,
  2822  		stop:              0,
  2823  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2824  			job.TaskGroups[0].Name: {
  2825  				Ignore: 10,
  2826  			},
  2827  		},
  2828  	})
  2829  }
  2830  
  2831  // Tests the reconciler picks the maximum of the staggers when multiple task
  2832  // groups are undergoing node drains.
  2833  func TestReconciler_TaintedNode_MultiGroups(t *testing.T) {
  2834  	// Create a job with two task groups
  2835  	job := mock.Job()
  2836  	job.TaskGroups[0].Update = noCanaryUpdate
  2837  	job.TaskGroups = append(job.TaskGroups, job.TaskGroups[0].Copy())
  2838  	job.TaskGroups[1].Name = "two"
  2839  	job.TaskGroups[1].Update.Stagger = 100 * time.Second
  2840  
  2841  	// Create the allocations
  2842  	var allocs []*structs.Allocation
  2843  	for j := 0; j < 2; j++ {
  2844  		for i := 0; i < 10; i++ {
  2845  			alloc := mock.Alloc()
  2846  			alloc.Job = job
  2847  			alloc.JobID = job.ID
  2848  			alloc.NodeID = structs.GenerateUUID()
  2849  			alloc.Name = structs.AllocName(job.ID, job.TaskGroups[j].Name, uint(i))
  2850  			alloc.TaskGroup = job.TaskGroups[j].Name
  2851  			allocs = append(allocs, alloc)
  2852  		}
  2853  	}
  2854  
  2855  	// Build a map of tainted nodes
  2856  	tainted := make(map[string]*structs.Node, 15)
  2857  	for i := 0; i < 15; i++ {
  2858  		n := mock.Node()
  2859  		n.ID = allocs[i].NodeID
  2860  		n.Drain = true
  2861  		tainted[n.ID] = n
  2862  	}
  2863  
  2864  	reconciler := NewAllocReconciler(testLogger(), allocUpdateFnIgnore, false, job.ID, job, nil, allocs, tainted)
  2865  	r := reconciler.Compute()
  2866  
  2867  	// Assert the correct results
  2868  	assertResults(t, r, &resultExpectation{
  2869  		createDeployment:  nil,
  2870  		deploymentUpdates: nil,
  2871  		place:             8,
  2872  		inplace:           0,
  2873  		stop:              8,
  2874  		followupEvalWait:  100 * time.Second,
  2875  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2876  			job.TaskGroups[0].Name: {
  2877  				Place:             0,
  2878  				Stop:              0,
  2879  				Migrate:           4,
  2880  				DestructiveUpdate: 0,
  2881  				Ignore:            6,
  2882  			},
  2883  			job.TaskGroups[1].Name: {
  2884  				Place:             0,
  2885  				Stop:              0,
  2886  				Migrate:           4,
  2887  				DestructiveUpdate: 0,
  2888  				Ignore:            6,
  2889  			},
  2890  		},
  2891  	})
  2892  
  2893  	assertNamesHaveIndexes(t, intRange(0, 3, 0, 3), placeResultsToNames(r.place))
  2894  	assertNamesHaveIndexes(t, intRange(0, 3, 0, 3), stopResultsToNames(r.stop))
  2895  }
  2896  
  2897  // Tests the reconciler handles changing a job such that a deployment is
  2898  // created while scaling up, as the second evaluation.
  2899  func TestReconciler_JobChange_ScaleUp_SecondEval(t *testing.T) {
  2900  	// Scale the job up to 30
  2901  	job := mock.Job()
  2902  	job.TaskGroups[0].Update = noCanaryUpdate
  2903  	job.TaskGroups[0].Count = 30
  2904  
  2905  	// Create a deployment that has placed some allocs
  2906  	d := structs.NewDeployment(job)
  2907  	d.TaskGroups[job.TaskGroups[0].Name] = &structs.DeploymentState{
  2908  		Promoted:     false,
  2909  		DesiredTotal: 30,
  2910  		PlacedAllocs: 20,
  2911  	}
  2912  
  2913  	// Create 10 allocations from the old job
  2914  	var allocs []*structs.Allocation
  2915  	for i := 0; i < 10; i++ {
  2916  		alloc := mock.Alloc()
  2917  		alloc.Job = job
  2918  		alloc.JobID = job.ID
  2919  		alloc.NodeID = structs.GenerateUUID()
  2920  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2921  		alloc.TaskGroup = job.TaskGroups[0].Name
  2922  		allocs = append(allocs, alloc)
  2923  	}
  2924  
  2925  	// Create 20 allocations from the new job
  2926  	handled := make(map[string]allocUpdateType)
  2927  	for i := 10; i < 30; i++ {
  2928  		alloc := mock.Alloc()
  2929  		alloc.Job = job
  2930  		alloc.JobID = job.ID
  2931  		alloc.DeploymentID = d.ID
  2932  		alloc.NodeID = structs.GenerateUUID()
  2933  		alloc.Name = structs.AllocName(job.ID, job.TaskGroups[0].Name, uint(i))
  2934  		alloc.TaskGroup = job.TaskGroups[0].Name
  2935  		allocs = append(allocs, alloc)
  2936  		handled[alloc.ID] = allocUpdateFnIgnore
  2937  	}
  2938  
  2939  	mockUpdateFn := allocUpdateFnMock(handled, allocUpdateFnDestructive)
  2940  	reconciler := NewAllocReconciler(testLogger(), mockUpdateFn, false, job.ID, job, d, allocs, nil)
  2941  	r := reconciler.Compute()
  2942  
  2943  	// Assert the correct results
  2944  	assertResults(t, r, &resultExpectation{
  2945  		createDeployment:  nil,
  2946  		deploymentUpdates: nil,
  2947  		desiredTGUpdates: map[string]*structs.DesiredUpdates{
  2948  			job.TaskGroups[0].Name: {
  2949  				// All should be ignored because nothing has been marked as
  2950  				// healthy.
  2951  				Ignore: 30,
  2952  			},
  2953  		},
  2954  	})
  2955  }