github.com/djenriquez/nomad-1@v0.8.1/scheduler/system_sched_test.go

     1  package scheduler
     2  
     3  import (
     4  	"reflect"
     5  	"sort"
     6  	"testing"
     7  	"time"
     8  
     9  	memdb "github.com/hashicorp/go-memdb"
    10  	"github.com/hashicorp/nomad/helper"
    11  	"github.com/hashicorp/nomad/helper/uuid"
    12  	"github.com/hashicorp/nomad/nomad/mock"
    13  	"github.com/hashicorp/nomad/nomad/structs"
    14  )
    15  
    16  func TestSystemSched_JobRegister(t *testing.T) {
    17  	h := NewHarness(t)
    18  
    19  	// Create some nodes
    20  	for i := 0; i < 10; i++ {
    21  		node := mock.Node()
    22  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
    23  	}
    24  
    25  	// Create a job
    26  	job := mock.SystemJob()
    27  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
    28  
     29  	// Create a mock evaluation to register the job
    30  	eval := &structs.Evaluation{
    31  		Namespace:   structs.DefaultNamespace,
    32  		ID:          uuid.Generate(),
    33  		Priority:    job.Priority,
    34  		TriggeredBy: structs.EvalTriggerJobRegister,
    35  		JobID:       job.ID,
    36  		Status:      structs.EvalStatusPending,
    37  	}
    38  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
    39  
    40  	// Process the evaluation
    41  	err := h.Process(NewSystemScheduler, eval)
    42  	if err != nil {
    43  		t.Fatalf("err: %v", err)
    44  	}
    45  
    46  	// Ensure a single plan
    47  	if len(h.Plans) != 1 {
    48  		t.Fatalf("bad: %#v", h.Plans)
    49  	}
    50  	plan := h.Plans[0]
    51  
    52  	// Ensure the plan doesn't have annotations.
    53  	if plan.Annotations != nil {
    54  		t.Fatalf("expected no annotations")
    55  	}
    56  
    57  	// Ensure the plan allocated
    58  	var planned []*structs.Allocation
    59  	for _, allocList := range plan.NodeAllocation {
    60  		planned = append(planned, allocList...)
    61  	}
    62  	if len(planned) != 10 {
    63  		t.Fatalf("bad: %#v", plan)
    64  	}
    65  
    66  	// Lookup the allocations by JobID
    67  	ws := memdb.NewWatchSet()
    68  	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
    69  	noErr(t, err)
    70  
    71  	// Ensure all allocations placed
    72  	if len(out) != 10 {
    73  		t.Fatalf("bad: %#v", out)
    74  	}
    75  
    76  	// Check the available nodes
    77  	if count, ok := out[0].Metrics.NodesAvailable["dc1"]; !ok || count != 10 {
    78  		t.Fatalf("bad: %#v", out[0].Metrics)
    79  	}
    80  
    81  	// Ensure no allocations are queued
    82  	queued := h.Evals[0].QueuedAllocations["web"]
    83  	if queued != 0 {
    84  		t.Fatalf("expected queued allocations: %v, actual: %v", 0, queued)
    85  	}
    86  
    87  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
    88  }
    89  
    90  func TestSystemSched_JobRegister_StickyAllocs(t *testing.T) {
    91  	h := NewHarness(t)
    92  
    93  	// Create some nodes
    94  	for i := 0; i < 10; i++ {
    95  		node := mock.Node()
    96  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
    97  	}
    98  
    99  	// Create a job
   100  	job := mock.SystemJob()
   101  	job.TaskGroups[0].EphemeralDisk.Sticky = true
   102  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   103  
   104  	// Create a mock evaluation to register the job
   105  	eval := &structs.Evaluation{
   106  		Namespace:   structs.DefaultNamespace,
   107  		ID:          uuid.Generate(),
   108  		Priority:    job.Priority,
   109  		TriggeredBy: structs.EvalTriggerJobRegister,
   110  		JobID:       job.ID,
   111  		Status:      structs.EvalStatusPending,
   112  	}
   113  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   114  
   115  	// Process the evaluation
   116  	if err := h.Process(NewSystemScheduler, eval); err != nil {
   117  		t.Fatalf("err: %v", err)
   118  	}
   119  
   120  	// Ensure the plan allocated
   121  	plan := h.Plans[0]
   122  	var planned []*structs.Allocation
   123  	for _, allocList := range plan.NodeAllocation {
   124  		planned = append(planned, allocList...)
   125  	}
   126  	if len(planned) != 10 {
   127  		t.Fatalf("bad: %#v", plan)
   128  	}
   129  
   130  	// Get an allocation and mark it as failed
   131  	alloc := planned[4].Copy()
   132  	alloc.ClientStatus = structs.AllocClientStatusFailed
   133  	noErr(t, h.State.UpdateAllocsFromClient(h.NextIndex(), []*structs.Allocation{alloc}))
   134  
   135  	// Create a mock evaluation to handle the update
   136  	eval = &structs.Evaluation{
   137  		Namespace:   structs.DefaultNamespace,
   138  		ID:          uuid.Generate(),
   139  		Priority:    job.Priority,
   140  		TriggeredBy: structs.EvalTriggerNodeUpdate,
   141  		JobID:       job.ID,
   142  		Status:      structs.EvalStatusPending,
   143  	}
   144  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
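         	// Use a second harness that shares the same state store so only the new plan is captured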
   145  	h1 := NewHarnessWithState(t, h.State)
   146  	if err := h1.Process(NewSystemScheduler, eval); err != nil {
   147  		t.Fatalf("err: %v", err)
   148  	}
   149  
   150  	// Ensure we have created only one new allocation
   151  	plan = h1.Plans[0]
   152  	var newPlanned []*structs.Allocation
   153  	for _, allocList := range plan.NodeAllocation {
   154  		newPlanned = append(newPlanned, allocList...)
   155  	}
   156  	if len(newPlanned) != 1 {
   157  		t.Fatalf("bad plan: %#v", plan)
   158  	}
   159  	// Ensure that the new allocation was placed on the same node as the older
   160  	// one
   161  	if newPlanned[0].NodeID != alloc.NodeID || newPlanned[0].PreviousAllocation != alloc.ID {
   162  		t.Fatalf("expected: %#v, actual: %#v", alloc, newPlanned[0])
   163  	}
   164  }
   165  
   166  func TestSystemSched_JobRegister_EphemeralDiskConstraint(t *testing.T) {
   167  	h := NewHarness(t)
   168  
    169  	// Create a node
   170  	node := mock.Node()
   171  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   172  
   173  	// Create a job
   174  	job := mock.SystemJob()
   175  	job.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024
   176  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   177  
    178  	// Create another job whose disk ask is large enough that it can't fit on
    179  	// the node alongside the first job
   180  	job1 := mock.SystemJob()
   181  	job1.TaskGroups[0].EphemeralDisk.SizeMB = 60 * 1024
   182  	noErr(t, h.State.UpsertJob(h.NextIndex(), job1))
   183  
   184  	// Create a mock evaluation to register the job
   185  	eval := &structs.Evaluation{
   186  		Namespace:   structs.DefaultNamespace,
   187  		ID:          uuid.Generate(),
   188  		Priority:    job.Priority,
   189  		TriggeredBy: structs.EvalTriggerJobRegister,
   190  		JobID:       job.ID,
   191  		Status:      structs.EvalStatusPending,
   192  	}
   193  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   194  
   195  	// Process the evaluation
   196  	if err := h.Process(NewSystemScheduler, eval); err != nil {
   197  		t.Fatalf("err: %v", err)
   198  	}
   199  
   200  	// Lookup the allocations by JobID
   201  	ws := memdb.NewWatchSet()
   202  	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
   203  	noErr(t, err)
   204  
   205  	// Ensure all allocations placed
   206  	if len(out) != 1 {
   207  		t.Fatalf("bad: %#v", out)
   208  	}
   209  
   210  	// Create a new harness to test the scheduling result for the second job
   211  	h1 := NewHarnessWithState(t, h.State)
   212  	// Create a mock evaluation to register the job
   213  	eval1 := &structs.Evaluation{
   214  		Namespace:   structs.DefaultNamespace,
   215  		ID:          uuid.Generate(),
   216  		Priority:    job1.Priority,
   217  		TriggeredBy: structs.EvalTriggerJobRegister,
   218  		JobID:       job1.ID,
   219  		Status:      structs.EvalStatusPending,
   220  	}
    221  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
   222  
   223  	// Process the evaluation
   224  	if err := h1.Process(NewSystemScheduler, eval1); err != nil {
   225  		t.Fatalf("err: %v", err)
   226  	}
   227  
   228  	out, err = h1.State.AllocsByJob(ws, job.Namespace, job1.ID, false)
   229  	noErr(t, err)
   230  	if len(out) != 0 {
   231  		t.Fatalf("bad: %#v", out)
   232  	}
   233  }
   234  
   235  func TestSystemSched_ExhaustResources(t *testing.T) {
   236  	h := NewHarness(t)
   237  
    238  	// Create a node
   239  	node := mock.Node()
   240  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   241  
   242  	// Create a service job which consumes most of the system resources
   243  	svcJob := mock.Job()
   244  	svcJob.TaskGroups[0].Count = 1
   245  	svcJob.TaskGroups[0].Tasks[0].Resources.CPU = 3600
   246  	noErr(t, h.State.UpsertJob(h.NextIndex(), svcJob))
   247  
   248  	// Create a mock evaluation to register the job
   249  	eval := &structs.Evaluation{
   250  		Namespace:   structs.DefaultNamespace,
   251  		ID:          uuid.Generate(),
   252  		Priority:    svcJob.Priority,
   253  		TriggeredBy: structs.EvalTriggerJobRegister,
   254  		JobID:       svcJob.ID,
   255  		Status:      structs.EvalStatusPending,
   256  	}
   257  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   258  	// Process the evaluation
   259  	err := h.Process(NewServiceScheduler, eval)
   260  	if err != nil {
   261  		t.Fatalf("err: %v", err)
   262  	}
   263  
   264  	// Create a system job
   265  	job := mock.SystemJob()
   266  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   267  
   268  	// Create a mock evaluation to register the job
   269  	eval1 := &structs.Evaluation{
   270  		Namespace:   structs.DefaultNamespace,
   271  		ID:          uuid.Generate(),
   272  		Priority:    job.Priority,
   273  		TriggeredBy: structs.EvalTriggerJobRegister,
   274  		JobID:       job.ID,
   275  		Status:      structs.EvalStatusPending,
   276  	}
    277  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
   278  	// Process the evaluation
   279  	if err := h.Process(NewSystemScheduler, eval1); err != nil {
   280  		t.Fatalf("err: %v", err)
   281  	}
   282  
   283  	// Ensure that we have one allocation queued from the system job eval
   284  	queued := h.Evals[1].QueuedAllocations["web"]
   285  	if queued != 1 {
   286  		t.Fatalf("expected: %v, actual: %v", 1, queued)
   287  	}
   288  }
   289  
   290  func TestSystemSched_JobRegister_Annotate(t *testing.T) {
   291  	h := NewHarness(t)
   292  
   293  	// Create some nodes
   294  	for i := 0; i < 10; i++ {
   295  		node := mock.Node()
   296  		if i < 9 {
   297  			node.NodeClass = "foo"
   298  		} else {
   299  			node.NodeClass = "bar"
   300  		}
    301  		noErr(t, node.ComputeClass())
   302  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   303  	}
   304  
   305  	// Create a job constraining on node class
   306  	job := mock.SystemJob()
   307  	fooConstraint := &structs.Constraint{
   308  		LTarget: "${node.class}",
   309  		RTarget: "foo",
   310  		Operand: "==",
   311  	}
   312  	job.Constraints = append(job.Constraints, fooConstraint)
   313  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   314  
    315  	// Create a mock evaluation to register the job
   316  	eval := &structs.Evaluation{
   317  		Namespace:    structs.DefaultNamespace,
   318  		ID:           uuid.Generate(),
   319  		Priority:     job.Priority,
   320  		TriggeredBy:  structs.EvalTriggerJobRegister,
   321  		JobID:        job.ID,
   322  		AnnotatePlan: true,
   323  		Status:       structs.EvalStatusPending,
   324  	}
   325  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   326  
   327  	// Process the evaluation
   328  	err := h.Process(NewSystemScheduler, eval)
   329  	if err != nil {
   330  		t.Fatalf("err: %v", err)
   331  	}
   332  
   333  	// Ensure a single plan
   334  	if len(h.Plans) != 1 {
   335  		t.Fatalf("bad: %#v", h.Plans)
   336  	}
   337  	plan := h.Plans[0]
   338  
   339  	// Ensure the plan allocated
   340  	var planned []*structs.Allocation
   341  	for _, allocList := range plan.NodeAllocation {
   342  		planned = append(planned, allocList...)
   343  	}
   344  	if len(planned) != 9 {
   345  		t.Fatalf("bad: %#v %d", planned, len(planned))
   346  	}
   347  
   348  	// Lookup the allocations by JobID
   349  	ws := memdb.NewWatchSet()
   350  	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
   351  	noErr(t, err)
   352  
   353  	// Ensure all allocations placed
   354  	if len(out) != 9 {
   355  		t.Fatalf("bad: %#v", out)
   356  	}
   357  
   358  	// Check the available nodes
   359  	if count, ok := out[0].Metrics.NodesAvailable["dc1"]; !ok || count != 10 {
   360  		t.Fatalf("bad: %#v", out[0].Metrics)
   361  	}
   362  
   363  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
   364  
   365  	// Ensure the plan had annotations.
   366  	if plan.Annotations == nil {
   367  		t.Fatalf("expected annotations")
   368  	}
   369  
   370  	desiredTGs := plan.Annotations.DesiredTGUpdates
   371  	if l := len(desiredTGs); l != 1 {
   372  		t.Fatalf("incorrect number of task groups; got %v; want %v", l, 1)
   373  	}
   374  
   375  	desiredChanges, ok := desiredTGs["web"]
   376  	if !ok {
   377  		t.Fatalf("expected task group web to have desired changes")
   378  	}
   379  
   380  	expected := &structs.DesiredUpdates{Place: 9}
   381  	if !reflect.DeepEqual(desiredChanges, expected) {
   382  		t.Fatalf("Unexpected desired updates; got %#v; want %#v", desiredChanges, expected)
   383  	}
   384  }
   385  
   386  func TestSystemSched_JobRegister_AddNode(t *testing.T) {
   387  	h := NewHarness(t)
   388  
   389  	// Create some nodes
   390  	var nodes []*structs.Node
   391  	for i := 0; i < 10; i++ {
   392  		node := mock.Node()
   393  		nodes = append(nodes, node)
   394  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   395  	}
   396  
   397  	// Generate a fake job with allocations
   398  	job := mock.SystemJob()
   399  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   400  
   401  	var allocs []*structs.Allocation
   402  	for _, node := range nodes {
   403  		alloc := mock.Alloc()
   404  		alloc.Job = job
   405  		alloc.JobID = job.ID
   406  		alloc.NodeID = node.ID
   407  		alloc.Name = "my-job.web[0]"
   408  		allocs = append(allocs, alloc)
   409  	}
   410  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
   411  
   412  	// Add a new node.
   413  	node := mock.Node()
   414  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   415  
   416  	// Create a mock evaluation to deal with the node update
   417  	eval := &structs.Evaluation{
   418  		Namespace:   structs.DefaultNamespace,
   419  		ID:          uuid.Generate(),
   420  		Priority:    50,
   421  		TriggeredBy: structs.EvalTriggerNodeUpdate,
   422  		JobID:       job.ID,
   423  		Status:      structs.EvalStatusPending,
   424  	}
   425  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   426  	// Process the evaluation
   427  	err := h.Process(NewSystemScheduler, eval)
   428  	if err != nil {
   429  		t.Fatalf("err: %v", err)
   430  	}
   431  
   432  	// Ensure a single plan
   433  	if len(h.Plans) != 1 {
   434  		t.Fatalf("bad: %#v", h.Plans)
   435  	}
   436  	plan := h.Plans[0]
   437  
   438  	// Ensure the plan had no node updates
   439  	var update []*structs.Allocation
   440  	for _, updateList := range plan.NodeUpdate {
   441  		update = append(update, updateList...)
   442  	}
   443  	if len(update) != 0 {
   444  		t.Log(len(update))
   445  		t.Fatalf("bad: %#v", plan)
   446  	}
   447  
   448  	// Ensure the plan allocated on the new node
   449  	var planned []*structs.Allocation
   450  	for _, allocList := range plan.NodeAllocation {
   451  		planned = append(planned, allocList...)
   452  	}
   453  	if len(planned) != 1 {
   454  		t.Fatalf("bad: %#v", plan)
   455  	}
   456  
   457  	// Ensure it allocated on the right node
   458  	if _, ok := plan.NodeAllocation[node.ID]; !ok {
   459  		t.Fatalf("allocated on wrong node: %#v", plan)
   460  	}
   461  
   462  	// Lookup the allocations by JobID
   463  	ws := memdb.NewWatchSet()
   464  	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
   465  	noErr(t, err)
   466  
   467  	// Ensure all allocations placed
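         	// Drop any terminal allocs so that only live placements are counted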
   468  	out, _ = structs.FilterTerminalAllocs(out)
   469  	if len(out) != 11 {
   470  		t.Fatalf("bad: %#v", out)
   471  	}
   472  
   473  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
   474  }
   475  
   476  func TestSystemSched_JobRegister_AllocFail(t *testing.T) {
   477  	h := NewHarness(t)
   478  
   479  	// Create NO nodes
   480  	// Create a job
   481  	job := mock.SystemJob()
   482  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   483  
   484  	// Create a mock evaluation to register the job
   485  	eval := &structs.Evaluation{
   486  		Namespace:   structs.DefaultNamespace,
   487  		ID:          uuid.Generate(),
   488  		Priority:    job.Priority,
   489  		TriggeredBy: structs.EvalTriggerJobRegister,
   490  		JobID:       job.ID,
   491  		Status:      structs.EvalStatusPending,
   492  	}
   493  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   494  	// Process the evaluation
   495  	err := h.Process(NewSystemScheduler, eval)
   496  	if err != nil {
   497  		t.Fatalf("err: %v", err)
   498  	}
   499  
   500  	// Ensure no plan as this should be a no-op.
   501  	if len(h.Plans) != 0 {
   502  		t.Fatalf("bad: %#v", h.Plans)
   503  	}
   504  
   505  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
   506  }
   507  
   508  func TestSystemSched_JobModify(t *testing.T) {
   509  	h := NewHarness(t)
   510  
   511  	// Create some nodes
   512  	var nodes []*structs.Node
   513  	for i := 0; i < 10; i++ {
   514  		node := mock.Node()
   515  		nodes = append(nodes, node)
   516  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   517  	}
   518  
   519  	// Generate a fake job with allocations
   520  	job := mock.SystemJob()
   521  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   522  
   523  	var allocs []*structs.Allocation
   524  	for _, node := range nodes {
   525  		alloc := mock.Alloc()
   526  		alloc.Job = job
   527  		alloc.JobID = job.ID
   528  		alloc.NodeID = node.ID
   529  		alloc.Name = "my-job.web[0]"
   530  		allocs = append(allocs, alloc)
   531  	}
   532  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
   533  
    534  	// Add a few terminal status allocations; these should be ignored
   535  	var terminal []*structs.Allocation
   536  	for i := 0; i < 5; i++ {
   537  		alloc := mock.Alloc()
   538  		alloc.Job = job
   539  		alloc.JobID = job.ID
   540  		alloc.NodeID = nodes[i].ID
   541  		alloc.Name = "my-job.web[0]"
   542  		alloc.DesiredStatus = structs.AllocDesiredStatusStop
   543  		terminal = append(terminal, alloc)
   544  	}
   545  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), terminal))
   546  
   547  	// Update the job
   548  	job2 := mock.SystemJob()
   549  	job2.ID = job.ID
   550  
    551  	// Update the task such that the update cannot be done in-place
   552  	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
   553  	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
   554  
    555  	// Create a mock evaluation to deal with the job update
   556  	eval := &structs.Evaluation{
   557  		Namespace:   structs.DefaultNamespace,
   558  		ID:          uuid.Generate(),
   559  		Priority:    50,
   560  		TriggeredBy: structs.EvalTriggerJobRegister,
   561  		JobID:       job.ID,
   562  		Status:      structs.EvalStatusPending,
   563  	}
   564  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   565  
   566  	// Process the evaluation
   567  	err := h.Process(NewSystemScheduler, eval)
   568  	if err != nil {
   569  		t.Fatalf("err: %v", err)
   570  	}
   571  
   572  	// Ensure a single plan
   573  	if len(h.Plans) != 1 {
   574  		t.Fatalf("bad: %#v", h.Plans)
   575  	}
   576  	plan := h.Plans[0]
   577  
   578  	// Ensure the plan evicted all allocs
   579  	var update []*structs.Allocation
   580  	for _, updateList := range plan.NodeUpdate {
   581  		update = append(update, updateList...)
   582  	}
   583  	if len(update) != len(allocs) {
   584  		t.Fatalf("bad: %#v", plan)
   585  	}
   586  
   587  	// Ensure the plan allocated
   588  	var planned []*structs.Allocation
   589  	for _, allocList := range plan.NodeAllocation {
   590  		planned = append(planned, allocList...)
   591  	}
   592  	if len(planned) != 10 {
   593  		t.Fatalf("bad: %#v", plan)
   594  	}
   595  
   596  	// Lookup the allocations by JobID
   597  	ws := memdb.NewWatchSet()
   598  	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
   599  	noErr(t, err)
   600  
   601  	// Ensure all allocations placed
   602  	out, _ = structs.FilterTerminalAllocs(out)
   603  	if len(out) != 10 {
   604  		t.Fatalf("bad: %#v", out)
   605  	}
   606  
   607  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
   608  }
   609  
   610  func TestSystemSched_JobModify_Rolling(t *testing.T) {
   611  	h := NewHarness(t)
   612  
   613  	// Create some nodes
   614  	var nodes []*structs.Node
   615  	for i := 0; i < 10; i++ {
   616  		node := mock.Node()
   617  		nodes = append(nodes, node)
   618  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   619  	}
   620  
   621  	// Generate a fake job with allocations
   622  	job := mock.SystemJob()
   623  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   624  
   625  	var allocs []*structs.Allocation
   626  	for _, node := range nodes {
   627  		alloc := mock.Alloc()
   628  		alloc.Job = job
   629  		alloc.JobID = job.ID
   630  		alloc.NodeID = node.ID
   631  		alloc.Name = "my-job.web[0]"
   632  		allocs = append(allocs, alloc)
   633  	}
   634  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
   635  
   636  	// Update the job
   637  	job2 := mock.SystemJob()
   638  	job2.ID = job.ID
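         	// Configure a rolling update so at most MaxParallel allocs are replaced per evaluation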
   639  	job2.Update = structs.UpdateStrategy{
   640  		Stagger:     30 * time.Second,
   641  		MaxParallel: 5,
   642  	}
   643  
    644  	// Update the task such that the update cannot be done in-place
   645  	job2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
   646  	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
   647  
    648  	// Create a mock evaluation to deal with the job update
   649  	eval := &structs.Evaluation{
   650  		Namespace:   structs.DefaultNamespace,
   651  		ID:          uuid.Generate(),
   652  		Priority:    50,
   653  		TriggeredBy: structs.EvalTriggerJobRegister,
   654  		JobID:       job.ID,
   655  		Status:      structs.EvalStatusPending,
   656  	}
   657  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   658  	// Process the evaluation
   659  	err := h.Process(NewSystemScheduler, eval)
   660  	if err != nil {
   661  		t.Fatalf("err: %v", err)
   662  	}
   663  
   664  	// Ensure a single plan
   665  	if len(h.Plans) != 1 {
   666  		t.Fatalf("bad: %#v", h.Plans)
   667  	}
   668  	plan := h.Plans[0]
   669  
   670  	// Ensure the plan evicted only MaxParallel
   671  	var update []*structs.Allocation
   672  	for _, updateList := range plan.NodeUpdate {
   673  		update = append(update, updateList...)
   674  	}
   675  	if len(update) != job2.Update.MaxParallel {
   676  		t.Fatalf("bad: %#v", plan)
   677  	}
   678  
   679  	// Ensure the plan allocated
   680  	var planned []*structs.Allocation
   681  	for _, allocList := range plan.NodeAllocation {
   682  		planned = append(planned, allocList...)
   683  	}
   684  	if len(planned) != job2.Update.MaxParallel {
   685  		t.Fatalf("bad: %#v", plan)
   686  	}
   687  
   688  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
   689  
   690  	// Ensure a follow up eval was created
   691  	eval = h.Evals[0]
   692  	if eval.NextEval == "" {
   693  		t.Fatalf("missing next eval")
   694  	}
   695  
    696  	// Check that a follow-up eval was created
   697  	if len(h.CreateEvals) == 0 {
   698  		t.Fatalf("missing created eval")
   699  	}
   700  	create := h.CreateEvals[0]
   701  	if eval.NextEval != create.ID {
   702  		t.Fatalf("ID mismatch")
   703  	}
   704  	if create.PreviousEval != eval.ID {
   705  		t.Fatalf("missing previous eval")
   706  	}
   707  
   708  	if create.TriggeredBy != structs.EvalTriggerRollingUpdate {
   709  		t.Fatalf("bad: %#v", create)
   710  	}
   711  }
   712  
   713  func TestSystemSched_JobModify_InPlace(t *testing.T) {
   714  	h := NewHarness(t)
   715  
   716  	// Create some nodes
   717  	var nodes []*structs.Node
   718  	for i := 0; i < 10; i++ {
   719  		node := mock.Node()
   720  		nodes = append(nodes, node)
   721  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   722  	}
   723  
   724  	// Generate a fake job with allocations
   725  	job := mock.SystemJob()
   726  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   727  
   728  	var allocs []*structs.Allocation
   729  	for _, node := range nodes {
   730  		alloc := mock.Alloc()
   731  		alloc.Job = job
   732  		alloc.JobID = job.ID
   733  		alloc.NodeID = node.ID
   734  		alloc.Name = "my-job.web[0]"
   735  		allocs = append(allocs, alloc)
   736  	}
   737  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
   738  
   739  	// Update the job
   740  	job2 := mock.SystemJob()
   741  	job2.ID = job.ID
   742  	noErr(t, h.State.UpsertJob(h.NextIndex(), job2))
   743  
    744  	// Create a mock evaluation to deal with the job update
   745  	eval := &structs.Evaluation{
   746  		Namespace:   structs.DefaultNamespace,
   747  		ID:          uuid.Generate(),
   748  		Priority:    50,
   749  		TriggeredBy: structs.EvalTriggerJobRegister,
   750  		JobID:       job.ID,
   751  		Status:      structs.EvalStatusPending,
   752  	}
   753  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   754  
   755  	// Process the evaluation
   756  	err := h.Process(NewSystemScheduler, eval)
   757  	if err != nil {
   758  		t.Fatalf("err: %v", err)
   759  	}
   760  
   761  	// Ensure a single plan
   762  	if len(h.Plans) != 1 {
   763  		t.Fatalf("bad: %#v", h.Plans)
   764  	}
   765  	plan := h.Plans[0]
   766  
   767  	// Ensure the plan did not evict any allocs
   768  	var update []*structs.Allocation
   769  	for _, updateList := range plan.NodeUpdate {
   770  		update = append(update, updateList...)
   771  	}
   772  	if len(update) != 0 {
   773  		t.Fatalf("bad: %#v", plan)
   774  	}
   775  
   776  	// Ensure the plan updated the existing allocs
   777  	var planned []*structs.Allocation
   778  	for _, allocList := range plan.NodeAllocation {
   779  		planned = append(planned, allocList...)
   780  	}
   781  	if len(planned) != 10 {
   782  		t.Fatalf("bad: %#v", plan)
   783  	}
   784  	for _, p := range planned {
   785  		if p.Job != job2 {
   786  			t.Fatalf("should update job")
   787  		}
   788  	}
   789  
   790  	// Lookup the allocations by JobID
   791  	ws := memdb.NewWatchSet()
   792  	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
   793  	noErr(t, err)
   794  
   795  	// Ensure all allocations placed
   796  	if len(out) != 10 {
   797  		t.Fatalf("bad: %#v", out)
   798  	}
   799  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
   800  
   801  	// Verify the network did not change
   802  	rp := structs.Port{Label: "admin", Value: 5000}
   803  	for _, alloc := range out {
   804  		for _, resources := range alloc.TaskResources {
   805  			if resources.Networks[0].ReservedPorts[0] != rp {
   806  				t.Fatalf("bad: %#v", alloc)
   807  			}
   808  		}
   809  	}
   810  }
   811  
   812  func TestSystemSched_JobDeregister_Purged(t *testing.T) {
   813  	h := NewHarness(t)
   814  
   815  	// Create some nodes
   816  	var nodes []*structs.Node
   817  	for i := 0; i < 10; i++ {
   818  		node := mock.Node()
   819  		nodes = append(nodes, node)
   820  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   821  	}
   822  
   823  	// Generate a fake job with allocations
   824  	job := mock.SystemJob()
   825  
   826  	var allocs []*structs.Allocation
   827  	for _, node := range nodes {
   828  		alloc := mock.Alloc()
   829  		alloc.Job = job
   830  		alloc.JobID = job.ID
   831  		alloc.NodeID = node.ID
   832  		alloc.Name = "my-job.web[0]"
   833  		allocs = append(allocs, alloc)
   834  	}
   835  	for _, alloc := range allocs {
   836  		noErr(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
   837  	}
   838  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
   839  
   840  	// Create a mock evaluation to deregister the job
   841  	eval := &structs.Evaluation{
   842  		Namespace:   structs.DefaultNamespace,
   843  		ID:          uuid.Generate(),
   844  		Priority:    50,
   845  		TriggeredBy: structs.EvalTriggerJobDeregister,
   846  		JobID:       job.ID,
   847  		Status:      structs.EvalStatusPending,
   848  	}
   849  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   850  
   851  	// Process the evaluation
   852  	err := h.Process(NewSystemScheduler, eval)
   853  	if err != nil {
   854  		t.Fatalf("err: %v", err)
   855  	}
   856  
   857  	// Ensure a single plan
   858  	if len(h.Plans) != 1 {
   859  		t.Fatalf("bad: %#v", h.Plans)
   860  	}
   861  	plan := h.Plans[0]
   862  
   863  	// Ensure the plan evicted the job from all nodes.
   864  	for _, node := range nodes {
   865  		if len(plan.NodeUpdate[node.ID]) != 1 {
   866  			t.Fatalf("bad: %#v", plan)
   867  		}
   868  	}
   869  
   870  	// Lookup the allocations by JobID
   871  	ws := memdb.NewWatchSet()
   872  	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
   873  	noErr(t, err)
   874  
   875  	// Ensure no remaining allocations
   876  	out, _ = structs.FilterTerminalAllocs(out)
   877  	if len(out) != 0 {
   878  		t.Fatalf("bad: %#v", out)
   879  	}
   880  
   881  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
   882  }
   883  
   884  func TestSystemSched_JobDeregister_Stopped(t *testing.T) {
   885  	h := NewHarness(t)
   886  
   887  	// Create some nodes
   888  	var nodes []*structs.Node
   889  	for i := 0; i < 10; i++ {
   890  		node := mock.Node()
   891  		nodes = append(nodes, node)
   892  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   893  	}
   894  
   895  	// Generate a fake job with allocations
   896  	job := mock.SystemJob()
   897  	job.Stop = true
   898  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   899  
   900  	var allocs []*structs.Allocation
   901  	for _, node := range nodes {
   902  		alloc := mock.Alloc()
   903  		alloc.Job = job
   904  		alloc.JobID = job.ID
   905  		alloc.NodeID = node.ID
   906  		alloc.Name = "my-job.web[0]"
   907  		allocs = append(allocs, alloc)
   908  	}
   909  	for _, alloc := range allocs {
   910  		noErr(t, h.State.UpsertJobSummary(h.NextIndex(), mock.JobSummary(alloc.JobID)))
   911  	}
   912  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), allocs))
   913  
   914  	// Create a mock evaluation to deregister the job
   915  	eval := &structs.Evaluation{
   916  		Namespace:   structs.DefaultNamespace,
   917  		ID:          uuid.Generate(),
   918  		Priority:    50,
   919  		TriggeredBy: structs.EvalTriggerJobDeregister,
   920  		JobID:       job.ID,
   921  		Status:      structs.EvalStatusPending,
   922  	}
   923  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   924  
   925  	// Process the evaluation
   926  	err := h.Process(NewSystemScheduler, eval)
   927  	if err != nil {
   928  		t.Fatalf("err: %v", err)
   929  	}
   930  
   931  	// Ensure a single plan
   932  	if len(h.Plans) != 1 {
   933  		t.Fatalf("bad: %#v", h.Plans)
   934  	}
   935  	plan := h.Plans[0]
   936  
   937  	// Ensure the plan evicted the job from all nodes.
   938  	for _, node := range nodes {
   939  		if len(plan.NodeUpdate[node.ID]) != 1 {
   940  			t.Fatalf("bad: %#v", plan)
   941  		}
   942  	}
   943  
   944  	// Lookup the allocations by JobID
   945  	ws := memdb.NewWatchSet()
   946  	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
   947  	noErr(t, err)
   948  
   949  	// Ensure no remaining allocations
   950  	out, _ = structs.FilterTerminalAllocs(out)
   951  	if len(out) != 0 {
   952  		t.Fatalf("bad: %#v", out)
   953  	}
   954  
   955  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
   956  }
   957  
   958  func TestSystemSched_NodeDown(t *testing.T) {
   959  	h := NewHarness(t)
   960  
   961  	// Register a down node
   962  	node := mock.Node()
   963  	node.Status = structs.NodeStatusDown
   964  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
   965  
   966  	// Generate a fake job allocated on that node.
   967  	job := mock.SystemJob()
   968  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
   969  
   970  	alloc := mock.Alloc()
   971  	alloc.Job = job
   972  	alloc.JobID = job.ID
   973  	alloc.NodeID = node.ID
   974  	alloc.Name = "my-job.web[0]"
   975  	alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
   976  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
   977  
    978  	// Create a mock evaluation to deal with the node going down
   979  	eval := &structs.Evaluation{
   980  		Namespace:   structs.DefaultNamespace,
   981  		ID:          uuid.Generate(),
   982  		Priority:    50,
   983  		TriggeredBy: structs.EvalTriggerNodeUpdate,
   984  		JobID:       job.ID,
   985  		NodeID:      node.ID,
   986  		Status:      structs.EvalStatusPending,
   987  	}
   988  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
   989  
   990  	// Process the evaluation
   991  	err := h.Process(NewSystemScheduler, eval)
   992  	if err != nil {
   993  		t.Fatalf("err: %v", err)
   994  	}
   995  
   996  	// Ensure a single plan
   997  	if len(h.Plans) != 1 {
   998  		t.Fatalf("bad: %#v", h.Plans)
   999  	}
  1000  	plan := h.Plans[0]
  1001  
  1002  	// Ensure the plan evicted all allocs
  1003  	if len(plan.NodeUpdate[node.ID]) != 1 {
  1004  		t.Fatalf("bad: %#v", plan)
  1005  	}
  1006  
  1007  	// Ensure the plan updated the allocation.
  1008  	var planned []*structs.Allocation
  1009  	for _, allocList := range plan.NodeUpdate {
  1010  		planned = append(planned, allocList...)
  1011  	}
  1012  	if len(planned) != 1 {
  1013  		t.Fatalf("bad: %#v", plan)
  1014  	}
  1015  
   1016  	// Ensure the allocation is stopped or marked lost
  1017  	if p := planned[0]; p.DesiredStatus != structs.AllocDesiredStatusStop &&
  1018  		p.ClientStatus != structs.AllocClientStatusLost {
  1019  		t.Fatalf("bad: %#v", planned[0])
  1020  	}
  1021  
  1022  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
  1023  }
  1024  
  1025  func TestSystemSched_NodeDrain_Down(t *testing.T) {
  1026  	h := NewHarness(t)
  1027  
  1028  	// Register a draining node
  1029  	node := mock.Node()
  1030  	node.Drain = true
  1031  	node.Status = structs.NodeStatusDown
  1032  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
  1033  
  1034  	// Generate a fake job allocated on that node.
  1035  	job := mock.SystemJob()
  1036  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
  1037  
  1038  	alloc := mock.Alloc()
  1039  	alloc.Job = job
  1040  	alloc.JobID = job.ID
  1041  	alloc.NodeID = node.ID
  1042  	alloc.Name = "my-job.web[0]"
  1043  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
  1044  
  1045  	// Create a mock evaluation to deal with the node update
  1046  	eval := &structs.Evaluation{
  1047  		Namespace:   structs.DefaultNamespace,
  1048  		ID:          uuid.Generate(),
  1049  		Priority:    50,
  1050  		TriggeredBy: structs.EvalTriggerNodeUpdate,
  1051  		JobID:       job.ID,
  1052  		NodeID:      node.ID,
  1053  		Status:      structs.EvalStatusPending,
  1054  	}
  1055  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
  1056  
  1057  	// Process the evaluation
   1058  	err := h.Process(NewSystemScheduler, eval)
  1059  	if err != nil {
  1060  		t.Fatalf("err: %v", err)
  1061  	}
  1062  
  1063  	// Ensure a single plan
  1064  	if len(h.Plans) != 1 {
  1065  		t.Fatalf("bad: %#v", h.Plans)
  1066  	}
  1067  	plan := h.Plans[0]
  1068  
   1069  	// Ensure the plan evicted non-terminal allocs
  1070  	if len(plan.NodeUpdate[node.ID]) != 1 {
  1071  		t.Fatalf("bad: %#v", plan)
  1072  	}
  1073  
  1074  	// Ensure that the allocation is marked as lost
  1075  	var lostAllocs []string
  1076  	for _, alloc := range plan.NodeUpdate[node.ID] {
  1077  		lostAllocs = append(lostAllocs, alloc.ID)
  1078  	}
  1079  	expected := []string{alloc.ID}
  1080  
  1081  	if !reflect.DeepEqual(lostAllocs, expected) {
  1082  		t.Fatalf("expected: %v, actual: %v", expected, lostAllocs)
  1083  	}
  1084  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
  1085  }
  1086  
  1087  func TestSystemSched_NodeDrain(t *testing.T) {
  1088  	h := NewHarness(t)
  1089  
  1090  	// Register a draining node
  1091  	node := mock.Node()
  1092  	node.Drain = true
  1093  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
  1094  
  1095  	// Generate a fake job allocated on that node.
  1096  	job := mock.SystemJob()
  1097  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
  1098  
  1099  	alloc := mock.Alloc()
  1100  	alloc.Job = job
  1101  	alloc.JobID = job.ID
  1102  	alloc.NodeID = node.ID
  1103  	alloc.Name = "my-job.web[0]"
  1104  	alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
  1105  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
  1106  
  1107  	// Create a mock evaluation to deal with drain
  1108  	eval := &structs.Evaluation{
  1109  		Namespace:   structs.DefaultNamespace,
  1110  		ID:          uuid.Generate(),
  1111  		Priority:    50,
  1112  		TriggeredBy: structs.EvalTriggerNodeUpdate,
  1113  		JobID:       job.ID,
  1114  		NodeID:      node.ID,
  1115  		Status:      structs.EvalStatusPending,
  1116  	}
  1117  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
  1118  
  1119  	// Process the evaluation
  1120  	err := h.Process(NewSystemScheduler, eval)
  1121  	if err != nil {
  1122  		t.Fatalf("err: %v", err)
  1123  	}
  1124  
  1125  	// Ensure a single plan
  1126  	if len(h.Plans) != 1 {
  1127  		t.Fatalf("bad: %#v", h.Plans)
  1128  	}
  1129  	plan := h.Plans[0]
  1130  
  1131  	// Ensure the plan evicted all allocs
  1132  	if len(plan.NodeUpdate[node.ID]) != 1 {
  1133  		t.Fatalf("bad: %#v", plan)
  1134  	}
  1135  
  1136  	// Ensure the plan updated the allocation.
  1137  	var planned []*structs.Allocation
  1138  	for _, allocList := range plan.NodeUpdate {
  1139  		planned = append(planned, allocList...)
  1140  	}
  1141  	if len(planned) != 1 {
  1142  		t.Log(len(planned))
  1143  		t.Fatalf("bad: %#v", plan)
  1144  	}
  1145  
   1146  	// Ensure the allocation is stopped
  1147  	if planned[0].DesiredStatus != structs.AllocDesiredStatusStop {
  1148  		t.Fatalf("bad: %#v", planned[0])
  1149  	}
  1150  
  1151  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
  1152  }
  1153  
  1154  func TestSystemSched_NodeUpdate(t *testing.T) {
  1155  	h := NewHarness(t)
  1156  
  1157  	// Register a node
  1158  	node := mock.Node()
  1159  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
  1160  
  1161  	// Generate a fake job allocated on that node.
  1162  	job := mock.SystemJob()
  1163  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
  1164  
  1165  	alloc := mock.Alloc()
  1166  	alloc.Job = job
  1167  	alloc.JobID = job.ID
  1168  	alloc.NodeID = node.ID
  1169  	alloc.Name = "my-job.web[0]"
  1170  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc}))
  1171  
   1172  	// Create a mock evaluation to deal with the node update
  1173  	eval := &structs.Evaluation{
  1174  		Namespace:   structs.DefaultNamespace,
  1175  		ID:          uuid.Generate(),
  1176  		Priority:    50,
  1177  		TriggeredBy: structs.EvalTriggerNodeUpdate,
  1178  		JobID:       job.ID,
  1179  		NodeID:      node.ID,
  1180  		Status:      structs.EvalStatusPending,
  1181  	}
  1182  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
  1183  
  1184  	// Process the evaluation
  1185  	err := h.Process(NewSystemScheduler, eval)
  1186  	if err != nil {
  1187  		t.Fatalf("err: %v", err)
  1188  	}
  1189  
   1190  	// Ensure that the count of queued allocations is zero
  1191  	if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 {
  1192  		t.Fatalf("bad queued allocations: %#v", h.Evals[0].QueuedAllocations)
  1193  	}
  1194  
  1195  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
  1196  }
  1197  
  1198  func TestSystemSched_RetryLimit(t *testing.T) {
  1199  	h := NewHarness(t)
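         	// Use a planner that rejects every plan so the scheduler exhausts its retry limit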
  1200  	h.Planner = &RejectPlan{h}
  1201  
  1202  	// Create some nodes
  1203  	for i := 0; i < 10; i++ {
  1204  		node := mock.Node()
  1205  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
  1206  	}
  1207  
  1208  	// Create a job
  1209  	job := mock.SystemJob()
  1210  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
  1211  
   1212  	// Create a mock evaluation to register the job
  1213  	eval := &structs.Evaluation{
  1214  		Namespace:   structs.DefaultNamespace,
  1215  		ID:          uuid.Generate(),
  1216  		Priority:    job.Priority,
  1217  		TriggeredBy: structs.EvalTriggerJobRegister,
  1218  		JobID:       job.ID,
  1219  		Status:      structs.EvalStatusPending,
  1220  	}
  1221  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
  1222  
  1223  	// Process the evaluation
  1224  	err := h.Process(NewSystemScheduler, eval)
  1225  	if err != nil {
  1226  		t.Fatalf("err: %v", err)
  1227  	}
  1228  
   1229  	// Ensure plans were submitted (the scheduler retries after each rejection)
  1230  	if len(h.Plans) == 0 {
  1231  		t.Fatalf("bad: %#v", h.Plans)
  1232  	}
  1233  
  1234  	// Lookup the allocations by JobID
  1235  	ws := memdb.NewWatchSet()
  1236  	out, err := h.State.AllocsByJob(ws, job.Namespace, job.ID, false)
  1237  	noErr(t, err)
  1238  
  1239  	// Ensure no allocations placed
  1240  	if len(out) != 0 {
  1241  		t.Fatalf("bad: %#v", out)
  1242  	}
  1243  
  1244  	// Should hit the retry limit
  1245  	h.AssertEvalStatus(t, structs.EvalStatusFailed)
  1246  }
  1247  
  1248  // This test ensures that the scheduler doesn't increment the queued allocation
  1249  // count for a task group when allocations can't be created on currently
   1250  // available nodes because of constraint mismatches.
  1251  func TestSystemSched_Queued_With_Constraints(t *testing.T) {
  1252  	h := NewHarness(t)
  1253  
  1254  	// Register a node
  1255  	node := mock.Node()
  1256  	node.Attributes["kernel.name"] = "darwin"
  1257  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
  1258  
  1259  	// Generate a system job which can't be placed on the node
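         	// (mock.SystemJob is constrained to ${attr.kernel.name} = linux, so the darwin node is filtered out)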
  1260  	job := mock.SystemJob()
  1261  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
  1262  
   1263  	// Create a mock evaluation to deal with the node update
  1264  	eval := &structs.Evaluation{
  1265  		Namespace:   structs.DefaultNamespace,
  1266  		ID:          uuid.Generate(),
  1267  		Priority:    50,
  1268  		TriggeredBy: structs.EvalTriggerNodeUpdate,
  1269  		JobID:       job.ID,
  1270  		NodeID:      node.ID,
  1271  		Status:      structs.EvalStatusPending,
  1272  	}
  1273  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
  1274  
  1275  	// Process the evaluation
  1276  	err := h.Process(NewSystemScheduler, eval)
  1277  	if err != nil {
  1278  		t.Fatalf("err: %v", err)
  1279  	}
  1280  
   1281  	// Ensure that the count of queued allocations is zero
  1282  	if val, ok := h.Evals[0].QueuedAllocations["web"]; !ok || val != 0 {
  1283  		t.Fatalf("bad queued allocations: %#v", h.Evals[0].QueuedAllocations)
  1284  	}
  1285  }
  1286  
  1287  func TestSystemSched_ChainedAlloc(t *testing.T) {
  1288  	h := NewHarness(t)
  1289  
  1290  	// Create some nodes
  1291  	for i := 0; i < 10; i++ {
  1292  		node := mock.Node()
  1293  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
  1294  	}
  1295  
  1296  	// Create a job
  1297  	job := mock.SystemJob()
  1298  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
  1299  
  1300  	// Create a mock evaluation to register the job
  1301  	eval := &structs.Evaluation{
  1302  		Namespace:   structs.DefaultNamespace,
  1303  		ID:          uuid.Generate(),
  1304  		Priority:    job.Priority,
  1305  		TriggeredBy: structs.EvalTriggerJobRegister,
  1306  		JobID:       job.ID,
  1307  		Status:      structs.EvalStatusPending,
  1308  	}
  1309  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
  1310  	// Process the evaluation
  1311  	if err := h.Process(NewSystemScheduler, eval); err != nil {
  1312  		t.Fatalf("err: %v", err)
  1313  	}
  1314  
  1315  	var allocIDs []string
  1316  	for _, allocList := range h.Plans[0].NodeAllocation {
  1317  		for _, alloc := range allocList {
  1318  			allocIDs = append(allocIDs, alloc.ID)
  1319  		}
  1320  	}
  1321  	sort.Strings(allocIDs)
  1322  
  1323  	// Create a new harness to invoke the scheduler again
  1324  	h1 := NewHarnessWithState(t, h.State)
  1325  	job1 := mock.SystemJob()
  1326  	job1.ID = job.ID
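         	// Change the task's environment so the update is destructive and the replacement allocs are chained to the originals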
  1327  	job1.TaskGroups[0].Tasks[0].Env = make(map[string]string)
  1328  	job1.TaskGroups[0].Tasks[0].Env["foo"] = "bar"
  1329  	noErr(t, h1.State.UpsertJob(h1.NextIndex(), job1))
  1330  
  1331  	// Insert two more nodes
  1332  	for i := 0; i < 2; i++ {
  1333  		node := mock.Node()
  1334  		noErr(t, h.State.UpsertNode(h.NextIndex(), node))
  1335  	}
  1336  
  1337  	// Create a mock evaluation to update the job
  1338  	eval1 := &structs.Evaluation{
  1339  		Namespace:   structs.DefaultNamespace,
  1340  		ID:          uuid.Generate(),
  1341  		Priority:    job1.Priority,
  1342  		TriggeredBy: structs.EvalTriggerJobRegister,
  1343  		JobID:       job1.ID,
  1344  		Status:      structs.EvalStatusPending,
  1345  	}
  1346  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval1}))
  1347  	// Process the evaluation
  1348  	if err := h1.Process(NewSystemScheduler, eval1); err != nil {
  1349  		t.Fatalf("err: %v", err)
  1350  	}
  1351  
  1352  	plan := h1.Plans[0]
  1353  
  1354  	// Collect all the chained allocation ids and the new allocations which
  1355  	// don't have any chained allocations
  1356  	var prevAllocs []string
  1357  	var newAllocs []string
  1358  	for _, allocList := range plan.NodeAllocation {
  1359  		for _, alloc := range allocList {
  1360  			if alloc.PreviousAllocation == "" {
  1361  				newAllocs = append(newAllocs, alloc.ID)
  1362  				continue
  1363  			}
  1364  			prevAllocs = append(prevAllocs, alloc.PreviousAllocation)
  1365  		}
  1366  	}
  1367  	sort.Strings(prevAllocs)
  1368  
   1369  	// Ensure that the new allocations have their corresponding original
  1370  	// allocation ids
  1371  	if !reflect.DeepEqual(prevAllocs, allocIDs) {
   1372  		t.Fatalf("expected: %v, actual: %v", allocIDs, prevAllocs)
  1373  	}
  1374  
   1375  	// Ensure the two new allocations don't have any chained allocations
  1376  	if len(newAllocs) != 2 {
  1377  		t.Fatalf("expected: %v, actual: %v", 2, len(newAllocs))
  1378  	}
  1379  }
  1380  
  1381  func TestSystemSched_PlanWithDrainedNode(t *testing.T) {
  1382  	h := NewHarness(t)
  1383  
   1384  	// Register two nodes with different classes; the first is draining
  1385  	node := mock.Node()
  1386  	node.NodeClass = "green"
  1387  	node.Drain = true
   1388  	noErr(t, node.ComputeClass())
  1389  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
  1390  
  1391  	node2 := mock.Node()
  1392  	node2.NodeClass = "blue"
   1393  	noErr(t, node2.ComputeClass())
  1394  	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
  1395  
  1396  	// Create a Job with two task groups, each constrained on node class
  1397  	job := mock.SystemJob()
  1398  	tg1 := job.TaskGroups[0]
  1399  	tg1.Constraints = append(tg1.Constraints,
  1400  		&structs.Constraint{
  1401  			LTarget: "${node.class}",
  1402  			RTarget: "green",
  1403  			Operand: "==",
  1404  		})
  1405  
  1406  	tg2 := tg1.Copy()
  1407  	tg2.Name = "web2"
  1408  	tg2.Constraints[0].RTarget = "blue"
  1409  	job.TaskGroups = append(job.TaskGroups, tg2)
  1410  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
  1411  
  1412  	// Create an allocation on each node
  1413  	alloc := mock.Alloc()
  1414  	alloc.Job = job
  1415  	alloc.JobID = job.ID
  1416  	alloc.NodeID = node.ID
  1417  	alloc.Name = "my-job.web[0]"
  1418  	alloc.DesiredTransition.Migrate = helper.BoolToPtr(true)
  1419  	alloc.TaskGroup = "web"
  1420  
  1421  	alloc2 := mock.Alloc()
  1422  	alloc2.Job = job
  1423  	alloc2.JobID = job.ID
  1424  	alloc2.NodeID = node2.ID
  1425  	alloc2.Name = "my-job.web2[0]"
  1426  	alloc2.TaskGroup = "web2"
  1427  	noErr(t, h.State.UpsertAllocs(h.NextIndex(), []*structs.Allocation{alloc, alloc2}))
  1428  
  1429  	// Create a mock evaluation to deal with drain
  1430  	eval := &structs.Evaluation{
  1431  		Namespace:   structs.DefaultNamespace,
  1432  		ID:          uuid.Generate(),
  1433  		Priority:    50,
  1434  		TriggeredBy: structs.EvalTriggerNodeUpdate,
  1435  		JobID:       job.ID,
  1436  		NodeID:      node.ID,
  1437  		Status:      structs.EvalStatusPending,
  1438  	}
  1439  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
  1440  
  1441  	// Process the evaluation
  1442  	err := h.Process(NewSystemScheduler, eval)
  1443  	if err != nil {
  1444  		t.Fatalf("err: %v", err)
  1445  	}
  1446  
  1447  	// Ensure a single plan
  1448  	if len(h.Plans) != 1 {
  1449  		t.Fatalf("bad: %#v", h.Plans)
  1450  	}
  1451  	plan := h.Plans[0]
  1452  
   1453  	// Ensure the plan evicted the alloc on the draining node
  1454  	planned := plan.NodeUpdate[node.ID]
  1455  	if len(planned) != 1 {
  1456  		t.Fatalf("bad: %#v", plan)
  1457  	}
  1458  
  1459  	// Ensure the plan didn't place
  1460  	if len(plan.NodeAllocation) != 0 {
  1461  		t.Fatalf("bad: %#v", plan)
  1462  	}
  1463  
   1464  	// Ensure the allocation is stopped
  1465  	if planned[0].DesiredStatus != structs.AllocDesiredStatusStop {
  1466  		t.Fatalf("bad: %#v", planned[0])
  1467  	}
  1468  
  1469  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
  1470  }
  1471  
  1472  func TestSystemSched_QueuedAllocsMultTG(t *testing.T) {
  1473  	h := NewHarness(t)
  1474  
  1475  	// Register two nodes with two different classes
  1476  	node := mock.Node()
  1477  	node.NodeClass = "green"
   1478  	noErr(t, node.ComputeClass())
  1479  	noErr(t, h.State.UpsertNode(h.NextIndex(), node))
  1480  
  1481  	node2 := mock.Node()
  1482  	node2.NodeClass = "blue"
   1483  	noErr(t, node2.ComputeClass())
  1484  	noErr(t, h.State.UpsertNode(h.NextIndex(), node2))
  1485  
  1486  	// Create a Job with two task groups, each constrained on node class
  1487  	job := mock.SystemJob()
  1488  	tg1 := job.TaskGroups[0]
  1489  	tg1.Constraints = append(tg1.Constraints,
  1490  		&structs.Constraint{
  1491  			LTarget: "${node.class}",
  1492  			RTarget: "green",
  1493  			Operand: "==",
  1494  		})
  1495  
  1496  	tg2 := tg1.Copy()
  1497  	tg2.Name = "web2"
  1498  	tg2.Constraints[0].RTarget = "blue"
  1499  	job.TaskGroups = append(job.TaskGroups, tg2)
  1500  	noErr(t, h.State.UpsertJob(h.NextIndex(), job))
  1501  
   1502  	// Create a mock evaluation to deal with the node update
  1503  	eval := &structs.Evaluation{
  1504  		Namespace:   structs.DefaultNamespace,
  1505  		ID:          uuid.Generate(),
  1506  		Priority:    50,
  1507  		TriggeredBy: structs.EvalTriggerNodeUpdate,
  1508  		JobID:       job.ID,
  1509  		NodeID:      node.ID,
  1510  		Status:      structs.EvalStatusPending,
  1511  	}
  1512  	noErr(t, h.State.UpsertEvals(h.NextIndex(), []*structs.Evaluation{eval}))
  1513  
  1514  	// Process the evaluation
  1515  	err := h.Process(NewSystemScheduler, eval)
  1516  	if err != nil {
  1517  		t.Fatalf("err: %v", err)
  1518  	}
  1519  
  1520  	// Ensure a single plan
  1521  	if len(h.Plans) != 1 {
  1522  		t.Fatalf("bad: %#v", h.Plans)
  1523  	}
  1524  
  1525  	qa := h.Evals[0].QueuedAllocations
  1526  	if qa["web"] != 0 || qa["web2"] != 0 {
  1527  		t.Fatalf("bad queued allocations %#v", qa)
  1528  	}
  1529  
  1530  	h.AssertEvalStatus(t, structs.EvalStatusComplete)
  1531  }