github.com/dkerwin/nomad@v0.3.3-0.20160525181927-74554135514b/scheduler/util_test.go

package scheduler

import (
	"fmt"
	"log"
	"os"
	"reflect"
	"testing"

	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

// noErr is used to assert there are no errors
func noErr(t *testing.T, err error) {
	if err != nil {
		t.Fatalf("err: %v", err)
	}
}

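// TestMaterializeTaskGroups verifies that materializeTaskGroups expands a job
// into one entry per desired allocation, keyed by names like "my-job.web[0]".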
func TestMaterializeTaskGroups(t *testing.T) {
	job := mock.Job()
	index := materializeTaskGroups(job)
	if len(index) != 10 {
		t.Fatalf("Bad: %#v", index)
	}

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("my-job.web[%d]", i)
		tg, ok := index[name]
		if !ok {
			t.Fatalf("bad")
		}
		if tg != job.TaskGroups[0] {
			t.Fatalf("bad")
		}
	}
}

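// TestDiffAllocs verifies that diffAllocs buckets existing allocations into
// the place, update, migrate, stop, and ignore sets.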
func TestDiffAllocs(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"zip":  false,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Evict the 11th
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
			Job:    oldJob,
		},

		// Migrate the 3rd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[2]",
			Job:    oldJob,
		},
	}

	diff := diffAllocs(job, tainted, required, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the 3rd alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the 4th alloc
	if len(migrate) != 1 || migrate[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 7
	if len(place) != 7 {
		t.Fatalf("bad: %#v", place)
	}
}

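// TestDiffSystemAllocs verifies that diffSystemAllocs places the system job on
// nodes missing it and stops allocations that sit on tainted nodes.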
func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	// Create three alive nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"baz":  false,
	}

	allocs := []*structs.Allocation{
		// Update allocation on baz
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Stop allocation on dead.
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// There should be no migrates.
	if len(migrate) != 0 {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 1
	if len(place) != 1 {
		t.Fatalf("bad: %#v", place)
	}
}

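// TestReadyNodesInDCs verifies that readyNodesInDCs filters out down and
// draining nodes and returns per-datacenter counts of ready nodes.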
func TestReadyNodesInDCs(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true

	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	nodes, dc, err := readyNodesInDCs(state, []string{"dc1", "dc2"})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(nodes) != 2 {
		t.Fatalf("bad: %v", nodes)
	}
	if nodes[0].ID == node3.ID || nodes[1].ID == node3.ID {
		t.Fatalf("Bad: %#v", nodes)
	}
	if count, ok := dc["dc1"]; !ok || count != 1 {
		t.Fatalf("Bad: dc1 count %v", count)
	}
	if count, ok := dc["dc2"]; !ok || count != 1 {
		t.Fatalf("Bad: dc2 count %v", count)
	}
}

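// TestRetryMax verifies that retryMax gives up after the given number of
// attempts, honors the reset callback, and returns early on success.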
func TestRetryMax(t *testing.T) {
	calls := 0
	bad := func() (bool, error) {
		calls += 1
		return false, nil
	}
	err := retryMax(3, bad, nil)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 3 {
		t.Fatalf("mismatch")
	}

	calls = 0
	first := true
	reset := func() bool {
		if calls == 3 && first {
			first = false
			return true
		}
		return false
	}
	err = retryMax(3, bad, reset)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 6 {
		t.Fatalf("mismatch")
	}

	calls = 0
	good := func() (bool, error) {
		calls += 1
		return true, nil
	}
	err = retryMax(3, good, nil)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if calls != 1 {
		t.Fatalf("mismatch")
	}
}

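// TestTaintedNodes verifies that taintedNodes flags down, draining, and
// unknown nodes as tainted while leaving ready nodes unflagged.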
func TestTaintedNodes(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true
	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	allocs := []*structs.Allocation{
		&structs.Allocation{NodeID: node1.ID},
		&structs.Allocation{NodeID: node2.ID},
		&structs.Allocation{NodeID: node3.ID},
		&structs.Allocation{NodeID: node4.ID},
		&structs.Allocation{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
	}
	tainted, err := taintedNodes(state, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(tainted) != 5 {
		t.Fatalf("bad: %v", tainted)
	}
	if tainted[node1.ID] || tainted[node2.ID] {
		t.Fatalf("Bad: %v", tainted)
	}
	if !tainted[node3.ID] || !tainted[node4.ID] || !tainted["12345678-abcd-efab-cdef-123456789abc"] {
		t.Fatalf("Bad: %v", tainted)
	}
}

func TestShuffleNodes(t *testing.T) {
	// Use a large number of nodes to make the probability of shuffling to the
	// original order very low.
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	orig := make([]*structs.Node, len(nodes))
	copy(orig, nodes)
	shuffleNodes(nodes)
	if reflect.DeepEqual(nodes, orig) {
		t.Fatalf("should not match")
	}
}

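// TestTasksUpdated verifies that tasksUpdated flags changes to task config,
// name, driver, env, user, artifacts, meta, resources, ports, and task count.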
func TestTasksUpdated(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()

	if tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	if !tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Name = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j3.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Driver = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j4.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j5 := mock.Job()
	j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks,
		j5.TaskGroups[0].Tasks[0])
	if !tasksUpdated(j1.TaskGroups[0], j5.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j6 := mock.Job()
	j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{{"http", 0}, {"https", 0}, {"admin", 0}}
	if !tasksUpdated(j1.TaskGroups[0], j6.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j7 := mock.Job()
	j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE"
	if !tasksUpdated(j1.TaskGroups[0], j7.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j8 := mock.Job()
	j8.TaskGroups[0].Tasks[0].User = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j8.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j9 := mock.Job()
	j9.TaskGroups[0].Tasks[0].Artifacts = []*structs.TaskArtifact{
		{
			GetterSource: "http://foo.com/bar",
		},
	}
	if !tasksUpdated(j1.TaskGroups[0], j9.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j10 := mock.Job()
	j10.TaskGroups[0].Tasks[0].Meta["baz"] = "boom"
	if !tasksUpdated(j1.TaskGroups[0], j10.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j11 := mock.Job()
	j11.TaskGroups[0].Tasks[0].Resources.CPU = 1337
	if !tasksUpdated(j1.TaskGroups[0], j11.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j12 := mock.Job()
	j12.TaskGroups[0].Tasks[0].Resources.Networks[0].MBits = 100
	if !tasksUpdated(j1.TaskGroups[0], j12.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j13 := mock.Job()
	j13.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts[0].Label = "foobar"
	if !tasksUpdated(j1.TaskGroups[0], j13.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j14 := mock.Job()
	j14.TaskGroups[0].Tasks[0].Resources.Networks[0].ReservedPorts = []structs.Port{{Label: "foo", Value: 1312}}
	if !tasksUpdated(j1.TaskGroups[0], j14.TaskGroups[0]) {
		t.Fatalf("bad")
	}
}

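// TestEvictAndPlace_LimitLessThanAllocs verifies that evictAndPlace stops
// placing once the limit is exhausted and reports that the limit was reached.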
func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 2
	if !evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned true")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 2 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

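// TestEvictAndPlace_LimitEqualToAllocs verifies that a limit equal to the
// number of allocations is fully consumed without reporting a limit hit.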
func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 4
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

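// TestSetStatus verifies that setStatus submits an evaluation with the given
// status and description and wires up NextEval and BlockedEval when provided.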
func TestSetStatus(t *testing.T) {
	h := NewHarness(t)
	logger := log.New(os.Stderr, "", log.LstdFlags)
	eval := mock.Eval()
	status := "a"
	desc := "b"
	if err := setStatus(logger, h, eval, nil, nil, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval := h.Evals[0]
	if newEval.ID != eval.ID || newEval.Status != status || newEval.StatusDescription != desc {
		t.Fatalf("setStatus() submitted invalid eval: %v", newEval)
	}

	// Test next evals
	h = NewHarness(t)
	next := mock.Eval()
	if err := setStatus(logger, h, eval, next, nil, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.NextEval != next.ID {
		t.Fatalf("setStatus() didn't set nextEval correctly: %v", newEval)
	}

	// Test blocked evals
	h = NewHarness(t)
	blocked := mock.Eval()
	if err := setStatus(logger, h, eval, nil, blocked, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.BlockedEval != blocked.ID {
		t.Fatalf("setStatus() didn't set BlockedEval correctly: %v", newEval)
	}
}

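// TestInplaceUpdate_ChangedTaskGroup verifies that a changed task set cannot
// be applied in place and the allocation is left unplaced.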
func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that prevents in-place updates.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	task := &structs.Task{Name: "FOO"}
	tg.Tasks = nil
	tg.Tasks = append(tg.Tasks, task)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 || len(inplace) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

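// TestInplaceUpdate_NoMatch verifies that an update whose resources no longer
// fit on the node is not applied in place.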
func TestInplaceUpdate_NoMatch(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that requires more resources than are available.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 9999}
	tg.Tasks[0].Resources = resource

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 || len(inplace) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

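// TestInplaceUpdate_Success verifies that a compatible update is applied in
// place and that existing service IDs are preserved across the update.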
func TestInplaceUpdate_Success(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:        structs.GenerateUUID(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		TaskGroup: job.TaskGroups[0].Name,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	alloc.PopulateServiceIDs(job.TaskGroups[0])
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	webFeSrvID := alloc.Services["web-frontend"]
	adminSrvID := alloc.Services["web-admin"]

	if webFeSrvID == "" || adminSrvID == "" {
		t.Fatal("Service ID needs to be generated for service")
	}

	// Create a new task group that updates the resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 737}
	tg.Tasks[0].Resources = resource
	newServices := []*structs.Service{
		{
			Name:      "dummy-service",
			PortLabel: "http",
		},
		{
			Name:      "dummy-service2",
			PortLabel: "http",
		},
	}

	// Delete service 2
	tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

	// Add the new services
	tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)
	stack.SetJob(job)

	// Do the inplace update.
	unplaced, inplace := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 0 || len(inplace) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if inplace[0].Alloc.ID != alloc.ID {
		t.Fatalf("inplaceUpdate returned the wrong inplace-updated alloc: %#v", inplace)
	}

	// Get the alloc we inserted.
	a := ctx.plan.NodeAllocation[alloc.NodeID][0]
	if len(a.Services) != 3 {
		t.Fatalf("Expected number of services: %v, Actual: %v", 3, len(a.Services))
	}

	// Test that the service id for the old service is still the same
	if a.Services["web-frontend"] != webFeSrvID {
		t.Fatalf("Expected service ID: %v, Actual: %v", webFeSrvID, a.Services["web-frontend"])
	}

	// Test that the map doesn't contain the service ID of the admin Service
	// anymore
	if _, ok := a.Services["web-admin"]; ok {
		t.Fatal("Service shouldn't be present")
	}
}

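// TestEvictAndPlace_LimitGreaterThanAllocs verifies that a limit larger than
// the number of allocations is only partially consumed.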
func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 6
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 2 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 2", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

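// TestTaskGroupConstraints verifies that taskGroupConstraints merges group and
// task constraints, collects the task drivers, and sums the task resources.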
func TestTaskGroupConstraints(t *testing.T) {
	constr := &structs.Constraint{RTarget: "bar"}
	constr2 := &structs.Constraint{LTarget: "foo"}
	constr3 := &structs.Constraint{Operand: "<"}

	tg := &structs.TaskGroup{
		Name:        "web",
		Count:       10,
		Constraints: []*structs.Constraint{constr},
		Tasks: []*structs.Task{
			&structs.Task{
				Driver: "exec",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr2},
			},
			&structs.Task{
				Driver: "docker",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr3},
			},
		},
	}

	// Build the expected values.
	expConstr := []*structs.Constraint{constr, constr2, constr3}
	expDrivers := map[string]struct{}{"exec": struct{}{}, "docker": struct{}{}}
	expSize := &structs.Resources{
		CPU:      1000,
		MemoryMB: 512,
	}

	actConstraints := taskGroupConstraints(tg)
	if !reflect.DeepEqual(actConstraints.constraints, expConstr) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.constraints, expConstr)
	}
	if !reflect.DeepEqual(actConstraints.drivers, expDrivers) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.drivers, expDrivers)
	}
	if !reflect.DeepEqual(actConstraints.size, expSize) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.size, expSize)
	}
}

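// TestProgressMade verifies that progressMade reports progress only when the
// plan result contains node updates or node allocations.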
func TestProgressMade(t *testing.T) {
	noopPlan := &structs.PlanResult{}
	if progressMade(nil) || progressMade(noopPlan) {
		t.Fatal("nil or empty plan should not count as making progress")
	}

	m := map[string][]*structs.Allocation{
		"foo": []*structs.Allocation{mock.Alloc()},
	}
	both := &structs.PlanResult{
		NodeAllocation: m,
		NodeUpdate:     m,
	}
	update := &structs.PlanResult{NodeUpdate: m}
	alloc := &structs.PlanResult{NodeAllocation: m}
	if !(progressMade(both) && progressMade(update) && progressMade(alloc)) {
		t.Fatal("bad")
	}
}

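// TestDesiredUpdates verifies that desiredUpdates rolls the diff results up
// into per-task-group counts of placements, stops, migrations, and updates.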
func TestDesiredUpdates(t *testing.T) {
	tg1 := &structs.TaskGroup{Name: "foo"}
	tg2 := &structs.TaskGroup{Name: "bar"}
	a2 := &structs.Allocation{TaskGroup: "bar"}

	place := []allocTuple{
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg2},
	}
	stop := []allocTuple{
		allocTuple{TaskGroup: tg2, Alloc: a2},
		allocTuple{TaskGroup: tg2, Alloc: a2},
	}
	ignore := []allocTuple{
		allocTuple{TaskGroup: tg1},
	}
	migrate := []allocTuple{
		allocTuple{TaskGroup: tg2},
	}
	inplace := []allocTuple{
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg1},
	}
	destructive := []allocTuple{
		allocTuple{TaskGroup: tg1},
		allocTuple{TaskGroup: tg2},
		allocTuple{TaskGroup: tg2},
	}
	diff := &diffResult{
		place:   place,
		stop:    stop,
		ignore:  ignore,
		migrate: migrate,
	}

	expected := map[string]*structs.DesiredUpdates{
		"foo": {
			Place:             3,
			Ignore:            1,
			InPlaceUpdate:     2,
			DestructiveUpdate: 1,
		},
		"bar": {
			Place:             1,
			Stop:              2,
			Migrate:           1,
			DestructiveUpdate: 2,
		},
	}

	desired := desiredUpdates(diff, inplace, destructive)
	if !reflect.DeepEqual(desired, expected) {
		t.Fatalf("desiredUpdates() returned %#v; want %#v", desired, expected)
	}
}