github.com/ryanslade/nomad@v0.2.4-0.20160128061903-fc95782f2089/scheduler/util_test.go

package scheduler

import (
	"fmt"
	"log"
	"os"
	"reflect"
	"testing"

	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
)

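// TestMaterializeTaskGroups verifies that a job is expanded into one task
// group entry per desired instance, keyed as "<job>.<group>[<index>]"; the
// mock job has a single "web" group with a count of 10.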
func TestMaterializeTaskGroups(t *testing.T) {
	job := mock.Job()
	index := materializeTaskGroups(job)
	if len(index) != 10 {
		t.Fatalf("Bad: %#v", index)
	}

	for i := 0; i < 10; i++ {
		name := fmt.Sprintf("my-job.web[%d]", i)
		tg, ok := index[name]
		if !ok {
			t.Fatalf("bad")
		}
		if tg != job.TaskGroups[0] {
			t.Fatalf("bad")
		}
	}
}

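// TestDiffAllocs verifies that diffAllocs buckets existing allocations as the
// assertions below expect: allocs whose job has an older modify index are
// updated, unchanged allocs are ignored, allocs whose name falls outside the
// required set are stopped, allocs on tainted nodes are migrated, and the
// remaining required instances are placed.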
func TestDiffAllocs(t *testing.T) {
	job := mock.Job()
	required := materializeTaskGroups(job)

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"zip":  false,
	}

	allocs := []*structs.Allocation{
		// Update the 1st
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore the 2nd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[1]",
			Job:    job,
		},

		// Stop the 11th
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "zip",
			Name:   "my-job.web[10]",
		},

		// Migrate the 3rd
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[2]",
		},
	}

	diff := diffAllocs(job, tainted, required, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the 3rd alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// We should migrate the 4th alloc
	if len(migrate) != 1 || migrate[0].Alloc != allocs[3] {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 7
	if len(place) != 7 {
		t.Fatalf("bad: %#v", place)
	}
}

func TestDiffSystemAllocs(t *testing.T) {
	job := mock.SystemJob()

	// Create three alive nodes.
	nodes := []*structs.Node{{ID: "foo"}, {ID: "bar"}, {ID: "baz"}}

	// The "old" job has a previous modify index
	oldJob := new(structs.Job)
	*oldJob = *job
	oldJob.JobModifyIndex -= 1

	tainted := map[string]bool{
		"dead": true,
		"baz":  false,
	}

	allocs := []*structs.Allocation{
		// Update allocation on baz
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "baz",
			Name:   "my-job.web[0]",
			Job:    oldJob,
		},

		// Ignore allocation on bar
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "bar",
			Name:   "my-job.web[0]",
			Job:    job,
		},

		// Stop allocation on dead.
		&structs.Allocation{
			ID:     structs.GenerateUUID(),
			NodeID: "dead",
			Name:   "my-job.web[0]",
		},
	}

	diff := diffSystemAllocs(job, nodes, tainted, allocs)
	place := diff.place
	update := diff.update
	migrate := diff.migrate
	stop := diff.stop
	ignore := diff.ignore

	// We should update the first alloc
	if len(update) != 1 || update[0].Alloc != allocs[0] {
		t.Fatalf("bad: %#v", update)
	}

	// We should ignore the second alloc
	if len(ignore) != 1 || ignore[0].Alloc != allocs[1] {
		t.Fatalf("bad: %#v", ignore)
	}

	// We should stop the third alloc
	if len(stop) != 1 || stop[0].Alloc != allocs[2] {
		t.Fatalf("bad: %#v", stop)
	}

	// There should be no migrations.
	if len(migrate) != 0 {
		t.Fatalf("bad: %#v", migrate)
	}

	// We should place 1
	if len(place) != 1 {
		t.Fatalf("bad: %#v", place)
	}
}

func TestReadyNodesInDCs(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true

	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	nodes, dc, err := readyNodesInDCs(state, []string{"dc1", "dc2"})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(nodes) != 2 {
		t.Fatalf("bad: %v", nodes)
	}
	if nodes[0].ID == node3.ID || nodes[1].ID == node3.ID {
		t.Fatalf("Bad: %#v", nodes)
	}
	if count, ok := dc["dc1"]; !ok || count != 1 {
		t.Fatalf("Bad: dc1 count %v", count)
	}
	if count, ok := dc["dc2"]; !ok || count != 1 {
		t.Fatalf("Bad: dc2 count %v", count)
	}
}

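// TestRetryMax verifies that retryMax stops with an error after the given
// number of attempts when the callback keeps reporting it is not done, and
// returns without error on the first attempt that reports done.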
func TestRetryMax(t *testing.T) {
	calls := 0
	bad := func() (bool, error) {
		calls += 1
		return false, nil
	}
	err := retryMax(3, bad)
	if err == nil {
		t.Fatalf("should fail")
	}
	if calls != 3 {
		t.Fatalf("expected 3 calls, got %d", calls)
	}

	calls = 0
	good := func() (bool, error) {
		calls += 1
		return true, nil
	}
	err = retryMax(3, good)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if calls != 1 {
		t.Fatalf("expected 1 call, got %d", calls)
	}
}

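// TestTaintedNodes verifies that allocations on down or draining nodes, or on
// node IDs missing from state, are reported as tainted, while allocations on
// ready nodes are not.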
func TestTaintedNodes(t *testing.T) {
	state, err := state.NewStateStore(os.Stderr)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	node1 := mock.Node()
	node2 := mock.Node()
	node2.Datacenter = "dc2"
	node3 := mock.Node()
	node3.Datacenter = "dc2"
	node3.Status = structs.NodeStatusDown
	node4 := mock.Node()
	node4.Drain = true
	noErr(t, state.UpsertNode(1000, node1))
	noErr(t, state.UpsertNode(1001, node2))
	noErr(t, state.UpsertNode(1002, node3))
	noErr(t, state.UpsertNode(1003, node4))

	allocs := []*structs.Allocation{
		&structs.Allocation{NodeID: node1.ID},
		&structs.Allocation{NodeID: node2.ID},
		&structs.Allocation{NodeID: node3.ID},
		&structs.Allocation{NodeID: node4.ID},
		&structs.Allocation{NodeID: "12345678-abcd-efab-cdef-123456789abc"},
	}
	tainted, err := taintedNodes(state, allocs)
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	if len(tainted) != 5 {
		t.Fatalf("bad: %v", tainted)
	}
	if tainted[node1.ID] || tainted[node2.ID] {
		t.Fatalf("Bad: %v", tainted)
	}
	if !tainted[node3.ID] || !tainted[node4.ID] || !tainted["12345678-abcd-efab-cdef-123456789abc"] {
		t.Fatalf("Bad: %v", tainted)
	}
}

func TestShuffleNodes(t *testing.T) {
	// Use a large number of nodes to make the probability of shuffling to the
	// original order very low.
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	orig := make([]*structs.Node, len(nodes))
	copy(orig, nodes)
	shuffleNodes(nodes)
	if reflect.DeepEqual(nodes, orig) {
		t.Fatalf("should not match")
	}
}

func TestTasksUpdated(t *testing.T) {
	j1 := mock.Job()
	j2 := mock.Job()

	if tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j2.TaskGroups[0].Tasks[0].Config["command"] = "/bin/other"
	if !tasksUpdated(j1.TaskGroups[0], j2.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j3 := mock.Job()
	j3.TaskGroups[0].Tasks[0].Name = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j3.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j4 := mock.Job()
	j4.TaskGroups[0].Tasks[0].Driver = "foo"
	if !tasksUpdated(j1.TaskGroups[0], j4.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j5 := mock.Job()
	j5.TaskGroups[0].Tasks = append(j5.TaskGroups[0].Tasks,
		j5.TaskGroups[0].Tasks[0])
	if !tasksUpdated(j1.TaskGroups[0], j5.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j6 := mock.Job()
	j6.TaskGroups[0].Tasks[0].Resources.Networks[0].DynamicPorts = []structs.Port{{"http", 0}, {"https", 0}, {"admin", 0}}
	if !tasksUpdated(j1.TaskGroups[0], j6.TaskGroups[0]) {
		t.Fatalf("bad")
	}

	j7 := mock.Job()
	j7.TaskGroups[0].Tasks[0].Env["NEW_ENV"] = "NEW_VALUE"
	if !tasksUpdated(j1.TaskGroups[0], j7.TaskGroups[0]) {
		t.Fatalf("bad")
	}
}

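// TestEvictAndPlace_LimitLessThanAllocs verifies that evictAndPlace reports
// that the limit was reached when there are more allocations than the limit
// allows, decrementing the limit to zero and queueing only that many
// placements.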
func TestEvictAndPlace_LimitLessThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 2
	if !evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned true")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 2 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestEvictAndPlace_LimitEqualToAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 4
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 0 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 0", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

func TestSetStatus(t *testing.T) {
	h := NewHarness(t)
	logger := log.New(os.Stderr, "", log.LstdFlags)
	eval := mock.Eval()
	status := "a"
	desc := "b"
	if err := setStatus(logger, h, eval, nil, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval := h.Evals[0]
	if newEval.ID != eval.ID || newEval.Status != status || newEval.StatusDescription != desc {
		t.Fatalf("setStatus() submitted invalid eval: %v", newEval)
	}

	h = NewHarness(t)
	next := mock.Eval()
	if err := setStatus(logger, h, eval, next, status, desc); err != nil {
		t.Fatalf("setStatus() failed: %v", err)
	}

	if len(h.Evals) != 1 {
		t.Fatalf("setStatus() didn't update plan: %v", h.Evals)
	}

	newEval = h.Evals[0]
	if newEval.NextEval != next.ID {
		t.Fatalf("setStatus() didn't set nextEval correctly: %v", newEval)
	}
}

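// The inplaceUpdate tests below exercise updating an existing allocation
// without rescheduling it: inplaceUpdate returns the tuples it could not
// update in place, and records successful in-place updates in the plan's
// NodeAllocation.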
func TestInplaceUpdate_ChangedTaskGroup(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that prevents in-place updates.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	task := &structs.Task{Name: "FOO"}
	tg.Tasks = nil
	tg.Tasks = append(tg.Tasks, task)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

func TestInplaceUpdate_NoMatch(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:     structs.GenerateUUID(),
		EvalID: eval.ID,
		NodeID: node.ID,
		JobID:  job.ID,
		Job:    job,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	// Create a new task group that requires too many resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 9999}
	tg.Tasks[0].Resources = resource

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)

	// Do the inplace update.
	unplaced := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 1 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 0 {
		t.Fatal("inplaceUpdate incorrectly did an inplace update")
	}
}

func TestInplaceUpdate_Success(t *testing.T) {
	state, ctx := testContext(t)
	eval := mock.Eval()
	job := mock.Job()

	node := mock.Node()
	noErr(t, state.UpsertNode(1000, node))

	// Register an alloc
	alloc := &structs.Allocation{
		ID:        structs.GenerateUUID(),
		EvalID:    eval.ID,
		NodeID:    node.ID,
		JobID:     job.ID,
		Job:       job,
		TaskGroup: job.TaskGroups[0].Name,
		Resources: &structs.Resources{
			CPU:      2048,
			MemoryMB: 2048,
		},
		DesiredStatus: structs.AllocDesiredStatusRun,
	}
	alloc.TaskResources = map[string]*structs.Resources{"web": alloc.Resources}
	alloc.PopulateServiceIDs()
	noErr(t, state.UpsertAllocs(1001, []*structs.Allocation{alloc}))

	webFeSrvID := alloc.Services["web-frontend"]
	adminSrvID := alloc.Services["web-admin"]

	if webFeSrvID == "" || adminSrvID == "" {
		t.Fatal("Service ID needs to be generated for service")
	}

	// Create a new task group that updates the resources.
	tg := &structs.TaskGroup{}
	*tg = *job.TaskGroups[0]
	resource := &structs.Resources{CPU: 737}
	tg.Tasks[0].Resources = resource
	newServices := []*structs.Service{
		{
			Name:      "dummy-service",
			PortLabel: "http",
		},
		{
			Name:      "dummy-service2",
			PortLabel: "http",
		},
	}

	// Delete service 2
	tg.Tasks[0].Services = tg.Tasks[0].Services[:1]

	// Add the new services
	tg.Tasks[0].Services = append(tg.Tasks[0].Services, newServices...)

	updates := []allocTuple{{Alloc: alloc, TaskGroup: tg}}
	stack := NewGenericStack(false, ctx)
	stack.SetJob(job)

	// Do the inplace update.
	unplaced := inplaceUpdate(ctx, eval, job, stack, updates)

	if len(unplaced) != 0 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	if len(ctx.plan.NodeAllocation) != 1 {
		t.Fatal("inplaceUpdate did not do an inplace update")
	}

	// Get the alloc we inserted.
	a := ctx.plan.NodeAllocation[alloc.NodeID][0]
	if len(a.Services) != 3 {
		t.Fatalf("Expected number of services: %v, Actual: %v", 3, len(a.Services))
	}

	// Test that the service id for the old service is still the same
	if a.Services["web-frontend"] != webFeSrvID {
		t.Fatalf("Expected service ID: %v, Actual: %v", webFeSrvID, a.Services["web-frontend"])
	}

	// Test that the map doesn't contain the service ID of the admin Service
	// anymore
	if _, ok := a.Services["web-admin"]; ok {
		t.Fatal("Service shouldn't be present")
	}
}

func TestEvictAndPlace_LimitGreaterThanAllocs(t *testing.T) {
	_, ctx := testContext(t)
	allocs := []allocTuple{
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
		allocTuple{Alloc: &structs.Allocation{ID: structs.GenerateUUID()}},
	}
	diff := &diffResult{}

	limit := 6
	if evictAndPlace(ctx, diff, allocs, "", &limit) {
		t.Fatal("evictAndPlace() should have returned false")
	}

	if limit != 2 {
		t.Fatalf("evictAndPlace() should have decremented limit; got %v; want 2", limit)
	}

	if len(diff.place) != 4 {
		t.Fatalf("evictAndPlace() didn't insert into diffResult properly: %v", diff.place)
	}
}

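// TestTaskGroupConstraints verifies that taskGroupConstraints merges the
// group-level and task-level constraints, collects the set of task drivers,
// and sums the per-task resources into a single required size.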
func TestTaskGroupConstraints(t *testing.T) {
	constr := &structs.Constraint{RTarget: "bar"}
	constr2 := &structs.Constraint{LTarget: "foo"}
	constr3 := &structs.Constraint{Operand: "<"}

	tg := &structs.TaskGroup{
		Name:        "web",
		Count:       10,
		Constraints: []*structs.Constraint{constr},
		Tasks: []*structs.Task{
			&structs.Task{
				Driver: "exec",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr2},
			},
			&structs.Task{
				Driver: "docker",
				Resources: &structs.Resources{
					CPU:      500,
					MemoryMB: 256,
				},
				Constraints: []*structs.Constraint{constr3},
			},
		},
	}

	// Build the expected values.
	expConstr := []*structs.Constraint{constr, constr2, constr3}
	expDrivers := map[string]struct{}{"exec": struct{}{}, "docker": struct{}{}}
	expSize := &structs.Resources{
		CPU:      1000,
		MemoryMB: 512,
	}

	actConstraints := taskGroupConstraints(tg)
	if !reflect.DeepEqual(actConstraints.constraints, expConstr) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.constraints, expConstr)
	}
	if !reflect.DeepEqual(actConstraints.drivers, expDrivers) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.drivers, expDrivers)
	}
	if !reflect.DeepEqual(actConstraints.size, expSize) {
		t.Fatalf("taskGroupConstraints(%v) returned %v; want %v", tg, actConstraints.size, expSize)
	}
}

func TestInitTaskState(t *testing.T) {
	tg := &structs.TaskGroup{
		Tasks: []*structs.Task{
			&structs.Task{Name: "foo"},
			&structs.Task{Name: "bar"},
		},
	}
	expPending := map[string]*structs.TaskState{
		"foo": &structs.TaskState{State: structs.TaskStatePending},
		"bar": &structs.TaskState{State: structs.TaskStatePending},
	}
	expDead := map[string]*structs.TaskState{
		"foo": &structs.TaskState{State: structs.TaskStateDead},
		"bar": &structs.TaskState{State: structs.TaskStateDead},
	}
	actPending := initTaskState(tg, structs.TaskStatePending)
	actDead := initTaskState(tg, structs.TaskStateDead)

	if !(reflect.DeepEqual(expPending, actPending) && reflect.DeepEqual(expDead, actDead)) {
		t.Fatal("Expected and actual not equal")
	}
}