github.com/anuvu/nomad@v0.8.7-atom1/scheduler/feasible_test.go

package scheduler

import (
	"fmt"
	"reflect"
	"testing"
	"time"

	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)

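// TestStaticIterator_Reset verifies that Reset rewinds the iterator: after any
// number of Next calls, a fresh pass must yield every node exactly once.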
func TestStaticIterator_Reset(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 3; i++ {
		nodes = append(nodes, mock.Node())
	}
	static := NewStaticIterator(ctx, nodes)

	for i := 0; i < 6; i++ {
		static.Reset()
		for j := 0; j < i; j++ {
			static.Next()
		}
		static.Reset()

		out := collectFeasible(static)
		if len(out) != len(nodes) {
			t.Fatalf("missing nodes: pass %d, out: %#v", i, out)
		}

		ids := make(map[string]struct{})
		for _, o := range out {
			if _, ok := ids[o.ID]; ok {
				t.Fatalf("duplicate node ID %q", o.ID)
			}
			ids[o.ID] = struct{}{}
		}
	}
}

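// TestStaticIterator_SetNodes verifies that SetNodes replaces the iterator's
// backing node set, so a subsequent pass yields exactly the new nodes.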
func TestStaticIterator_SetNodes(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 3; i++ {
		nodes = append(nodes, mock.Node())
	}
	static := NewStaticIterator(ctx, nodes)

	newNodes := []*structs.Node{mock.Node()}
	static.SetNodes(newNodes)

	out := collectFeasible(static)
	if !reflect.DeepEqual(out, newNodes) {
		t.Fatalf("bad: %#v", out)
	}
}

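// TestRandomIterator verifies that the random iterator yields every node and
// does not preserve the input order.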
func TestRandomIterator(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		nodes = append(nodes, mock.Node())
	}

	nc := make([]*structs.Node, len(nodes))
	copy(nc, nodes)
	rand := NewRandomIterator(ctx, nc)

	out := collectFeasible(rand)
	if len(out) != len(nodes) {
		t.Fatalf("missing nodes")
	}
	if reflect.DeepEqual(out, nodes) {
		t.Fatalf("same order")
	}
}

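// TestDriverChecker verifies that driver feasibility follows the node's driver
// attribute: "1" and "true" are feasible, "0" and "False" are not.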
func TestDriverChecker(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	nodes[0].Attributes["driver.foo"] = "1"
	nodes[1].Attributes["driver.foo"] = "0"
	nodes[2].Attributes["driver.foo"] = "true"
	nodes[3].Attributes["driver.foo"] = "False"

	drivers := map[string]struct{}{
		"exec": {},
		"foo":  {},
	}
	checker := NewDriverChecker(ctx, drivers)
	cases := []struct {
		Node   *structs.Node
		Result bool
	}{
		{
			Node:   nodes[0],
			Result: true,
		},
		{
			Node:   nodes[1],
			Result: false,
		},
		{
			Node:   nodes[2],
			Result: true,
		},
		{
			Node:   nodes[3],
			Result: false,
		},
	}

	for i, c := range cases {
		if act := checker.Feasible(c.Node); act != c.Result {
			t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result)
		}
	}
}

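// Test_HealthChecks verifies that the driver checker honors per-driver health
// information on the node: a driver must be detected and healthy to pass.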
func Test_HealthChecks(t *testing.T) {
	require := require.New(t)
	_, ctx := testContext(t)

	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	for _, e := range nodes {
		e.Drivers = make(map[string]*structs.DriverInfo)
	}
	nodes[0].Attributes["driver.foo"] = "1"
	nodes[0].Drivers["foo"] = &structs.DriverInfo{
		Detected:          true,
		Healthy:           true,
		HealthDescription: "running",
		UpdateTime:        time.Now(),
	}
	nodes[1].Attributes["driver.bar"] = "1"
	nodes[1].Drivers["bar"] = &structs.DriverInfo{
		Detected:          true,
		Healthy:           false,
		HealthDescription: "not running",
		UpdateTime:        time.Now(),
	}
	nodes[2].Attributes["driver.baz"] = "0"
	nodes[2].Drivers["baz"] = &structs.DriverInfo{
		Detected:          false,
		Healthy:           false,
		HealthDescription: "not running",
		UpdateTime:        time.Now(),
	}

	testDrivers := []string{"foo", "bar", "baz"}
	cases := []struct {
		Node   *structs.Node
		Result bool
	}{
		{
			Node:   nodes[0],
			Result: true,
		},
		{
			Node:   nodes[1],
			Result: false,
		},
		{
			Node:   nodes[2],
			Result: false,
		},
	}

	for i, c := range cases {
		drivers := map[string]struct{}{
			testDrivers[i]: {},
		}
		checker := NewDriverChecker(ctx, drivers)
		act := checker.Feasible(c.Node)
		require.Equal(c.Result, act)
	}
}

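// TestConstraintChecker verifies that a node is feasible only if it satisfies
// every constraint in the checker's set.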
func TestConstraintChecker(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	nodes[0].Attributes["kernel.name"] = "freebsd"
	nodes[1].Datacenter = "dc2"
	nodes[2].NodeClass = "large"

	constraints := []*structs.Constraint{
		{
			Operand: "=",
			LTarget: "${node.datacenter}",
			RTarget: "dc1",
		},
		{
			Operand: "is",
			LTarget: "${attr.kernel.name}",
			RTarget: "linux",
		},
		{
			Operand: "is",
			LTarget: "${node.class}",
			RTarget: "large",
		},
	}
	checker := NewConstraintChecker(ctx, constraints)
	cases := []struct {
		Node   *structs.Node
		Result bool
	}{
		{
			Node:   nodes[0],
			Result: false,
		},
		{
			Node:   nodes[1],
			Result: false,
		},
		{
			Node:   nodes[2],
			Result: true,
		},
	}

	for i, c := range cases {
		if act := checker.Feasible(c.Node); act != c.Result {
			t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result)
		}
	}
}

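// TestResolveConstraintTarget verifies resolution of ${node.*}, ${attr.*},
// and ${meta.*} constraint targets against a node, including unknown targets.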
func TestResolveConstraintTarget(t *testing.T) {
	type tcase struct {
		target string
		node   *structs.Node
		val    interface{}
		result bool
	}
	node := mock.Node()
	cases := []tcase{
		{
			target: "${node.unique.id}",
			node:   node,
			val:    node.ID,
			result: true,
		},
		{
			target: "${node.datacenter}",
			node:   node,
			val:    node.Datacenter,
			result: true,
		},
		{
			target: "${node.unique.name}",
			node:   node,
			val:    node.Name,
			result: true,
		},
		{
			target: "${node.class}",
			node:   node,
			val:    node.NodeClass,
			result: true,
		},
		{
			target: "${node.foo}",
			node:   node,
			result: false,
		},
		{
			target: "${attr.kernel.name}",
			node:   node,
			val:    node.Attributes["kernel.name"],
			result: true,
		},
		{
			target: "${attr.rand}",
			node:   node,
			result: false,
		},
		{
			target: "${meta.pci-dss}",
			node:   node,
			val:    node.Meta["pci-dss"],
			result: true,
		},
		{
			target: "${meta.rand}",
			node:   node,
			result: false,
		},
	}

	for _, tc := range cases {
		res, ok := resolveConstraintTarget(tc.target, tc.node)
		if ok != tc.result {
			t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
		}
		if ok && !reflect.DeepEqual(res, tc.val) {
			t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
		}
	}
}

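// TestCheckConstraint exercises checkConstraint across the supported operands:
// equality, inequality, version, regexp, lexical order, and set containment.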
func TestCheckConstraint(t *testing.T) {
	type tcase struct {
		op         string
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			op:   "=",
			lVal: "foo", rVal: "foo",
			result: true,
		},
		{
			op:   "is",
			lVal: "foo", rVal: "foo",
			result: true,
		},
		{
			op:   "==",
			lVal: "foo", rVal: "foo",
			result: true,
		},
		{
			op:   "!=",
			lVal: "foo", rVal: "foo",
			result: false,
		},
		{
			op:   "!=",
			lVal: "foo", rVal: "bar",
			result: true,
		},
		{
			op:   "not",
			lVal: "foo", rVal: "bar",
			result: true,
		},
		{
			op:   structs.ConstraintVersion,
			lVal: "1.2.3", rVal: "~> 1.0",
			result: true,
		},
		{
			op:   structs.ConstraintRegex,
			lVal: "foobarbaz", rVal: "[\\w]+",
			result: true,
		},
		{
			op:   "<",
			lVal: "foo", rVal: "bar",
			result: false,
		},
		{
			op:   structs.ConstraintSetContains,
			lVal: "foo,bar,baz", rVal: "foo,  bar  ",
			result: true,
		},
		{
			op:   structs.ConstraintSetContains,
			lVal: "foo,bar,baz", rVal: "foo,bam",
			result: false,
		},
	}

	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkConstraint(ctx, tc.op, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}

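// TestCheckLexicalOrder verifies the <, <=, >, and >= operands on strings and
// that non-string operands fail the check.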
func TestCheckLexicalOrder(t *testing.T) {
	type tcase struct {
		op         string
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			op:   "<",
			lVal: "bar", rVal: "foo",
			result: true,
		},
		{
			op:   "<=",
			lVal: "foo", rVal: "foo",
			result: true,
		},
		{
			op:   ">",
			lVal: "bar", rVal: "foo",
			result: false,
		},
		{
			op:   ">=",
			lVal: "bar", rVal: "bar",
			result: true,
		},
		{
			op:   ">",
			lVal: 1, rVal: "foo",
			result: false,
		},
	}
	for _, tc := range cases {
		if res := checkLexicalOrder(tc.op, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}

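// TestCheckVersionConstraint verifies semantic version matching, including
// pessimistic (~>) and compound (>=, <) constraints.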
func TestCheckVersionConstraint(t *testing.T) {
	type tcase struct {
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			lVal: "1.2.3", rVal: "~> 1.0",
			result: true,
		},
		{
			lVal: "1.2.3", rVal: ">= 1.0, < 1.4",
			result: true,
		},
		{
			lVal: "2.0.1", rVal: "~> 1.0",
			result: false,
		},
		{
			lVal: "1.4", rVal: ">= 1.0, < 1.4",
			result: false,
		},
		{
			lVal: 1, rVal: "~> 1.0",
			result: true,
		},
	}
	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkVersionConstraint(ctx, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}

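// TestCheckRegexpConstraint verifies regular expression matching and that
// non-string operands fail the check.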
func TestCheckRegexpConstraint(t *testing.T) {
	type tcase struct {
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			lVal: "foobar", rVal: "bar",
			result: true,
		},
		{
			lVal: "foobar", rVal: "^foo",
			result: true,
		},
		{
			lVal: "foobar", rVal: "^bar",
			result: false,
		},
		{
			lVal: "zipzap", rVal: "foo",
			result: false,
		},
		{
			lVal: 1, rVal: "foo",
			result: false,
		},
	}
	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkRegexpConstraint(ctx, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}

// This test places allocations on nodes to check that the iterator detects
// infeasible nodes correctly and picks the only feasible one.
func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_hosts constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		ID:          "foo",
		Namespace:   structs.DefaultNamespace,
		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
		TaskGroups:  []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
	// job unsatisfiable on all nodes but node3
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
		},
	}

	proposed := NewDistinctHostsIterator(ctx, static)
	proposed.SetTaskGroup(tg1)
	proposed.SetJob(job)

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}

	if out[0].ID != nodes[2].ID {
		t.Fatalf("wrong node picked")
	}
}

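// TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount verifies that a
// job-level distinct_hosts constraint yields no feasible nodes when the job
// has more task groups than there are nodes.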
func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_hosts constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}

	job := &structs.Job{
		ID:          "foo",
		Namespace:   structs.DefaultNamespace,
		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
		TaskGroups:  []*structs.TaskGroup{tg1, tg2, tg3},
	}

	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
	// job unsatisfiable for tg3
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			ID:        uuid.Generate(),
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			ID:        uuid.Generate(),
		},
	}

	proposed := NewDistinctHostsIterator(ctx, static)
	proposed.SetTaskGroup(tg3)
	proposed.SetJob(job)

	// It should not be able to place a third task group with only two nodes.
	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

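// TestDistinctHostsIterator_TaskGroupDistinctHosts verifies a distinct_hosts
// constraint set on a single task group: only prior allocations of that task
// group and job make a node infeasible.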
func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a task group with a distinct_hosts constraint.
	tg1 := &structs.TaskGroup{
		Name: "example",
		Constraints: []*structs.Constraint{
			{Operand: structs.ConstraintDistinctHosts},
		},
	}
	tg2 := &structs.TaskGroup{Name: "baz"}

	// Add a planned alloc to node1.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "foo",
		},
	}

	// Add a planned alloc to node2 with the same task group name but a
	// different job.
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "bar",
		},
	}

	proposed := NewDistinctHostsIterator(ctx, static)
	proposed.SetTaskGroup(tg1)
	proposed.SetJob(&structs.Job{
		ID:        "foo",
		Namespace: structs.DefaultNamespace,
	})

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}

	// Expect it to skip the first node as there is a previous alloc on it for
	// the same task group.
	if out[0] != nodes[1] {
		t.Fatalf("Bad: %v", out)
	}

	// Since the other task group doesn't have the constraint, both nodes should
	// be feasible.
	proposed.Reset()
	proposed.SetTaskGroup(tg2)
	out = collectFeasible(proposed)
	if len(out) != 2 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates allocations across task groups that use a property value
// to check that a constraint at the job level properly considers all task
// groups.
func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		ID:        "foo",
		Namespace: structs.DefaultNamespace,
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on nodes 1 and 2 and tg2 on nodes 3 and 4. This
	// should make the job unsatisfiable on all nodes but node 5. Also mix the
	// allocations existing in the plan and the state store.
	plan := ctx.Plan()
	alloc1ID := uuid.Generate()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			NodeID:    nodes[0].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},
	}

	// Put an allocation on Node 5 but make it stopped in the plan
	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[4].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[4].ID,
		},
	}

	upserting := []*structs.Allocation{
		// Have one of the allocations exist in both the plan and the state
		// store. This resembles an allocation update
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[3].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[3].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[4].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg2)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}
	if out[0].ID != nodes[4].ID {
		t.Fatalf("wrong node picked")
	}
}

// This test creates allocations across task groups that use a property value
// to check that a job-level constraint properly considers all task groups
// when the constraint allows a count greater than one.
func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		ID:        "foo",
		Namespace: structs.DefaultNamespace,
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
				RTarget: "2",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing two allocations on both node 1 and node 2 and only
	// one on node 3. This should make the job unsatisfiable on all nodes but
	// node 3. Also mix the allocations existing in the plan and the state store.
	plan := ctx.Plan()
	alloc1ID := uuid.Generate()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			NodeID:    nodes[0].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			NodeID:    nodes[0].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},
	}

	// Put an allocation on Node 3 but make it stopped in the plan
	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[2].ID,
		},
	}

	upserting := []*structs.Allocation{
		// Have one of the allocations exist in both the plan and the state
		// store. This resembles an allocation update
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg2)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}
	if out[0].ID != nodes[2].ID {
		t.Fatalf("wrong node picked")
	}
}

// This test checks that when a node's existing allocation is stopped in the
// plan and a replacement allocation is planned on the same node, the next
// selection will not pick that node again.
func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
	}

	nodes[0].Meta["rack"] = "1"

	// Add to state store
	if err := state.UpsertNode(uint64(100), nodes[0]); err != nil {
		t.Fatalf("failed to upsert node: %v", err)
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and one task group.
	tg1 := &structs.TaskGroup{Name: "bar"}
	job := &structs.Job{
		Namespace: structs.DefaultNamespace,
		ID:        "foo",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1},
	}

	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}

	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[0].ID,
		},
	}

	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg1)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates previous allocations selecting certain property values to
// check that infeasibility of property values is detected correctly when no
// remaining node can satisfy the constraint.
func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}

	job := &structs.Job{
		Namespace: structs.DefaultNamespace,
		ID:        "foo",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2, tg3},
	}

	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
	// job unsatisfiable for tg3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg3)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates previous allocations selecting certain property values to
// check that infeasibility is detected correctly when the property count
// limit has been reached on every node.
func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}

	job := &structs.Job{
		Namespace: structs.DefaultNamespace,
		ID:        "foo",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
				RTarget: "2",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2, tg3},
	}

	// Add allocs placing a tg1 and a tg2 on both node1 and node2. With a count
	// of 2 per property value, this should make the job unsatisfiable for tg3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg3)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates previous allocations selecting certain property values to
// check that infeasibility of property values is detected correctly and the
// only feasible node is picked when the constraint is at the task group level.
func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a task group with the distinct_property constraint
	tg1 := &structs.TaskGroup{
		Name: "example",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
	}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		Namespace:  structs.DefaultNamespace,
		ID:         "foo",
		TaskGroups: []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on node1 and 2. This should make the
	// job unsatisfiable on all nodes but node3. Also mix the allocations
	// existing in the plan and the state store.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}

	// Put an allocation on Node 3 but make it stopped in the plan
	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[2].ID,
		},
	}

	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[2].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[2].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg1)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}
	if out[0].ID != nodes[2].ID {
		t.Fatalf("wrong node picked")
	}

	// Since the other task group doesn't have the constraint, all three nodes
	// should be feasible.
	proposed.SetTaskGroup(tg2)
	proposed.Reset()

	out = collectFeasible(proposed)
	if len(out) != 3 {
		t.Fatalf("Bad: %#v", out)
	}
}

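// collectFeasible drains the iterator and returns every node it yields.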
func collectFeasible(iter FeasibleIterator) (out []*structs.Node) {
	for {
		next := iter.Next()
		if next == nil {
			break
		}
		out = append(out, next)
	}
	return
}

// mockFeasibilityChecker is a FeasibilityChecker that returns predetermined
// feasibility values.
type mockFeasibilityChecker struct {
	retVals []bool
	i       int
}

func newMockFeasibilityChecker(values ...bool) *mockFeasibilityChecker {
	return &mockFeasibilityChecker{retVals: values}
}

func (c *mockFeasibilityChecker) Feasible(*structs.Node) bool {
	if c.i >= len(c.retVals) {
		c.i++
		return false
	}

	f := c.retVals[c.i]
	c.i++
	return f
}

// calls returns how many times the checker was called.
func (c *mockFeasibilityChecker) calls() int { return c.i }

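// TestFeasibilityWrapper_JobIneligible verifies that nodes whose computed
// class is already marked ineligible for the job are skipped without invoking
// the job checkers.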
func TestFeasibilityWrapper_JobIneligible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	mocked := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil)

	// Set the job to ineligible
	ctx.Eligibility().SetJobEligibility(false, nodes[0].ComputedClass)

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out != nil || mocked.calls() != 0 {
		t.Fatalf("bad: %#v %d", out, mocked.calls())
	}
}

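// TestFeasibilityWrapper_JobEscapes verifies that a node whose computed class
// has escaped is always re-checked, and that a failed check does not demote
// the class from its escaped status.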
func TestFeasibilityWrapper_JobEscapes(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	mocked := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil)

	// Set the job to escaped
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEscaped

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out != nil || mocked.calls() != 1 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure that the job status didn't change from escaped even though the
	// option failed.
	if status := ctx.Eligibility().JobStatus(cc); status != EvalComputedClassEscaped {
		t.Fatalf("job status is %v; want %v", status, EvalComputedClassEscaped)
	}
}

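// TestFeasibilityWrapper_JobAndTg_Eligible verifies that when both the job and
// the task group are already marked eligible for the class, the task group
// checkers are not invoked.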
func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasibilityChecker(true)
	tgMock := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Mark both the job and the task group as eligible for the class
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().SetTaskGroupEligibility(true, "foo", cc)
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out == nil || tgMock.calls() != 0 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}
}

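// TestFeasibilityWrapper_JobEligible_TgIneligible verifies that a task group
// marked ineligible for the class is rejected without invoking the task group
// checkers.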
func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasibilityChecker(true)
	tgMock := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Mark the job as eligible but the task group as ineligible for the class
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().SetTaskGroupEligibility(false, "foo", cc)
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out != nil || tgMock.calls() != 0 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}
}

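// TestFeasibilityWrapper_JobEligible_TgEscaped verifies that an escaped task
// group class is re-checked and remains marked escaped afterwards.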
func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasibilityChecker(true)
	tgMock := newMockFeasibilityChecker(true)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Mark the job as eligible and the task group as escaped for the class
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().taskGroups["foo"] =
		map[string]ComputedClassFeasibility{cc: EvalComputedClassEscaped}
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out == nil || tgMock.calls() != 1 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}

	if e, ok := ctx.Eligibility().taskGroups["foo"][cc]; !ok || e != EvalComputedClassEscaped {
		t.Fatalf("bad: %v %v", e, ok)
	}
}