github.com/blixtra/nomad@v0.7.2-0.20171221000451-da9a1d7bb050/scheduler/feasible_test.go

package scheduler

import (
	"fmt"
	"reflect"
	"testing"

	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
)

func TestStaticIterator_Reset(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 3; i++ {
		nodes = append(nodes, mock.Node())
	}
	static := NewStaticIterator(ctx, nodes)

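	// Consume a varying number of nodes, then Reset and verify that a full
	// pass still yields every node exactly once.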
	for i := 0; i < 6; i++ {
		static.Reset()
		for j := 0; j < i; j++ {
			static.Next()
		}
		static.Reset()

		out := collectFeasible(static)
		if len(out) != len(nodes) {
			t.Fatalf("missing nodes: iteration %d, out: %#v, iterator: %#v", i, out, static)
		}

		ids := make(map[string]struct{})
		for _, o := range out {
			if _, ok := ids[o.ID]; ok {
				t.Fatalf("duplicate node ID %q", o.ID)
			}
			ids[o.ID] = struct{}{}
		}
	}
}

func TestStaticIterator_SetNodes(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 3; i++ {
		nodes = append(nodes, mock.Node())
	}
	static := NewStaticIterator(ctx, nodes)

	newNodes := []*structs.Node{mock.Node()}
	static.SetNodes(newNodes)

	out := collectFeasible(static)
	if !reflect.DeepEqual(out, newNodes) {
		t.Fatalf("bad: %#v", out)
	}
}

func TestRandomIterator(t *testing.T) {
	_, ctx := testContext(t)
	var nodes []*structs.Node
	for i := 0; i < 10; i++ {
		nodes = append(nodes, mock.Node())
	}

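	// Shuffle a copy so the output can be compared against the original order.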
	nc := make([]*structs.Node, len(nodes))
	copy(nc, nodes)
	rand := NewRandomIterator(ctx, nc)

	out := collectFeasible(rand)
	if len(out) != len(nodes) {
		t.Fatalf("missing nodes")
	}
	if reflect.DeepEqual(out, nodes) {
		t.Fatalf("same order")
	}
}

func TestDriverChecker(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	nodes[0].Attributes["driver.foo"] = "1"
	nodes[1].Attributes["driver.foo"] = "0"
	nodes[2].Attributes["driver.foo"] = "true"
	nodes[3].Attributes["driver.foo"] = "False"
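	// As the cases below show, the attribute value is interpreted as a
	// boolean: "1" and "true" mark the driver as present, "0" and "False"
	// as absent.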

	drivers := map[string]struct{}{
		"exec": {},
		"foo":  {},
	}
	checker := NewDriverChecker(ctx, drivers)
	cases := []struct {
		Node   *structs.Node
		Result bool
	}{
		{
			Node:   nodes[0],
			Result: true,
		},
		{
			Node:   nodes[1],
			Result: false,
		},
		{
			Node:   nodes[2],
			Result: true,
		},
		{
			Node:   nodes[3],
			Result: false,
		},
	}

	for i, c := range cases {
		if act := checker.Feasible(c.Node); act != c.Result {
			t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result)
		}
	}
}

func TestConstraintChecker(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	nodes[0].Attributes["kernel.name"] = "freebsd"
	nodes[1].Datacenter = "dc2"
	nodes[2].NodeClass = "large"

	constraints := []*structs.Constraint{
		{
			Operand: "=",
			LTarget: "${node.datacenter}",
			RTarget: "dc1",
		},
		{
			Operand: "is",
			LTarget: "${attr.kernel.name}",
			RTarget: "linux",
		},
		{
			Operand: "is",
			LTarget: "${node.class}",
			RTarget: "large",
		},
	}
	checker := NewConstraintChecker(ctx, constraints)
	cases := []struct {
		Node   *structs.Node
		Result bool
	}{
		{
			Node:   nodes[0],
			Result: false,
		},
		{
			Node:   nodes[1],
			Result: false,
		},
		{
			Node:   nodes[2],
			Result: true,
		},
	}

	for i, c := range cases {
		if act := checker.Feasible(c.Node); act != c.Result {
			t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result)
		}
	}
}

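// resolveConstraintTarget resolves the ${node.*}, ${attr.*}, and ${meta.*}
// namespaces; an unknown key in any namespace fails to resolve, as the cases
// below demonstrate.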
func TestResolveConstraintTarget(t *testing.T) {
	type tcase struct {
		target string
		node   *structs.Node
		val    interface{}
		result bool
	}
	node := mock.Node()
	cases := []tcase{
		{
			target: "${node.unique.id}",
			node:   node,
			val:    node.ID,
			result: true,
		},
		{
			target: "${node.datacenter}",
			node:   node,
			val:    node.Datacenter,
			result: true,
		},
		{
			target: "${node.unique.name}",
			node:   node,
			val:    node.Name,
			result: true,
		},
		{
			target: "${node.class}",
			node:   node,
			val:    node.NodeClass,
			result: true,
		},
		{
			target: "${node.foo}",
			node:   node,
			result: false,
		},
		{
			target: "${attr.kernel.name}",
			node:   node,
			val:    node.Attributes["kernel.name"],
			result: true,
		},
		{
			target: "${attr.rand}",
			node:   node,
			result: false,
		},
		{
			target: "${meta.pci-dss}",
			node:   node,
			val:    node.Meta["pci-dss"],
			result: true,
		},
		{
			target: "${meta.rand}",
			node:   node,
			result: false,
		},
	}

	for _, tc := range cases {
		res, ok := resolveConstraintTarget(tc.target, tc.node)
		if ok != tc.result {
			t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
		}
		if ok && !reflect.DeepEqual(res, tc.val) {
			t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
		}
	}
}

func TestCheckConstraint(t *testing.T) {
	type tcase struct {
		op         string
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			op:   "=",
			lVal: "foo", rVal: "foo",
			result: true,
		},
		{
			op:   "is",
			lVal: "foo", rVal: "foo",
			result: true,
		},
		{
			op:   "==",
			lVal: "foo", rVal: "foo",
			result: true,
		},
		{
			op:   "!=",
			lVal: "foo", rVal: "foo",
			result: false,
		},
		{
			op:   "!=",
			lVal: "foo", rVal: "bar",
			result: true,
		},
		{
			op:   "not",
			lVal: "foo", rVal: "bar",
			result: true,
		},
		{
			op:   structs.ConstraintVersion,
			lVal: "1.2.3", rVal: "~> 1.0",
			result: true,
		},
		{
			op:   structs.ConstraintRegex,
			lVal: "foobarbaz", rVal: "[\\w]+",
			result: true,
		},
		{
			op:   "<",
			lVal: "foo", rVal: "bar",
			result: false,
		},
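		// set_contains splits the right-hand side on commas and trims
		// whitespace, so "foo,  bar  " still matches.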
		{
			op:   structs.ConstraintSetContains,
			lVal: "foo,bar,baz", rVal: "foo,  bar  ",
			result: true,
		},
		{
			op:   structs.ConstraintSetContains,
			lVal: "foo,bar,baz", rVal: "foo,bam",
			result: false,
		},
	}

	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkConstraint(ctx, tc.op, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}

func TestCheckLexicalOrder(t *testing.T) {
	type tcase struct {
		op         string
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			op:   "<",
			lVal: "bar", rVal: "foo",
			result: true,
		},
		{
			op:   "<=",
			lVal: "foo", rVal: "foo",
			result: true,
		},
		{
			op:   ">",
			lVal: "bar", rVal: "foo",
			result: false,
		},
		{
			op:   ">=",
			lVal: "bar", rVal: "bar",
			result: true,
		},
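		// A non-string operand is not lexically comparable and yields
		// false.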
		{
			op:   ">",
			lVal: 1, rVal: "foo",
			result: false,
		},
	}
	for _, tc := range cases {
		if res := checkLexicalOrder(tc.op, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}

func TestCheckVersionConstraint(t *testing.T) {
	type tcase struct {
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			lVal: "1.2.3", rVal: "~> 1.0",
			result: true,
		},
		{
			lVal: "1.2.3", rVal: ">= 1.0, < 1.4",
			result: true,
		},
		{
			lVal: "2.0.1", rVal: "~> 1.0",
			result: false,
		},
		{
			lVal: "1.4", rVal: ">= 1.0, < 1.4",
			result: false,
		},
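		// Integer values are formatted into version strings, so 1
		// satisfies "~> 1.0".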
		{
			lVal: 1, rVal: "~> 1.0",
			result: true,
		},
	}
	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkVersionConstraint(ctx, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}

func TestCheckRegexpConstraint(t *testing.T) {
	type tcase struct {
		lVal, rVal interface{}
		result     bool
	}
	cases := []tcase{
		{
			lVal: "foobar", rVal: "bar",
			result: true,
		},
		{
			lVal: "foobar", rVal: "^foo",
			result: true,
		},
		{
			lVal: "foobar", rVal: "^bar",
			result: false,
		},
		{
			lVal: "zipzap", rVal: "foo",
			result: false,
		},
		{
			lVal: 1, rVal: "foo",
			result: false,
		},
	}
	for _, tc := range cases {
		_, ctx := testContext(t)
		if res := checkRegexpConstraint(ctx, tc.lVal, tc.rVal); res != tc.result {
			t.Fatalf("TC: %#v, Result: %v", tc, res)
		}
	}
}

// This test places allocations on nodes to check that infeasible nodes are
// detected correctly and the only feasible one is picked.
func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_hosts constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		ID:          "foo",
		Namespace:   structs.DefaultNamespace,
		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
		TaskGroups:  []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on node 1 and tg2 on node 2. This should make
	// the job unsatisfiable on all nodes but node 3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
		},
	}

	proposed := NewDistinctHostsIterator(ctx, static)
	proposed.SetTaskGroup(tg1)
	proposed.SetJob(job)

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}

	if out[0].ID != nodes[2].ID {
		t.Fatalf("wrong node picked")
	}
}

func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_hosts constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}

	job := &structs.Job{
		ID:          "foo",
		Namespace:   structs.DefaultNamespace,
		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
		TaskGroups:  []*structs.TaskGroup{tg1, tg2, tg3},
	}

	// Add allocs placing tg1 on node 1 and tg2 on node 2. This should make
	// the job unsatisfiable for tg3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			ID:        uuid.Generate(),
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			ID:        uuid.Generate(),
		},
	}

	proposed := NewDistinctHostsIterator(ctx, static)
	proposed.SetTaskGroup(tg3)
	proposed.SetJob(job)

	// It should not be possible to place a third task group with only two
	// nodes available.
	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}
	static := NewStaticIterator(ctx, nodes)

	// Create a task group with a distinct_hosts constraint.
	tg1 := &structs.TaskGroup{
		Name: "example",
		Constraints: []*structs.Constraint{
			{Operand: structs.ConstraintDistinctHosts},
		},
	}
	tg2 := &structs.TaskGroup{Name: "baz"}

	// Add a planned alloc to node1.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "foo",
		},
	}

	// Add a planned alloc to node2 with the same task group name but a
	// different job.
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "bar",
		},
	}

	proposed := NewDistinctHostsIterator(ctx, static)
	proposed.SetTaskGroup(tg1)
	proposed.SetJob(&structs.Job{
		ID:        "foo",
		Namespace: structs.DefaultNamespace,
	})

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}

	// Expect it to skip the first node as there is a previous alloc on it for
	// the same task group.
	if out[0] != nodes[1] {
		t.Fatalf("Bad: %v", out)
	}

	// Since the other task group doesn't have the constraint, both nodes should
	// be feasible.
	proposed.Reset()
	proposed.SetTaskGroup(tg2)
	out = collectFeasible(proposed)
	if len(out) != 2 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates allocations across task groups that use a property value
// to check whether a job-level constraint properly considers all task groups.
func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		ID:        "foo",
		Namespace: structs.DefaultNamespace,
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on nodes 1 and 2 and tg2 on nodes 3 and 4. This
	// should make the job unsatisfiable on all nodes but node 5. Also mix the
	// allocations existing in the plan and the state store.
	plan := ctx.Plan()
	alloc1ID := uuid.Generate()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			NodeID:    nodes[0].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},
	}

	// Put an allocation on node 5 but mark it as stopped in the plan.
	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[4].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[4].ID,
		},
	}

	upserting := []*structs.Allocation{
		// Have one of the allocations exist in both the plan and the state
		// store. This resembles an allocation update.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[3].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[3].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[4].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg2)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}
	if out[0].ID != nodes[4].ID {
		t.Fatalf("wrong node picked")
	}
}

// This test creates allocations across task groups that use a property value
// to check whether a job-level constraint properly considers all task groups
// when the constraint allows a count greater than one.
func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and two task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		ID:        "foo",
		Namespace: structs.DefaultNamespace,
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
				RTarget: "2",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2},
	}
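
	// RTarget "2" allows each distinct ${meta.rack} value to hold up to two
	// allocations of this job before it becomes infeasible.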

	// Add allocs placing two allocations on each of nodes 1 and 2 and only
	// one on node 3. This should make the job unsatisfiable on all nodes but
	// node 3. Also mix the allocations existing in the plan and the state
	// store.
	plan := ctx.Plan()
	alloc1ID := uuid.Generate()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			NodeID:    nodes[0].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			NodeID:    nodes[0].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[2].ID,
		},
	}

	// Put an allocation on node 3 but mark it as stopped in the plan.
	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[2].ID,
		},
	}

	upserting := []*structs.Allocation{
		// Have one of the allocations exist in both the plan and the state
		// store. This resembles an allocation update.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        alloc1ID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg2)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}
	if out[0].ID != nodes[2].ID {
		t.Fatalf("wrong node picked")
	}
}

// This test checks that when a node's existing allocation is stopped in the
// plan and the plan places a replacement allocation on the same node, the
// next selection will not pick that node again.
func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
	}

	nodes[0].Meta["rack"] = "1"

	// Add to state store
	if err := state.UpsertNode(uint64(100), nodes[0]); err != nil {
		t.Fatalf("failed to upsert node: %v", err)
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and a single task
	// group.
	tg1 := &structs.TaskGroup{Name: "bar"}
	job := &structs.Job{
		Namespace: structs.DefaultNamespace,
		ID:        "foo",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1},
	}

	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}

	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[0].ID,
		},
	}

	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg1)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates previous allocations selecting certain property values to
// check that infeasible property values are detected correctly and that the
// job is reported infeasible when no value remains for a task group.
func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}

	job := &structs.Job{
		Namespace: structs.DefaultNamespace,
		ID:        "foo",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2, tg3},
	}

	// Add allocs placing tg1 on node 1 and tg2 on node 2. This should make
	// the job unsatisfiable for tg3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg3)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates previous allocations selecting certain property values to
// check that infeasible property values are detected correctly when the
// constraint allows a count greater than one.
func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a job with a distinct_property constraint and three task groups.
	tg1 := &structs.TaskGroup{Name: "bar"}
	tg2 := &structs.TaskGroup{Name: "baz"}
	tg3 := &structs.TaskGroup{Name: "bam"}

	job := &structs.Job{
		Namespace: structs.DefaultNamespace,
		ID:        "foo",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
				RTarget: "2",
			},
		},
		TaskGroups: []*structs.TaskGroup{tg1, tg2, tg3},
	}

	// Add allocs placing a tg1 and a tg2 allocation on each of node 1 and
	// node 2, using up the count of two per rack. This should make the job
	// unsatisfiable for tg3.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}
	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg2.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg3)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 0 {
		t.Fatalf("Bad: %#v", out)
	}
}

// This test creates previous allocations selecting certain property values to
// check that infeasible property values are detected correctly and that the
// only feasible node is picked when the constraint is at the task group level.
func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
	state, ctx := testContext(t)
	nodes := []*structs.Node{
		mock.Node(),
		mock.Node(),
		mock.Node(),
	}

	for i, n := range nodes {
		n.Meta["rack"] = fmt.Sprintf("%d", i)

		// Add to state store
		if err := state.UpsertNode(uint64(100+i), n); err != nil {
			t.Fatalf("failed to upsert node: %v", err)
		}
	}

	static := NewStaticIterator(ctx, nodes)

	// Create a task group with the distinct_property constraint.
	tg1 := &structs.TaskGroup{
		Name: "example",
		Constraints: []*structs.Constraint{
			{
				Operand: structs.ConstraintDistinctProperty,
				LTarget: "${meta.rack}",
			},
		},
	}
	tg2 := &structs.TaskGroup{Name: "baz"}

	job := &structs.Job{
		Namespace:  structs.DefaultNamespace,
		ID:         "foo",
		TaskGroups: []*structs.TaskGroup{tg1, tg2},
	}

	// Add allocs placing tg1 on nodes 1 and 2. This should make the job
	// unsatisfiable on all nodes but node 3. Also mix the allocations
	// existing in the plan and the state store.
	plan := ctx.Plan()
	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			NodeID:    nodes[0].ID,
		},
	}

	// Put an allocation on node 3 but mark it as stopped in the plan.
	stoppingAllocID := uuid.Generate()
	plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			NodeID:    nodes[2].ID,
		},
	}

	upserting := []*structs.Allocation{
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[1].ID,
		},

		// Should be ignored as it is a different job.
		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     "ignore 2",
			Job:       job,
			ID:        uuid.Generate(),
			EvalID:    uuid.Generate(),
			NodeID:    nodes[2].ID,
		},

		{
			Namespace: structs.DefaultNamespace,
			TaskGroup: tg1.Name,
			JobID:     job.ID,
			Job:       job,
			ID:        stoppingAllocID,
			EvalID:    uuid.Generate(),
			NodeID:    nodes[2].ID,
		},
	}
	if err := state.UpsertAllocs(1000, upserting); err != nil {
		t.Fatalf("failed to UpsertAllocs: %v", err)
	}

	proposed := NewDistinctPropertyIterator(ctx, static)
	proposed.SetJob(job)
	proposed.SetTaskGroup(tg1)
	proposed.Reset()

	out := collectFeasible(proposed)
	if len(out) != 1 {
		t.Fatalf("Bad: %#v", out)
	}
	if out[0].ID != nodes[2].ID {
		t.Fatalf("wrong node picked")
	}

	// Since the other task group doesn't have the constraint, all three
	// nodes should be feasible.
	proposed.SetTaskGroup(tg2)
	proposed.Reset()

	out = collectFeasible(proposed)
	if len(out) != 3 {
		t.Fatalf("Bad: %#v", out)
	}
}

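// collectFeasible exhausts the iterator and returns every node it yields.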
func collectFeasible(iter FeasibleIterator) (out []*structs.Node) {
	for {
		next := iter.Next()
		if next == nil {
			break
		}
		out = append(out, next)
	}
	return
}

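// The sketch below is not part of the original suite; it shows how these
// pieces are typically wired together: nodes feed a static iterator, a
// checker filters them through the feasibility wrapper, and collectFeasible
// drains the result. It assumes mock nodes advertise the exec driver, as the
// DriverChecker cases above suggest.
func exampleFeasiblePipeline(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node(), mock.Node()}
	static := NewStaticIterator(ctx, nodes)

	// Filter on the exec driver at the job level; no task-group checkers.
	checker := NewDriverChecker(ctx, map[string]struct{}{"exec": {}})
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{checker}, nil)
	wrapper.SetTaskGroup("example")

	if out := collectFeasible(wrapper); len(out) != len(nodes) {
		t.Fatalf("expected all nodes to be feasible, got: %#v", out)
	}
}
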
// mockFeasibilityChecker is a FeasibilityChecker that returns predetermined
// feasibility values.
type mockFeasibilityChecker struct {
	retVals []bool
	i       int
}

func newMockFeasibilityChecker(values ...bool) *mockFeasibilityChecker {
	return &mockFeasibilityChecker{retVals: values}
}

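// Feasible returns the next scripted value and counts the call; once the
// script is exhausted it keeps returning false.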
func (c *mockFeasibilityChecker) Feasible(*structs.Node) bool {
	if c.i >= len(c.retVals) {
		c.i++
		return false
	}

	f := c.retVals[c.i]
	c.i++
	return f
}

// calls returns how many times the checker was called.
func (c *mockFeasibilityChecker) calls() int { return c.i }

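// The wrapper tests below rely on per-computed-class caching: once a job or
// task group is marked ineligible for a node's computed class, the wrapped
// checkers are not invoked for nodes of that class, which is what the call
// counts assert.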
func TestFeasibilityWrapper_JobIneligible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	mocked := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil)

	// Set the job to ineligible
	ctx.Eligibility().SetJobEligibility(false, nodes[0].ComputedClass)

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out != nil || mocked.calls() != 0 {
		t.Fatalf("bad: %#v %d", out, mocked.calls())
	}
}

func TestFeasibilityWrapper_JobEscapes(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	mocked := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil)

	// Set the job to escaped
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEscaped

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out != nil || mocked.calls() != 1 {
		t.Fatalf("bad: %#v", out)
	}

	// Ensure that the job status didn't change from escaped even though the
	// option failed.
	if status := ctx.Eligibility().JobStatus(cc); status != EvalComputedClassEscaped {
		t.Fatalf("job status is %v; want %v", status, EvalComputedClassEscaped)
	}
}

func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasibilityChecker(true)
	tgMock := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Set both the job and the task group to eligible
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().SetTaskGroupEligibility(true, "foo", cc)
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out == nil || tgMock.calls() != 0 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}
}

func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasibilityChecker(true)
	tgMock := newMockFeasibilityChecker(false)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Set the job to eligible but the task group to ineligible
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().SetTaskGroupEligibility(false, "foo", cc)
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out != nil || tgMock.calls() != 0 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}
}

func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) {
	_, ctx := testContext(t)
	nodes := []*structs.Node{mock.Node()}
	static := NewStaticIterator(ctx, nodes)
	jobMock := newMockFeasibilityChecker(true)
	tgMock := newMockFeasibilityChecker(true)
	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})

	// Set the job to eligible and the task group to escaped
	cc := nodes[0].ComputedClass
	ctx.Eligibility().job[cc] = EvalComputedClassEligible
	ctx.Eligibility().taskGroups["foo"] =
		map[string]ComputedClassFeasibility{cc: EvalComputedClassEscaped}
	wrapper.SetTaskGroup("foo")

	// Run the wrapper.
	out := collectFeasible(wrapper)

	if out == nil || tgMock.calls() != 1 {
		t.Fatalf("bad: %#v %v", out, tgMock.calls())
	}

	if e, ok := ctx.Eligibility().taskGroups["foo"][cc]; !ok || e != EvalComputedClassEscaped {
		t.Fatalf("bad: %v %v", e, ok)
	}
}