github.com/smithx10/nomad@v0.9.1-rc1/scheduler/feasible_test.go

     1  package scheduler
     2  
     3  import (
     4  	"fmt"
     5  	"reflect"
     6  	"testing"
     7  	"time"
     8  
     9  	"github.com/hashicorp/nomad/helper/uuid"
    10  	"github.com/hashicorp/nomad/nomad/mock"
    11  	"github.com/hashicorp/nomad/nomad/structs"
    12  	psstructs "github.com/hashicorp/nomad/plugins/shared/structs"
    13  	"github.com/stretchr/testify/require"
    14  )
    15  
    16  func TestStaticIterator_Reset(t *testing.T) {
    17  	_, ctx := testContext(t)
    18  	var nodes []*structs.Node
    19  	for i := 0; i < 3; i++ {
    20  		nodes = append(nodes, mock.Node())
    21  	}
    22  	static := NewStaticIterator(ctx, nodes)
    23  
    24  	for i := 0; i < 6; i++ {
    25  		static.Reset()
    26  		for j := 0; j < i; j++ {
    27  			static.Next()
    28  		}
    29  		static.Reset()
    30  
    31  		out := collectFeasible(static)
    32  		if len(out) != len(nodes) {
    33  			t.Fatalf("pass %d: got %d of %d feasible nodes: %#v",
    34  				i, len(out), len(nodes), out)
    35  		}
    36  
    37  		ids := make(map[string]struct{})
    38  		for _, o := range out {
    39  			if _, ok := ids[o.ID]; ok {
    40  				t.Fatalf("duplicate")
    41  			}
    42  			ids[o.ID] = struct{}{}
    43  		}
    44  	}
    45  }
    46  
    47  func TestStaticIterator_SetNodes(t *testing.T) {
    48  	_, ctx := testContext(t)
    49  	var nodes []*structs.Node
    50  	for i := 0; i < 3; i++ {
    51  		nodes = append(nodes, mock.Node())
    52  	}
    53  	static := NewStaticIterator(ctx, nodes)
    54  
    55  	newNodes := []*structs.Node{mock.Node()}
    56  	static.SetNodes(newNodes)
    57  
    58  	out := collectFeasible(static)
    59  	if !reflect.DeepEqual(out, newNodes) {
    60  		t.Fatalf("bad: %#v", out)
    61  	}
    62  }
    63  
    64  func TestRandomIterator(t *testing.T) {
    65  	_, ctx := testContext(t)
    66  	var nodes []*structs.Node
    67  	for i := 0; i < 10; i++ {
    68  		nodes = append(nodes, mock.Node())
    69  	}
    70  
    71  	nc := make([]*structs.Node, len(nodes))
    72  	copy(nc, nodes)
    73  	rand := NewRandomIterator(ctx, nc)
    74  
    75  	out := collectFeasible(rand)
    76  	if len(out) != len(nodes) {
    77  		t.Fatalf("missing nodes")
    78  	}
    79  	if reflect.DeepEqual(out, nodes) {
    80  		t.Fatalf("same order")
    81  	}
    82  }
    83  
    84  func TestDriverChecker(t *testing.T) {
    85  	_, ctx := testContext(t)
    86  	nodes := []*structs.Node{
    87  		mock.Node(),
    88  		mock.Node(),
    89  		mock.Node(),
    90  		mock.Node(),
    91  	}
    92  	nodes[0].Attributes["driver.foo"] = "1"
    93  	nodes[1].Attributes["driver.foo"] = "0"
    94  	nodes[2].Attributes["driver.foo"] = "true"
    95  	nodes[3].Attributes["driver.foo"] = "False"
    96  
    97  	drivers := map[string]struct{}{
    98  		"exec": {},
    99  		"foo":  {},
   100  	}
   101  	checker := NewDriverChecker(ctx, drivers)
   102  	cases := []struct {
   103  		Node   *structs.Node
   104  		Result bool
   105  	}{
   106  		{
   107  			Node:   nodes[0],
   108  			Result: true,
   109  		},
   110  		{
   111  			Node:   nodes[1],
   112  			Result: false,
   113  		},
   114  		{
   115  			Node:   nodes[2],
   116  			Result: true,
   117  		},
   118  		{
   119  			Node:   nodes[3],
   120  			Result: false,
   121  		},
   122  	}
   123  
   124  	for i, c := range cases {
   125  		if act := checker.Feasible(c.Node); act != c.Result {
   126  			t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result)
   127  		}
   128  	}
   129  }
   130  
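        // Test_HealthChecks verifies that the driver checker honors DriverInfo
        // health: a driver that is detected but unhealthy is infeasible.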
   131  func Test_HealthChecks(t *testing.T) {
   132  	require := require.New(t)
   133  	_, ctx := testContext(t)
   134  
   135  	nodes := []*structs.Node{
   136  		mock.Node(),
   137  		mock.Node(),
   138  		mock.Node(),
   139  	}
   140  	for _, e := range nodes {
   141  		e.Drivers = make(map[string]*structs.DriverInfo)
   142  	}
   143  	nodes[0].Attributes["driver.foo"] = "1"
   144  	nodes[0].Drivers["foo"] = &structs.DriverInfo{
   145  		Detected:          true,
   146  		Healthy:           true,
   147  		HealthDescription: "running",
   148  		UpdateTime:        time.Now(),
   149  	}
   150  	nodes[1].Attributes["driver.bar"] = "1"
   151  	nodes[1].Drivers["bar"] = &structs.DriverInfo{
   152  		Detected:          true,
   153  		Healthy:           false,
   154  		HealthDescription: "not running",
   155  		UpdateTime:        time.Now(),
   156  	}
   157  	nodes[2].Attributes["driver.baz"] = "0"
   158  	nodes[2].Drivers["baz"] = &structs.DriverInfo{
   159  		Detected:          false,
   160  		Healthy:           false,
   161  		HealthDescription: "not running",
   162  		UpdateTime:        time.Now(),
   163  	}
   164  
   165  	testDrivers := []string{"foo", "bar", "baz"}
   166  	cases := []struct {
   167  		Node   *structs.Node
   168  		Result bool
   169  	}{
   170  		{
   171  			Node:   nodes[0],
   172  			Result: true,
   173  		},
   174  		{
   175  			Node:   nodes[1],
   176  			Result: false,
   177  		},
   178  		{
   179  			Node:   nodes[2],
   180  			Result: false,
   181  		},
   182  	}
   183  
   184  	for i, c := range cases {
   185  		drivers := map[string]struct{}{
   186  			testDrivers[i]: {},
   187  		}
   188  		checker := NewDriverChecker(ctx, drivers)
   189  		act := checker.Feasible(c.Node)
   190  		require.Equal(c.Result, act)
   191  	}
   192  }
   193  
   194  func TestConstraintChecker(t *testing.T) {
   195  	_, ctx := testContext(t)
   196  	nodes := []*structs.Node{
   197  		mock.Node(),
   198  		mock.Node(),
   199  		mock.Node(),
   200  	}
   201  
   202  	nodes[0].Attributes["kernel.name"] = "freebsd"
   203  	nodes[1].Datacenter = "dc2"
   204  	nodes[2].NodeClass = "large"
   205  	nodes[2].Attributes["foo"] = "bar"
   206  
   207  	constraints := []*structs.Constraint{
   208  		{
   209  			Operand: "=",
   210  			LTarget: "${node.datacenter}",
   211  			RTarget: "dc1",
   212  		},
   213  		{
   214  			Operand: "is",
   215  			LTarget: "${attr.kernel.name}",
   216  			RTarget: "linux",
   217  		},
   218  		{
   219  			Operand: "!=",
   220  			LTarget: "${node.class}",
   221  			RTarget: "linux-medium-pci",
   222  		},
   223  		{
   224  			Operand: "is_set",
   225  			LTarget: "${attr.foo}",
   226  		},
   227  	}
   228  	checker := NewConstraintChecker(ctx, constraints)
   229  	cases := []struct {
   230  		Node   *structs.Node
   231  		Result bool
   232  	}{
   233  		{
   234  			Node:   nodes[0],
   235  			Result: false,
   236  		},
   237  		{
   238  			Node:   nodes[1],
   239  			Result: false,
   240  		},
   241  		{
   242  			Node:   nodes[2],
   243  			Result: true,
   244  		},
   245  	}
   246  
   247  	for i, c := range cases {
   248  		if act := checker.Feasible(c.Node); act != c.Result {
   249  			t.Fatalf("case(%d) failed: got %v; want %v", i, act, c.Result)
   250  		}
   251  	}
   252  }
   253  
   254  func TestResolveConstraintTarget(t *testing.T) {
   255  	type tcase struct {
   256  		target string
   257  		node   *structs.Node
   258  		val    interface{}
   259  		result bool
   260  	}
   261  	node := mock.Node()
   262  	cases := []tcase{
   263  		{
   264  			target: "${node.unique.id}",
   265  			node:   node,
   266  			val:    node.ID,
   267  			result: true,
   268  		},
   269  		{
   270  			target: "${node.datacenter}",
   271  			node:   node,
   272  			val:    node.Datacenter,
   273  			result: true,
   274  		},
   275  		{
   276  			target: "${node.unique.name}",
   277  			node:   node,
   278  			val:    node.Name,
   279  			result: true,
   280  		},
   281  		{
   282  			target: "${node.class}",
   283  			node:   node,
   284  			val:    node.NodeClass,
   285  			result: true,
   286  		},
   287  		{
   288  			target: "${node.foo}",
   289  			node:   node,
   290  			result: false,
   291  		},
   292  		{
   293  			target: "${attr.kernel.name}",
   294  			node:   node,
   295  			val:    node.Attributes["kernel.name"],
   296  			result: true,
   297  		},
   298  		{
   299  			target: "${attr.rand}",
   300  			node:   node,
   301  			val:    "",
   302  			result: false,
   303  		},
   304  		{
   305  			target: "${meta.pci-dss}",
   306  			node:   node,
   307  			val:    node.Meta["pci-dss"],
   308  			result: true,
   309  		},
   310  		{
   311  			target: "${meta.rand}",
   312  			node:   node,
   313  			val:    "",
   314  			result: false,
   315  		},
   316  	}
   317  
   318  	for _, tc := range cases {
   319  		res, ok := resolveTarget(tc.target, tc.node)
   320  		if ok != tc.result {
   321  			t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
   322  		}
   323  		if ok && !reflect.DeepEqual(res, tc.val) {
   324  			t.Fatalf("TC: %#v, Result: %v %v", tc, res, ok)
   325  		}
   326  	}
   327  }
   328  
   329  func TestCheckConstraint(t *testing.T) {
   330  	type tcase struct {
   331  		op         string
   332  		lVal, rVal interface{}
   333  		result     bool
   334  	}
   335  	cases := []tcase{
   336  		{
   337  			op:   "=",
   338  			lVal: "foo", rVal: "foo",
   339  			result: true,
   340  		},
   341  		{
   342  			op:   "is",
   343  			lVal: "foo", rVal: "foo",
   344  			result: true,
   345  		},
   346  		{
   347  			op:   "==",
   348  			lVal: "foo", rVal: "foo",
   349  			result: true,
   350  		},
   351  		{
   352  			op:   "==",
   353  			lVal: "foo", rVal: nil,
   354  			result: false,
   355  		},
   356  		{
   357  			op:   "==",
   358  			lVal: nil, rVal: "foo",
   359  			result: false,
   360  		},
   361  		{
   362  			op:   "==",
   363  			lVal: nil, rVal: nil,
   364  			result: false,
   365  		},
   366  		{
   367  			op:   "!=",
   368  			lVal: "foo", rVal: "foo",
   369  			result: false,
   370  		},
   371  		{
   372  			op:   "!=",
   373  			lVal: "foo", rVal: "bar",
   374  			result: true,
   375  		},
   376  		{
   377  			op:   "!=",
   378  			lVal: nil, rVal: "foo",
   379  			result: true,
   380  		},
   381  		{
   382  			op:   "!=",
   383  			lVal: "foo", rVal: nil,
   384  			result: true,
   385  		},
   386  		{
   387  			op:   "!=",
   388  			lVal: nil, rVal: nil,
   389  			result: false,
   390  		},
   391  		{
   392  			op:   "not",
   393  			lVal: "foo", rVal: "bar",
   394  			result: true,
   395  		},
   396  		{
   397  			op:   structs.ConstraintVersion,
   398  			lVal: "1.2.3", rVal: "~> 1.0",
   399  			result: true,
   400  		},
   401  		{
   402  			op:   structs.ConstraintVersion,
   403  			lVal: nil, rVal: "~> 1.0",
   404  			result: false,
   405  		},
   406  		{
   407  			op:   structs.ConstraintRegex,
   408  			lVal: "foobarbaz", rVal: "[\\w]+",
   409  			result: true,
   410  		},
   411  		{
   412  			op:   structs.ConstraintRegex,
   413  			lVal: nil, rVal: "[\\w]+",
   414  			result: false,
   415  		},
   416  		{
   417  			op:   "<",
   418  			lVal: "foo", rVal: "bar",
   419  			result: false,
   420  		},
   421  		{
   422  			op:   "<",
   423  			lVal: nil, rVal: "bar",
   424  			result: false,
   425  		},
   426  		{
   427  			op:   structs.ConstraintSetContains,
   428  			lVal: "foo,bar,baz", rVal: "foo,  bar  ",
   429  			result: true,
   430  		},
   431  		{
   432  			op:   structs.ConstraintSetContains,
   433  			lVal: "foo,bar,baz", rVal: "foo,bam",
   434  			result: false,
   435  		},
   436  		{
   437  			op:     structs.ConstraintAttributeIsSet,
   438  			lVal:   "foo",
   439  			result: true,
   440  		},
   441  		{
   442  			op:     structs.ConstraintAttributeIsSet,
   443  			lVal:   nil,
   444  			result: false,
   445  		},
   446  		{
   447  			op:     structs.ConstraintAttributeIsNotSet,
   448  			lVal:   nil,
   449  			result: true,
   450  		},
   451  		{
   452  			op:     structs.ConstraintAttributeIsNotSet,
   453  			lVal:   "foo",
   454  			result: false,
   455  		},
   456  	}
   457  
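        	// checkConstraint's final two arguments report whether each operand
        	// was actually found on the node; here a nil value stands in for a
        	// missing operand.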
   458  	for _, tc := range cases {
   459  		_, ctx := testContext(t)
   460  		if res := checkConstraint(ctx, tc.op, tc.lVal, tc.rVal, tc.lVal != nil, tc.rVal != nil); res != tc.result {
   461  			t.Fatalf("TC: %#v, Result: %v", tc, res)
   462  		}
   463  	}
   464  }
   465  
   466  func TestCheckLexicalOrder(t *testing.T) {
   467  	type tcase struct {
   468  		op         string
   469  		lVal, rVal interface{}
   470  		result     bool
   471  	}
   472  	cases := []tcase{
   473  		{
   474  			op:   "<",
   475  			lVal: "bar", rVal: "foo",
   476  			result: true,
   477  		},
   478  		{
   479  			op:   "<=",
   480  			lVal: "foo", rVal: "foo",
   481  			result: true,
   482  		},
   483  		{
   484  			op:   ">",
   485  			lVal: "bar", rVal: "foo",
   486  			result: false,
   487  		},
   488  		{
   489  			op:   ">=",
   490  			lVal: "bar", rVal: "bar",
   491  			result: true,
   492  		},
   493  		{
   494  			op:   ">",
   495  			lVal: 1, rVal: "foo",
   496  			result: false,
   497  		},
   498  	}
   499  	for _, tc := range cases {
   500  		if res := checkLexicalOrder(tc.op, tc.lVal, tc.rVal); res != tc.result {
   501  			t.Fatalf("TC: %#v, Result: %v", tc, res)
   502  		}
   503  	}
   504  }
   505  
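        // checkVersionMatch parses constraints with hashicorp/go-version, so
        // "~> 1.0" is the pessimistic operator (>= 1.0, < 2.0). An integer
        // operand such as 1 is accepted as the version "1", which is why the
        // final case below passes.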
   506  func TestCheckVersionConstraint(t *testing.T) {
   507  	type tcase struct {
   508  		lVal, rVal interface{}
   509  		result     bool
   510  	}
   511  	cases := []tcase{
   512  		{
   513  			lVal: "1.2.3", rVal: "~> 1.0",
   514  			result: true,
   515  		},
   516  		{
   517  			lVal: "1.2.3", rVal: ">= 1.0, < 1.4",
   518  			result: true,
   519  		},
   520  		{
   521  			lVal: "2.0.1", rVal: "~> 1.0",
   522  			result: false,
   523  		},
   524  		{
   525  			lVal: "1.4", rVal: ">= 1.0, < 1.4",
   526  			result: false,
   527  		},
   528  		{
   529  			lVal: 1, rVal: "~> 1.0",
   530  			result: true,
   531  		},
   532  	}
   533  	for _, tc := range cases {
   534  		_, ctx := testContext(t)
   535  		if res := checkVersionMatch(ctx, tc.lVal, tc.rVal); res != tc.result {
   536  			t.Fatalf("TC: %#v, Result: %v", tc, res)
   537  		}
   538  	}
   539  }
   540  
   541  func TestCheckRegexpConstraint(t *testing.T) {
   542  	type tcase struct {
   543  		lVal, rVal interface{}
   544  		result     bool
   545  	}
   546  	cases := []tcase{
   547  		{
   548  			lVal: "foobar", rVal: "bar",
   549  			result: true,
   550  		},
   551  		{
   552  			lVal: "foobar", rVal: "^foo",
   553  			result: true,
   554  		},
   555  		{
   556  			lVal: "foobar", rVal: "^bar",
   557  			result: false,
   558  		},
   559  		{
   560  			lVal: "zipzap", rVal: "foo",
   561  			result: false,
   562  		},
   563  		{
   564  			lVal: 1, rVal: "foo",
   565  			result: false,
   566  		},
   567  	}
   568  	for _, tc := range cases {
   569  		_, ctx := testContext(t)
   570  		if res := checkRegexpMatch(ctx, tc.lVal, tc.rVal); res != tc.result {
   571  			t.Fatalf("TC: %#v, Result: %v", tc, res)
   572  		}
   573  	}
   574  }
   575  
   576  // This test puts allocations on the nodes to test that the iterator detects
   577  // infeasible nodes correctly and picks the only feasible one.
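        //
        // In a jobspec the constraint looks roughly like:
        //
        //	constraint {
        //		operator = "distinct_hosts"
        //		value    = "true"
        //	}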
   578  func TestDistinctHostsIterator_JobDistinctHosts(t *testing.T) {
   579  	_, ctx := testContext(t)
   580  	nodes := []*structs.Node{
   581  		mock.Node(),
   582  		mock.Node(),
   583  		mock.Node(),
   584  	}
   585  	static := NewStaticIterator(ctx, nodes)
   586  
   587  	// Create a job with a distinct_hosts constraint and two task groups.
   588  	tg1 := &structs.TaskGroup{Name: "bar"}
   589  	tg2 := &structs.TaskGroup{Name: "baz"}
   590  
   591  	job := &structs.Job{
   592  		ID:          "foo",
   593  		Namespace:   structs.DefaultNamespace,
   594  		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
   595  		TaskGroups:  []*structs.TaskGroup{tg1, tg2},
   596  	}
   597  
   598  	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
   599  	// job unsatisfiable on all nodes but node3
   600  	plan := ctx.Plan()
   601  	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
   602  		{
   603  			Namespace: structs.DefaultNamespace,
   604  			TaskGroup: tg1.Name,
   605  			JobID:     job.ID,
   606  			Job:       job,
   607  			ID:        uuid.Generate(),
   608  		},
   609  
   610  		// Should be ignored as it is a different job.
   611  		{
   612  			Namespace: structs.DefaultNamespace,
   613  			TaskGroup: tg2.Name,
   614  			JobID:     "ignore 2",
   615  			Job:       job,
   616  			ID:        uuid.Generate(),
   617  		},
   618  	}
   619  	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
   620  		{
   621  			Namespace: structs.DefaultNamespace,
   622  			TaskGroup: tg2.Name,
   623  			JobID:     job.ID,
   624  			Job:       job,
   625  			ID:        uuid.Generate(),
   626  		},
   627  
   628  		// Should be ignored as it is a different job.
   629  		{
   630  			Namespace: structs.DefaultNamespace,
   631  			TaskGroup: tg1.Name,
   632  			JobID:     "ignore 2",
   633  			Job:       job,
   634  			ID:        uuid.Generate(),
   635  		},
   636  	}
   637  
   638  	proposed := NewDistinctHostsIterator(ctx, static)
   639  	proposed.SetTaskGroup(tg1)
   640  	proposed.SetJob(job)
   641  
   642  	out := collectFeasible(proposed)
   643  	if len(out) != 1 {
   644  		t.Fatalf("Bad: %#v", out)
   645  	}
   646  
   647  	if out[0].ID != nodes[2].ID {
   648  		t.Fatalf("wrong node picked")
   649  	}
   650  }
   651  
   652  func TestDistinctHostsIterator_JobDistinctHosts_InfeasibleCount(t *testing.T) {
   653  	_, ctx := testContext(t)
   654  	nodes := []*structs.Node{
   655  		mock.Node(),
   656  		mock.Node(),
   657  	}
   658  	static := NewStaticIterator(ctx, nodes)
   659  
   660  	// Create a job with a distinct_hosts constraint and three task groups.
   661  	tg1 := &structs.TaskGroup{Name: "bar"}
   662  	tg2 := &structs.TaskGroup{Name: "baz"}
   663  	tg3 := &structs.TaskGroup{Name: "bam"}
   664  
   665  	job := &structs.Job{
   666  		ID:          "foo",
   667  		Namespace:   structs.DefaultNamespace,
   668  		Constraints: []*structs.Constraint{{Operand: structs.ConstraintDistinctHosts}},
   669  		TaskGroups:  []*structs.TaskGroup{tg1, tg2, tg3},
   670  	}
   671  
   672  	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
   673  	// job unsatisfiable for tg3
   674  	plan := ctx.Plan()
   675  	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
   676  		{
   677  			Namespace: structs.DefaultNamespace,
   678  			TaskGroup: tg1.Name,
   679  			JobID:     job.ID,
   680  			ID:        uuid.Generate(),
   681  		},
   682  	}
   683  	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
   684  		{
   685  			Namespace: structs.DefaultNamespace,
   686  			TaskGroup: tg2.Name,
   687  			JobID:     job.ID,
   688  			ID:        uuid.Generate(),
   689  		},
   690  	}
   691  
   692  	proposed := NewDistinctHostsIterator(ctx, static)
   693  	proposed.SetTaskGroup(tg3)
   694  	proposed.SetJob(job)
   695  
   696  	// It should not be able to place three task groups on only two nodes.
   697  	out := collectFeasible(proposed)
   698  	if len(out) != 0 {
   699  		t.Fatalf("Bad: %#v", out)
   700  	}
   701  }
   702  
   703  func TestDistinctHostsIterator_TaskGroupDistinctHosts(t *testing.T) {
   704  	_, ctx := testContext(t)
   705  	nodes := []*structs.Node{
   706  		mock.Node(),
   707  		mock.Node(),
   708  	}
   709  	static := NewStaticIterator(ctx, nodes)
   710  
   711  	// Create a task group with a distinct_hosts constraint.
   712  	tg1 := &structs.TaskGroup{
   713  		Name: "example",
   714  		Constraints: []*structs.Constraint{
   715  			{Operand: structs.ConstraintDistinctHosts},
   716  		},
   717  	}
   718  	tg2 := &structs.TaskGroup{Name: "baz"}
   719  
   720  	// Add a planned alloc to node1.
   721  	plan := ctx.Plan()
   722  	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
   723  		{
   724  			Namespace: structs.DefaultNamespace,
   725  			TaskGroup: tg1.Name,
   726  			JobID:     "foo",
   727  		},
   728  	}
   729  
   730  	// Add a planned alloc to node2 with the same task group name but a
   731  	// different job.
   732  	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
   733  		{
   734  			Namespace: structs.DefaultNamespace,
   735  			TaskGroup: tg1.Name,
   736  			JobID:     "bar",
   737  		},
   738  	}
   739  
   740  	proposed := NewDistinctHostsIterator(ctx, static)
   741  	proposed.SetTaskGroup(tg1)
   742  	proposed.SetJob(&structs.Job{
   743  		ID:        "foo",
   744  		Namespace: structs.DefaultNamespace,
   745  	})
   746  
   747  	out := collectFeasible(proposed)
   748  	if len(out) != 1 {
   749  		t.Fatalf("Bad: %#v", out)
   750  	}
   751  
   752  	// Expect it to skip the first node as there is a previous alloc on it for
   753  	// the same task group.
   754  	if out[0] != nodes[1] {
   755  		t.Fatalf("Bad: %v", out)
   756  	}
   757  
   758  	// Since the other task group doesn't have the constraint, both nodes should
   759  	// be feasible.
   760  	proposed.Reset()
   761  	proposed.SetTaskGroup(tg2)
   762  	out = collectFeasible(proposed)
   763  	if len(out) != 2 {
   764  		t.Fatalf("Bad: %#v", out)
   765  	}
   766  }
   767  
   768  // This test creates allocations across task groups that use a property
   769  // value to detect whether the constraint at the job level properly considers
   770  // all task groups.
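        //
        // In a jobspec the constraint looks roughly like:
        //
        //	constraint {
        //		operator  = "distinct_property"
        //		attribute = "${meta.rack}"
        //	}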
   771  func TestDistinctPropertyIterator_JobDistinctProperty(t *testing.T) {
   772  	state, ctx := testContext(t)
   773  	nodes := []*structs.Node{
   774  		mock.Node(),
   775  		mock.Node(),
   776  		mock.Node(),
   777  		mock.Node(),
   778  		mock.Node(),
   779  	}
   780  
   781  	for i, n := range nodes {
   782  		n.Meta["rack"] = fmt.Sprintf("%d", i)
   783  
   784  		// Add to state store
   785  		if err := state.UpsertNode(uint64(100+i), n); err != nil {
   786  			t.Fatalf("failed to upsert node: %v", err)
   787  		}
   788  	}
   789  
   790  	static := NewStaticIterator(ctx, nodes)
   791  
   792  	// Create a job with a distinct_property constraint and two task groups.
   793  	tg1 := &structs.TaskGroup{Name: "bar"}
   794  	tg2 := &structs.TaskGroup{Name: "baz"}
   795  
   796  	job := &structs.Job{
   797  		ID:        "foo",
   798  		Namespace: structs.DefaultNamespace,
   799  		Constraints: []*structs.Constraint{
   800  			{
   801  				Operand: structs.ConstraintDistinctProperty,
   802  				LTarget: "${meta.rack}",
   803  			},
   804  		},
   805  		TaskGroups: []*structs.TaskGroup{tg1, tg2},
   806  	}
   807  
   808  	// Add allocs placing tg1 on nodes 1 and 2 and tg2 on nodes 3 and 4. This
   809  	// should make the job unsatisfiable on all nodes but node5. Also mix the
   810  	// allocations existing in the plan and the state store.
   811  	plan := ctx.Plan()
   812  	alloc1ID := uuid.Generate()
   813  	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
   814  		{
   815  			Namespace: structs.DefaultNamespace,
   816  			TaskGroup: tg1.Name,
   817  			JobID:     job.ID,
   818  			Job:       job,
   819  			ID:        alloc1ID,
   820  			NodeID:    nodes[0].ID,
   821  		},
   822  
   823  		// Should be ignored as it is a different job.
   824  		{
   825  			Namespace: structs.DefaultNamespace,
   826  			TaskGroup: tg2.Name,
   827  			JobID:     "ignore 2",
   828  			Job:       job,
   829  			ID:        uuid.Generate(),
   830  			NodeID:    nodes[0].ID,
   831  		},
   832  	}
   833  	plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
   834  		{
   835  			Namespace: structs.DefaultNamespace,
   836  			TaskGroup: tg2.Name,
   837  			JobID:     job.ID,
   838  			Job:       job,
   839  			ID:        uuid.Generate(),
   840  			NodeID:    nodes[2].ID,
   841  		},
   842  
   843  		// Should be ignored as it is a different job.
   844  		{
   845  			Namespace: structs.DefaultNamespace,
   846  			TaskGroup: tg1.Name,
   847  			JobID:     "ignore 2",
   848  			Job:       job,
   849  			ID:        uuid.Generate(),
   850  			NodeID:    nodes[2].ID,
   851  		},
   852  	}
   853  
   854  	// Put an allocation on Node 5 but make it stopped in the plan
   855  	stoppingAllocID := uuid.Generate()
   856  	plan.NodeUpdate[nodes[4].ID] = []*structs.Allocation{
   857  		{
   858  			Namespace: structs.DefaultNamespace,
   859  			TaskGroup: tg2.Name,
   860  			JobID:     job.ID,
   861  			Job:       job,
   862  			ID:        stoppingAllocID,
   863  			NodeID:    nodes[4].ID,
   864  		},
   865  	}
   866  
   867  	upserting := []*structs.Allocation{
   868  		// Have one of the allocations exist in both the plan and the state
   869  		// store. This resembles an allocation update
   870  		{
   871  			Namespace: structs.DefaultNamespace,
   872  			TaskGroup: tg1.Name,
   873  			JobID:     job.ID,
   874  			Job:       job,
   875  			ID:        alloc1ID,
   876  			EvalID:    uuid.Generate(),
   877  			NodeID:    nodes[0].ID,
   878  		},
   879  
   880  		{
   881  			Namespace: structs.DefaultNamespace,
   882  			TaskGroup: tg1.Name,
   883  			JobID:     job.ID,
   884  			Job:       job,
   885  			ID:        uuid.Generate(),
   886  			EvalID:    uuid.Generate(),
   887  			NodeID:    nodes[1].ID,
   888  		},
   889  
   890  		// Should be ignored as it is a different job.
   891  		{
   892  			Namespace: structs.DefaultNamespace,
   893  			TaskGroup: tg2.Name,
   894  			JobID:     "ignore 2",
   895  			Job:       job,
   896  			ID:        uuid.Generate(),
   897  			EvalID:    uuid.Generate(),
   898  			NodeID:    nodes[1].ID,
   899  		},
   900  		{
   901  			Namespace: structs.DefaultNamespace,
   902  			TaskGroup: tg2.Name,
   903  			JobID:     job.ID,
   904  			Job:       job,
   905  			ID:        uuid.Generate(),
   906  			EvalID:    uuid.Generate(),
   907  			NodeID:    nodes[3].ID,
   908  		},
   909  
   910  		// Should be ignored as it is a different job.
   911  		{
   912  			Namespace: structs.DefaultNamespace,
   913  			TaskGroup: tg1.Name,
   914  			JobID:     "ignore 2",
   915  			Job:       job,
   916  			ID:        uuid.Generate(),
   917  			EvalID:    uuid.Generate(),
   918  			NodeID:    nodes[3].ID,
   919  		},
   920  		{
   921  			Namespace: structs.DefaultNamespace,
   922  			TaskGroup: tg2.Name,
   923  			JobID:     job.ID,
   924  			Job:       job,
   925  			ID:        stoppingAllocID,
   926  			EvalID:    uuid.Generate(),
   927  			NodeID:    nodes[4].ID,
   928  		},
   929  	}
   930  	if err := state.UpsertAllocs(1000, upserting); err != nil {
   931  		t.Fatalf("failed to UpsertAllocs: %v", err)
   932  	}
   933  
   934  	proposed := NewDistinctPropertyIterator(ctx, static)
   935  	proposed.SetJob(job)
   936  	proposed.SetTaskGroup(tg2)
   937  	proposed.Reset()
   938  
   939  	out := collectFeasible(proposed)
   940  	if len(out) != 1 {
   941  		t.Fatalf("Bad: %#v", out)
   942  	}
   943  	if out[0].ID != nodes[4].ID {
   944  		t.Fatalf("wrong node picked")
   945  	}
   946  }
   947  
   948  // This test creates allocations across task groups that use a property value
   949  // to detect whether the constraint at the job level properly considers all
   950  // task groups when the constraint allows a count greater than one.
   951  func TestDistinctPropertyIterator_JobDistinctProperty_Count(t *testing.T) {
   952  	state, ctx := testContext(t)
   953  	nodes := []*structs.Node{
   954  		mock.Node(),
   955  		mock.Node(),
   956  		mock.Node(),
   957  	}
   958  
   959  	for i, n := range nodes {
   960  		n.Meta["rack"] = fmt.Sprintf("%d", i)
   961  
   962  		// Add to state store
   963  		if err := state.UpsertNode(uint64(100+i), n); err != nil {
   964  			t.Fatalf("failed to upsert node: %v", err)
   965  		}
   966  	}
   967  
   968  	static := NewStaticIterator(ctx, nodes)
   969  
   970  	// Create a job with a distinct_property constraint and two task groups.
   971  	tg1 := &structs.TaskGroup{Name: "bar"}
   972  	tg2 := &structs.TaskGroup{Name: "baz"}
   973  
   974  	job := &structs.Job{
   975  		ID:        "foo",
   976  		Namespace: structs.DefaultNamespace,
   977  		Constraints: []*structs.Constraint{
   978  			{
   979  				Operand: structs.ConstraintDistinctProperty,
   980  				LTarget: "${meta.rack}",
   981  				RTarget: "2",
   982  			},
   983  		},
   984  		TaskGroups: []*structs.TaskGroup{tg1, tg2},
   985  	}
   986  
   987  	// Add allocs placing two allocations on each of node 1 and node 2 and only
   988  	// one on node 3. This should make the job unsatisfiable on all nodes but
   989  	// node 3. Also mix the allocations existing in the plan and the state store.
   990  	plan := ctx.Plan()
   991  	alloc1ID := uuid.Generate()
   992  	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
   993  		{
   994  			Namespace: structs.DefaultNamespace,
   995  			TaskGroup: tg1.Name,
   996  			JobID:     job.ID,
   997  			Job:       job,
   998  			ID:        alloc1ID,
   999  			NodeID:    nodes[0].ID,
  1000  		},
  1001  
  1002  		{
  1003  			Namespace: structs.DefaultNamespace,
  1004  			TaskGroup: tg2.Name,
  1005  			JobID:     job.ID,
  1006  			Job:       job,
  1007  			ID:        alloc1ID,
  1008  			NodeID:    nodes[0].ID,
  1009  		},
  1010  
  1011  		// Should be ignored as it is a different job.
  1012  		{
  1013  			Namespace: structs.DefaultNamespace,
  1014  			TaskGroup: tg2.Name,
  1015  			JobID:     "ignore 2",
  1016  			Job:       job,
  1017  			ID:        uuid.Generate(),
  1018  			NodeID:    nodes[0].ID,
  1019  		},
  1020  	}
  1021  	plan.NodeAllocation[nodes[1].ID] = []*structs.Allocation{
  1022  		{
  1023  			Namespace: structs.DefaultNamespace,
  1024  			TaskGroup: tg1.Name,
  1025  			JobID:     job.ID,
  1026  			Job:       job,
  1027  			ID:        uuid.Generate(),
  1028  			NodeID:    nodes[1].ID,
  1029  		},
  1030  
  1031  		{
  1032  			Namespace: structs.DefaultNamespace,
  1033  			TaskGroup: tg2.Name,
  1034  			JobID:     job.ID,
  1035  			Job:       job,
  1036  			ID:        uuid.Generate(),
  1037  			NodeID:    nodes[1].ID,
  1038  		},
  1039  
  1040  		// Should be ignored as it is a different job.
  1041  		{
  1042  			Namespace: structs.DefaultNamespace,
  1043  			TaskGroup: tg1.Name,
  1044  			JobID:     "ignore 2",
  1045  			Job:       job,
  1046  			ID:        uuid.Generate(),
  1047  			NodeID:    nodes[1].ID,
  1048  		},
  1049  	}
  1050  	plan.NodeAllocation[nodes[2].ID] = []*structs.Allocation{
  1051  		{
  1052  			Namespace: structs.DefaultNamespace,
  1053  			TaskGroup: tg1.Name,
  1054  			JobID:     job.ID,
  1055  			Job:       job,
  1056  			ID:        uuid.Generate(),
  1057  			NodeID:    nodes[2].ID,
  1058  		},
  1059  
  1060  		// Should be ignored as it is a different job.
  1061  		{
  1062  			Namespace: structs.DefaultNamespace,
  1063  			TaskGroup: tg1.Name,
  1064  			JobID:     "ignore 2",
  1065  			Job:       job,
  1066  			ID:        uuid.Generate(),
  1067  			NodeID:    nodes[2].ID,
  1068  		},
  1069  	}
  1070  
  1071  	// Put an allocation on Node 3 but make it stopped in the plan
  1072  	stoppingAllocID := uuid.Generate()
  1073  	plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
  1074  		{
  1075  			Namespace: structs.DefaultNamespace,
  1076  			TaskGroup: tg2.Name,
  1077  			JobID:     job.ID,
  1078  			Job:       job,
  1079  			ID:        stoppingAllocID,
  1080  			NodeID:    nodes[2].ID,
  1081  		},
  1082  	}
  1083  
  1084  	upserting := []*structs.Allocation{
  1085  		// Have one of the allocations exist in both the plan and the state
  1086  		// store. This resembles an allocation update
  1087  		{
  1088  			Namespace: structs.DefaultNamespace,
  1089  			TaskGroup: tg1.Name,
  1090  			JobID:     job.ID,
  1091  			Job:       job,
  1092  			ID:        alloc1ID,
  1093  			EvalID:    uuid.Generate(),
  1094  			NodeID:    nodes[0].ID,
  1095  		},
  1096  
  1097  		{
  1098  			Namespace: structs.DefaultNamespace,
  1099  			TaskGroup: tg1.Name,
  1100  			JobID:     job.ID,
  1101  			Job:       job,
  1102  			ID:        uuid.Generate(),
  1103  			EvalID:    uuid.Generate(),
  1104  			NodeID:    nodes[1].ID,
  1105  		},
  1106  
  1107  		{
  1108  			Namespace: structs.DefaultNamespace,
  1109  			TaskGroup: tg2.Name,
  1110  			JobID:     job.ID,
  1111  			Job:       job,
  1112  			ID:        uuid.Generate(),
  1113  			EvalID:    uuid.Generate(),
  1114  			NodeID:    nodes[0].ID,
  1115  		},
  1116  
  1117  		// Should be ignored as it is a different job.
  1118  		{
  1119  			Namespace: structs.DefaultNamespace,
  1120  			TaskGroup: tg1.Name,
  1121  			JobID:     "ignore 2",
  1122  			Job:       job,
  1123  			ID:        uuid.Generate(),
  1124  			EvalID:    uuid.Generate(),
  1125  			NodeID:    nodes[1].ID,
  1126  		},
  1127  		{
  1128  			Namespace: structs.DefaultNamespace,
  1129  			TaskGroup: tg2.Name,
  1130  			JobID:     "ignore 2",
  1131  			Job:       job,
  1132  			ID:        uuid.Generate(),
  1133  			EvalID:    uuid.Generate(),
  1134  			NodeID:    nodes[1].ID,
  1135  		},
  1136  	}
  1137  	if err := state.UpsertAllocs(1000, upserting); err != nil {
  1138  		t.Fatalf("failed to UpsertAllocs: %v", err)
  1139  	}
  1140  
  1141  	proposed := NewDistinctPropertyIterator(ctx, static)
  1142  	proposed.SetJob(job)
  1143  	proposed.SetTaskGroup(tg2)
  1144  	proposed.Reset()
  1145  
  1146  	out := collectFeasible(proposed)
  1147  	if len(out) != 1 {
  1148  		t.Fatalf("Bad: %#v", out)
  1149  	}
  1150  	if out[0].ID != nodes[2].ID {
  1151  		t.Fatalf("wrong node picked")
  1152  	}
  1153  }
  1154  
  1155  // This test checks that if a node has an allocation on it that is being
  1156  // stopped and the plan re-uses the node for a new allocation, the next
  1157  // selection won't pick that node.
  1158  func TestDistinctPropertyIterator_JobDistinctProperty_RemoveAndReplace(t *testing.T) {
  1159  	state, ctx := testContext(t)
  1160  	nodes := []*structs.Node{
  1161  		mock.Node(),
  1162  	}
  1163  
  1164  	nodes[0].Meta["rack"] = "1"
  1165  
  1166  	// Add to state store
  1167  	if err := state.UpsertNode(uint64(100), nodes[0]); err != nil {
  1168  		t.Fatalf("failed to upsert node: %v", err)
  1169  	}
  1170  
  1171  	static := NewStaticIterator(ctx, nodes)
  1172  
  1173  	// Create a job with a distinct_property constraint and a task group.
  1174  	tg1 := &structs.TaskGroup{Name: "bar"}
  1175  	job := &structs.Job{
  1176  		Namespace: structs.DefaultNamespace,
  1177  		ID:        "foo",
  1178  		Constraints: []*structs.Constraint{
  1179  			{
  1180  				Operand: structs.ConstraintDistinctProperty,
  1181  				LTarget: "${meta.rack}",
  1182  			},
  1183  		},
  1184  		TaskGroups: []*structs.TaskGroup{tg1},
  1185  	}
  1186  
  1187  	plan := ctx.Plan()
  1188  	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
  1189  		{
  1190  			Namespace: structs.DefaultNamespace,
  1191  			TaskGroup: tg1.Name,
  1192  			JobID:     job.ID,
  1193  			Job:       job,
  1194  			ID:        uuid.Generate(),
  1195  			NodeID:    nodes[0].ID,
  1196  		},
  1197  	}
  1198  
  1199  	stoppingAllocID := uuid.Generate()
  1200  	plan.NodeUpdate[nodes[0].ID] = []*structs.Allocation{
  1201  		{
  1202  			Namespace: structs.DefaultNamespace,
  1203  			TaskGroup: tg1.Name,
  1204  			JobID:     job.ID,
  1205  			Job:       job,
  1206  			ID:        stoppingAllocID,
  1207  			NodeID:    nodes[0].ID,
  1208  		},
  1209  	}
  1210  
  1211  	upserting := []*structs.Allocation{
  1212  		{
  1213  			Namespace: structs.DefaultNamespace,
  1214  			TaskGroup: tg1.Name,
  1215  			JobID:     job.ID,
  1216  			Job:       job,
  1217  			ID:        stoppingAllocID,
  1218  			EvalID:    uuid.Generate(),
  1219  			NodeID:    nodes[0].ID,
  1220  		},
  1221  	}
  1222  	if err := state.UpsertAllocs(1000, upserting); err != nil {
  1223  		t.Fatalf("failed to UpsertAllocs: %v", err)
  1224  	}
  1225  
  1226  	proposed := NewDistinctPropertyIterator(ctx, static)
  1227  	proposed.SetJob(job)
  1228  	proposed.SetTaskGroup(tg1)
  1229  	proposed.Reset()
  1230  
  1231  	out := collectFeasible(proposed)
  1232  	if len(out) != 0 {
  1233  		t.Fatalf("Bad: %#v", out)
  1234  	}
  1235  }
  1236  
  1237  // This test creates previous allocations selecting certain property values to
  1238  // test that it detects infeasibility of property values correctly; with every
  1239  // value already taken, no node should remain feasible for the last task group.
  1240  func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible(t *testing.T) {
  1241  	state, ctx := testContext(t)
  1242  	nodes := []*structs.Node{
  1243  		mock.Node(),
  1244  		mock.Node(),
  1245  	}
  1246  
  1247  	for i, n := range nodes {
  1248  		n.Meta["rack"] = fmt.Sprintf("%d", i)
  1249  
  1250  		// Add to state store
  1251  		if err := state.UpsertNode(uint64(100+i), n); err != nil {
  1252  			t.Fatalf("failed to upsert node: %v", err)
  1253  		}
  1254  	}
  1255  
  1256  	static := NewStaticIterator(ctx, nodes)
  1257  
  1258  	// Create a job with a distinct_property constraint and three task groups.
  1259  	tg1 := &structs.TaskGroup{Name: "bar"}
  1260  	tg2 := &structs.TaskGroup{Name: "baz"}
  1261  	tg3 := &structs.TaskGroup{Name: "bam"}
  1262  
  1263  	job := &structs.Job{
  1264  		Namespace: structs.DefaultNamespace,
  1265  		ID:        "foo",
  1266  		Constraints: []*structs.Constraint{
  1267  			{
  1268  				Operand: structs.ConstraintDistinctProperty,
  1269  				LTarget: "${meta.rack}",
  1270  			},
  1271  		},
  1272  		TaskGroups: []*structs.TaskGroup{tg1, tg2, tg3},
  1273  	}
  1274  
  1275  	// Add allocs placing tg1 on node1 and tg2 on node2. This should make the
  1276  	// job unsatisfiable for tg3.
  1277  	plan := ctx.Plan()
  1278  	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
  1279  		{
  1280  			Namespace: structs.DefaultNamespace,
  1281  			TaskGroup: tg1.Name,
  1282  			JobID:     job.ID,
  1283  			Job:       job,
  1284  			ID:        uuid.Generate(),
  1285  			NodeID:    nodes[0].ID,
  1286  		},
  1287  	}
  1288  	upserting := []*structs.Allocation{
  1289  		{
  1290  			Namespace: structs.DefaultNamespace,
  1291  			TaskGroup: tg2.Name,
  1292  			JobID:     job.ID,
  1293  			Job:       job,
  1294  			ID:        uuid.Generate(),
  1295  			EvalID:    uuid.Generate(),
  1296  			NodeID:    nodes[1].ID,
  1297  		},
  1298  	}
  1299  	if err := state.UpsertAllocs(1000, upserting); err != nil {
  1300  		t.Fatalf("failed to UpsertAllocs: %v", err)
  1301  	}
  1302  
  1303  	proposed := NewDistinctPropertyIterator(ctx, static)
  1304  	proposed.SetJob(job)
  1305  	proposed.SetTaskGroup(tg3)
  1306  	proposed.Reset()
  1307  
  1308  	out := collectFeasible(proposed)
  1309  	if len(out) != 0 {
  1310  		t.Fatalf("Bad: %#v", out)
  1311  	}
  1312  }
  1313  
  1314  // This test creates previous allocations selecting certain property values to
  1315  // test that it detects infeasibility of property values correctly once each
  1316  // value has reached the allowed count; no node should remain feasible.
  1317  func TestDistinctPropertyIterator_JobDistinctProperty_Infeasible_Count(t *testing.T) {
  1318  	state, ctx := testContext(t)
  1319  	nodes := []*structs.Node{
  1320  		mock.Node(),
  1321  		mock.Node(),
  1322  	}
  1323  
  1324  	for i, n := range nodes {
  1325  		n.Meta["rack"] = fmt.Sprintf("%d", i)
  1326  
  1327  		// Add to state store
  1328  		if err := state.UpsertNode(uint64(100+i), n); err != nil {
  1329  			t.Fatalf("failed to upsert node: %v", err)
  1330  		}
  1331  	}
  1332  
  1333  	static := NewStaticIterator(ctx, nodes)
  1334  
  1335  	// Create a job with a distinct_property constraint and three task groups.
  1336  	tg1 := &structs.TaskGroup{Name: "bar"}
  1337  	tg2 := &structs.TaskGroup{Name: "baz"}
  1338  	tg3 := &structs.TaskGroup{Name: "bam"}
  1339  
  1340  	job := &structs.Job{
  1341  		Namespace: structs.DefaultNamespace,
  1342  		ID:        "foo",
  1343  		Constraints: []*structs.Constraint{
  1344  			{
  1345  				Operand: structs.ConstraintDistinctProperty,
  1346  				LTarget: "${meta.rack}",
  1347  				RTarget: "2",
  1348  			},
  1349  		},
  1350  		TaskGroups: []*structs.TaskGroup{tg1, tg2, tg3},
  1351  	}
  1352  
  1353  	// Add allocs placing two tg1's on node1 and two tg2's on node2. This should
  1354  	// make the job unsatisfiable for tg3.
  1355  	plan := ctx.Plan()
  1356  	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
  1357  		{
  1358  			Namespace: structs.DefaultNamespace,
  1359  			TaskGroup: tg1.Name,
  1360  			JobID:     job.ID,
  1361  			Job:       job,
  1362  			ID:        uuid.Generate(),
  1363  			NodeID:    nodes[0].ID,
  1364  		},
  1365  		{
  1366  			Namespace: structs.DefaultNamespace,
  1367  			TaskGroup: tg2.Name,
  1368  			JobID:     job.ID,
  1369  			Job:       job,
  1370  			ID:        uuid.Generate(),
  1371  			NodeID:    nodes[0].ID,
  1372  		},
  1373  	}
  1374  	upserting := []*structs.Allocation{
  1375  		{
  1376  			Namespace: structs.DefaultNamespace,
  1377  			TaskGroup: tg1.Name,
  1378  			JobID:     job.ID,
  1379  			Job:       job,
  1380  			ID:        uuid.Generate(),
  1381  			EvalID:    uuid.Generate(),
  1382  			NodeID:    nodes[1].ID,
  1383  		},
  1384  		{
  1385  			Namespace: structs.DefaultNamespace,
  1386  			TaskGroup: tg2.Name,
  1387  			JobID:     job.ID,
  1388  			Job:       job,
  1389  			ID:        uuid.Generate(),
  1390  			EvalID:    uuid.Generate(),
  1391  			NodeID:    nodes[1].ID,
  1392  		},
  1393  	}
  1394  	if err := state.UpsertAllocs(1000, upserting); err != nil {
  1395  		t.Fatalf("failed to UpsertAllocs: %v", err)
  1396  	}
  1397  
  1398  	proposed := NewDistinctPropertyIterator(ctx, static)
  1399  	proposed.SetJob(job)
  1400  	proposed.SetTaskGroup(tg3)
  1401  	proposed.Reset()
  1402  
  1403  	out := collectFeasible(proposed)
  1404  	if len(out) != 0 {
  1405  		t.Fatalf("Bad: %#v", out)
  1406  	}
  1407  }
  1408  
  1409  // This test creates previous allocations selecting certain property values to
  1410  // test that it detects infeasibility of property values correctly and picks
  1411  // the only feasible one when the constraint is at the task group level.
  1412  func TestDistinctPropertyIterator_TaskGroupDistinctProperty(t *testing.T) {
  1413  	state, ctx := testContext(t)
  1414  	nodes := []*structs.Node{
  1415  		mock.Node(),
  1416  		mock.Node(),
  1417  		mock.Node(),
  1418  	}
  1419  
  1420  	for i, n := range nodes {
  1421  		n.Meta["rack"] = fmt.Sprintf("%d", i)
  1422  
  1423  		// Add to state store
  1424  		if err := state.UpsertNode(uint64(100+i), n); err != nil {
  1425  			t.Fatalf("failed to upsert node: %v", err)
  1426  		}
  1427  	}
  1428  
  1429  	static := NewStaticIterator(ctx, nodes)
  1430  
  1431  	// Create a job with a task group with the distinct_property constraint
  1432  	tg1 := &structs.TaskGroup{
  1433  		Name: "example",
  1434  		Constraints: []*structs.Constraint{
  1435  			{
  1436  				Operand: structs.ConstraintDistinctProperty,
  1437  				LTarget: "${meta.rack}",
  1438  			},
  1439  		},
  1440  	}
  1441  	tg2 := &structs.TaskGroup{Name: "baz"}
  1442  
  1443  	job := &structs.Job{
  1444  		Namespace:  structs.DefaultNamespace,
  1445  		ID:         "foo",
  1446  		TaskGroups: []*structs.TaskGroup{tg1, tg2},
  1447  	}
  1448  
  1449  	// Add allocs placing tg1 on nodes 1 and 2. This should make the
  1450  	// task group unsatisfiable on all nodes but node3. Also mix the
  1451  	// allocations existing in the plan and the state store.
  1452  	plan := ctx.Plan()
  1453  	plan.NodeAllocation[nodes[0].ID] = []*structs.Allocation{
  1454  		{
  1455  			Namespace: structs.DefaultNamespace,
  1456  			TaskGroup: tg1.Name,
  1457  			JobID:     job.ID,
  1458  			Job:       job,
  1459  			ID:        uuid.Generate(),
  1460  			NodeID:    nodes[0].ID,
  1461  		},
  1462  	}
  1463  
  1464  	// Put an allocation on Node 3 but make it stopped in the plan
  1465  	stoppingAllocID := uuid.Generate()
  1466  	plan.NodeUpdate[nodes[2].ID] = []*structs.Allocation{
  1467  		{
  1468  			Namespace: structs.DefaultNamespace,
  1469  			TaskGroup: tg1.Name,
  1470  			JobID:     job.ID,
  1471  			Job:       job,
  1472  			ID:        stoppingAllocID,
  1473  			NodeID:    nodes[2].ID,
  1474  		},
  1475  	}
  1476  
  1477  	upserting := []*structs.Allocation{
  1478  		{
  1479  			Namespace: structs.DefaultNamespace,
  1480  			TaskGroup: tg1.Name,
  1481  			JobID:     job.ID,
  1482  			Job:       job,
  1483  			ID:        uuid.Generate(),
  1484  			EvalID:    uuid.Generate(),
  1485  			NodeID:    nodes[1].ID,
  1486  		},
  1487  
  1488  		// Should be ignored as it is a different job.
  1489  		{
  1490  			Namespace: structs.DefaultNamespace,
  1491  			TaskGroup: tg1.Name,
  1492  			JobID:     "ignore 2",
  1493  			Job:       job,
  1494  			ID:        uuid.Generate(),
  1495  			EvalID:    uuid.Generate(),
  1496  			NodeID:    nodes[2].ID,
  1497  		},
  1498  
  1499  		{
  1500  			Namespace: structs.DefaultNamespace,
  1501  			TaskGroup: tg1.Name,
  1502  			JobID:     job.ID,
  1503  			Job:       job,
  1504  			ID:        stoppingAllocID,
  1505  			EvalID:    uuid.Generate(),
  1506  			NodeID:    nodes[2].ID,
  1507  		},
  1508  	}
  1509  	if err := state.UpsertAllocs(1000, upserting); err != nil {
  1510  		t.Fatalf("failed to UpsertAllocs: %v", err)
  1511  	}
  1512  
  1513  	proposed := NewDistinctPropertyIterator(ctx, static)
  1514  	proposed.SetJob(job)
  1515  	proposed.SetTaskGroup(tg1)
  1516  	proposed.Reset()
  1517  
  1518  	out := collectFeasible(proposed)
  1519  	if len(out) != 1 {
  1520  		t.Fatalf("Bad: %#v", out)
  1521  	}
  1522  	if out[0].ID != nodes[2].ID {
  1523  		t.Fatalf("wrong node picked")
  1524  	}
  1525  
  1526  	// Since the other task group doesn't have the constraint, all three nodes
  1527  	// should be feasible.
  1528  	proposed.SetTaskGroup(tg2)
  1529  	proposed.Reset()
  1530  
  1531  	out = collectFeasible(proposed)
  1532  	if len(out) != 3 {
  1533  		t.Fatalf("Bad: %#v", out)
  1534  	}
  1535  }
  1536  
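        // collectFeasible drains the iterator, returning every node it yields
        // until Next reports nil.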
  1537  func collectFeasible(iter FeasibleIterator) (out []*structs.Node) {
  1538  	for {
  1539  		next := iter.Next()
  1540  		if next == nil {
  1541  			break
  1542  		}
  1543  		out = append(out, next)
  1544  	}
  1545  	return
  1546  }
  1547  
  1548  // mockFeasibilityChecker is a FeasibilityChecker that returns predetermined
  1549  // feasibility values.
  1550  type mockFeasibilityChecker struct {
  1551  	retVals []bool
  1552  	i       int
  1553  }
  1554  
  1555  func newMockFeasibilityChecker(values ...bool) *mockFeasibilityChecker {
  1556  	return &mockFeasibilityChecker{retVals: values}
  1557  }
  1558  
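        // Feasible returns the next scripted value; once the scripted values
        // are exhausted it keeps counting calls and always returns false.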
  1559  func (c *mockFeasibilityChecker) Feasible(*structs.Node) bool {
  1560  	if c.i >= len(c.retVals) {
  1561  		c.i++
  1562  		return false
  1563  	}
  1564  
  1565  	f := c.retVals[c.i]
  1566  	c.i++
  1567  	return f
  1568  }
  1569  
  1570  // calls returns how many times the checker was called.
  1571  func (c *mockFeasibilityChecker) calls() int { return c.i }
  1572  
  1573  func TestFeasibilityWrapper_JobIneligible(t *testing.T) {
  1574  	_, ctx := testContext(t)
  1575  	nodes := []*structs.Node{mock.Node()}
  1576  	static := NewStaticIterator(ctx, nodes)
  1577  	mocked := newMockFeasibilityChecker(false)
  1578  	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil)
  1579  
  1580  	// Set the job to ineligible
  1581  	ctx.Eligibility().SetJobEligibility(false, nodes[0].ComputedClass)
  1582  
  1583  	// Run the wrapper.
  1584  	out := collectFeasible(wrapper)
  1585  
  1586  	if out != nil || mocked.calls() != 0 {
  1587  		t.Fatalf("bad: %#v %d", out, mocked.calls())
  1588  	}
  1589  }
  1590  
  1591  func TestFeasibilityWrapper_JobEscapes(t *testing.T) {
  1592  	_, ctx := testContext(t)
  1593  	nodes := []*structs.Node{mock.Node()}
  1594  	static := NewStaticIterator(ctx, nodes)
  1595  	mocked := newMockFeasibilityChecker(false)
  1596  	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{mocked}, nil)
  1597  
  1598  	// Set the job to escaped
  1599  	cc := nodes[0].ComputedClass
  1600  	ctx.Eligibility().job[cc] = EvalComputedClassEscaped
  1601  
  1602  	// Run the wrapper.
  1603  	out := collectFeasible(wrapper)
  1604  
  1605  	if out != nil || mocked.calls() != 1 {
  1606  		t.Fatalf("bad: %#v %d", out, mocked.calls())
  1607  	}
  1608  
  1609  	// Ensure that the job status didn't change from escaped even though the
  1610  	// option failed.
  1611  	if status := ctx.Eligibility().JobStatus(cc); status != EvalComputedClassEscaped {
  1612  		t.Fatalf("job status is %v; want %v", status, EvalComputedClassEscaped)
  1613  	}
  1614  }
  1615  
  1616  func TestFeasibilityWrapper_JobAndTg_Eligible(t *testing.T) {
  1617  	_, ctx := testContext(t)
  1618  	nodes := []*structs.Node{mock.Node()}
  1619  	static := NewStaticIterator(ctx, nodes)
  1620  	jobMock := newMockFeasibilityChecker(true)
  1621  	tgMock := newMockFeasibilityChecker(false)
  1622  	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})
  1623  
  1624  	// Set the job and the task group to eligible
  1625  	cc := nodes[0].ComputedClass
  1626  	ctx.Eligibility().job[cc] = EvalComputedClassEligible
  1627  	ctx.Eligibility().SetTaskGroupEligibility(true, "foo", cc)
  1628  	wrapper.SetTaskGroup("foo")
  1629  
  1630  	// Run the wrapper.
  1631  	out := collectFeasible(wrapper)
  1632  
  1633  	if out == nil || tgMock.calls() != 0 {
  1634  		t.Fatalf("bad: %#v %v", out, tgMock.calls())
  1635  	}
  1636  }
  1637  
  1638  func TestFeasibilityWrapper_JobEligible_TgIneligible(t *testing.T) {
  1639  	_, ctx := testContext(t)
  1640  	nodes := []*structs.Node{mock.Node()}
  1641  	static := NewStaticIterator(ctx, nodes)
  1642  	jobMock := newMockFeasibilityChecker(true)
  1643  	tgMock := newMockFeasibilityChecker(false)
  1644  	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})
  1645  
  1646  	// Set the job to eligible and the task group to ineligible
  1647  	cc := nodes[0].ComputedClass
  1648  	ctx.Eligibility().job[cc] = EvalComputedClassEligible
  1649  	ctx.Eligibility().SetTaskGroupEligibility(false, "foo", cc)
  1650  	wrapper.SetTaskGroup("foo")
  1651  
  1652  	// Run the wrapper.
  1653  	out := collectFeasible(wrapper)
  1654  
  1655  	if out != nil || tgMock.calls() != 0 {
  1656  		t.Fatalf("bad: %#v %v", out, tgMock.calls())
  1657  	}
  1658  }
  1659  
  1660  func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) {
  1661  	_, ctx := testContext(t)
  1662  	nodes := []*structs.Node{mock.Node()}
  1663  	static := NewStaticIterator(ctx, nodes)
  1664  	jobMock := newMockFeasibilityChecker(true)
  1665  	tgMock := newMockFeasibilityChecker(true)
  1666  	wrapper := NewFeasibilityWrapper(ctx, static, []FeasibilityChecker{jobMock}, []FeasibilityChecker{tgMock})
  1667  
  1668  	// Set the job to eligible and the task group to escaped
  1669  	cc := nodes[0].ComputedClass
  1670  	ctx.Eligibility().job[cc] = EvalComputedClassEligible
  1671  	ctx.Eligibility().taskGroups["foo"] =
  1672  		map[string]ComputedClassFeasibility{cc: EvalComputedClassEscaped}
  1673  	wrapper.SetTaskGroup("foo")
  1674  
  1675  	// Run the wrapper.
  1676  	out := collectFeasible(wrapper)
  1677  
  1678  	if out == nil || tgMock.calls() != 1 {
  1679  		t.Fatalf("bad: %#v %v", out, tgMock.calls())
  1680  	}
  1681  
  1682  	if e, ok := ctx.Eligibility().taskGroups["foo"][cc]; !ok || e != EvalComputedClassEscaped {
  1683  		t.Fatalf("bad: %v %v", e, ok)
  1684  	}
  1685  }
  1686  
  1687  func TestSetContainsAny(t *testing.T) {
  1688  	require.True(t, checkSetContainsAny("a", "a"))
  1689  	require.True(t, checkSetContainsAny("a,b", "a"))
  1690  	require.True(t, checkSetContainsAny("  a,b  ", "a "))
  1691  	require.True(t, checkSetContainsAny("a", "a"))
  1692  	require.False(t, checkSetContainsAny("b", "a"))
  1693  }
  1694  
  1695  func TestDeviceChecker(t *testing.T) {
  1696  	getTg := func(devices ...*structs.RequestedDevice) *structs.TaskGroup {
  1697  		return &structs.TaskGroup{
  1698  			Name: "example",
  1699  			Tasks: []*structs.Task{
  1700  				{
  1701  					Resources: &structs.Resources{
  1702  						Devices: devices,
  1703  					},
  1704  				},
  1705  			},
  1706  		}
  1707  	}
  1708  
  1709  	// Just type
  1710  	gpuTypeReq := &structs.RequestedDevice{
  1711  		Name:  "gpu",
  1712  		Count: 1,
  1713  	}
  1714  	fpgaTypeReq := &structs.RequestedDevice{
  1715  		Name:  "fpga",
  1716  		Count: 1,
  1717  	}
  1718  
  1719  	// vendor/type
  1720  	gpuVendorTypeReq := &structs.RequestedDevice{
  1721  		Name:  "nvidia/gpu",
  1722  		Count: 1,
  1723  	}
  1724  	fpgaVendorTypeReq := &structs.RequestedDevice{
  1725  		Name:  "nvidia/fpga",
  1726  		Count: 1,
  1727  	}
  1728  
  1729  	// vendor/type/model
  1730  	gpuFullReq := &structs.RequestedDevice{
  1731  		Name:  "nvidia/gpu/1080ti",
  1732  		Count: 1,
  1733  	}
  1734  	fpgaFullReq := &structs.RequestedDevice{
  1735  		Name:  "nvidia/fpga/F100",
  1736  		Count: 1,
  1737  	}
  1738  
  1739  	// Just type but high count
  1740  	gpuTypeHighCountReq := &structs.RequestedDevice{
  1741  		Name:  "gpu",
  1742  		Count: 3,
  1743  	}
  1744  
  1745  	getNode := func(devices ...*structs.NodeDeviceResource) *structs.Node {
  1746  		n := mock.Node()
  1747  		n.NodeResources.Devices = devices
  1748  		return n
  1749  	}
  1750  
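        	// nvidia is a healthy node device whose typed, unit-carrying
        	// attributes back the ${device.attr.*} constraint cases below.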
  1751  	nvidia := &structs.NodeDeviceResource{
  1752  		Vendor: "nvidia",
  1753  		Type:   "gpu",
  1754  		Name:   "1080ti",
  1755  		Attributes: map[string]*psstructs.Attribute{
  1756  			"memory":        psstructs.NewIntAttribute(4, psstructs.UnitGiB),
  1757  			"pci_bandwidth": psstructs.NewIntAttribute(995, psstructs.UnitMiBPerS),
  1758  			"cores_clock":   psstructs.NewIntAttribute(800, psstructs.UnitMHz),
  1759  		},
  1760  		Instances: []*structs.NodeDevice{
  1761  			{
  1762  				ID:      uuid.Generate(),
  1763  				Healthy: true,
  1764  			},
  1765  			{
  1766  				ID:      uuid.Generate(),
  1767  				Healthy: true,
  1768  			},
  1769  		},
  1770  	}
  1771  
  1772  	nvidiaUnhealthy := &structs.NodeDeviceResource{
  1773  		Vendor: "nvidia",
  1774  		Type:   "gpu",
  1775  		Name:   "1080ti",
  1776  		Instances: []*structs.NodeDevice{
  1777  			{
  1778  				ID:      uuid.Generate(),
  1779  				Healthy: false,
  1780  			},
  1781  			{
  1782  				ID:      uuid.Generate(),
  1783  				Healthy: false,
  1784  			},
  1785  		},
  1786  	}
  1787  
  1788  	cases := []struct {
  1789  		Name             string
  1790  		Result           bool
  1791  		NodeDevices      []*structs.NodeDeviceResource
  1792  		RequestedDevices []*structs.RequestedDevice
  1793  	}{
  1794  		{
  1795  			Name:             "no devices on node",
  1796  			Result:           false,
  1797  			NodeDevices:      nil,
  1798  			RequestedDevices: []*structs.RequestedDevice{gpuTypeReq},
  1799  		},
  1800  		{
  1801  			Name:             "no requested devices on empty node",
  1802  			Result:           true,
  1803  			NodeDevices:      nil,
  1804  			RequestedDevices: nil,
  1805  		},
  1806  		{
  1807  			Name:             "gpu devices by type",
  1808  			Result:           true,
  1809  			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
  1810  			RequestedDevices: []*structs.RequestedDevice{gpuTypeReq},
  1811  		},
  1812  		{
  1813  			Name:             "wrong devices by type",
  1814  			Result:           false,
  1815  			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
  1816  			RequestedDevices: []*structs.RequestedDevice{fpgaTypeReq},
  1817  		},
  1818  		{
  1819  			Name:             "devices by type unhealthy node",
  1820  			Result:           false,
  1821  			NodeDevices:      []*structs.NodeDeviceResource{nvidiaUnhealthy},
  1822  			RequestedDevices: []*structs.RequestedDevice{gpuTypeReq},
  1823  		},
  1824  		{
  1825  			Name:             "gpu devices by vendor/type",
  1826  			Result:           true,
  1827  			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
  1828  			RequestedDevices: []*structs.RequestedDevice{gpuVendorTypeReq},
  1829  		},
  1830  		{
  1831  			Name:             "wrong devices by vendor/type",
  1832  			Result:           false,
  1833  			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
  1834  			RequestedDevices: []*structs.RequestedDevice{fpgaVendorTypeReq},
  1835  		},
  1836  		{
  1837  			Name:             "gpu devices by vendor/type/model",
  1838  			Result:           true,
  1839  			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
  1840  			RequestedDevices: []*structs.RequestedDevice{gpuFullReq},
  1841  		},
  1842  		{
  1843  			Name:             "wrong devices by vendor/type/model",
  1844  			Result:           false,
  1845  			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
  1846  			RequestedDevices: []*structs.RequestedDevice{fpgaFullReq},
  1847  		},
  1848  		{
  1849  			Name:             "too many requested",
  1850  			Result:           false,
  1851  			NodeDevices:      []*structs.NodeDeviceResource{nvidia},
  1852  			RequestedDevices: []*structs.RequestedDevice{gpuTypeHighCountReq},
  1853  		},
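        		// The remaining cases exercise constraint matching against the
        		// device's model and typed attributes.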
  1854  		{
  1855  			Name:        "meets constraint requirements",
  1856  			Result:      true,
  1857  			NodeDevices: []*structs.NodeDeviceResource{nvidia},
  1858  			RequestedDevices: []*structs.RequestedDevice{
  1859  				{
  1860  					Name:  "nvidia/gpu",
  1861  					Count: 1,
  1862  					Constraints: []*structs.Constraint{
  1863  						{
  1864  							Operand: "=",
  1865  							LTarget: "${device.model}",
  1866  							RTarget: "1080ti",
  1867  						},
  1868  						{
  1869  							Operand: ">",
  1870  							LTarget: "${device.attr.memory}",
  1871  							RTarget: "1320.5 MB",
  1872  						},
  1873  						{
  1874  							Operand: "<=",
  1875  							LTarget: "${device.attr.pci_bandwidth}",
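        							// The irregular spacing is deliberate: it checks that unit
        							// parsing tolerates extra whitespace in the constraint target.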
  1876  							RTarget: ".98   GiB/s",
  1877  						},
  1878  						{
  1879  							Operand: "=",
  1880  							LTarget: "${device.attr.cores_clock}",
  1881  							RTarget: "800MHz",
  1882  						},
  1883  					},
  1884  				},
  1885  			},
  1886  		},
  1887  		{
  1888  			Name:        "meets constraint requirements, multiple count",
  1889  			Result:      true,
  1890  			NodeDevices: []*structs.NodeDeviceResource{nvidia},
  1891  			RequestedDevices: []*structs.RequestedDevice{
  1892  				{
  1893  					Name:  "nvidia/gpu",
  1894  					Count: 2,
  1895  					Constraints: []*structs.Constraint{
  1896  						{
  1897  							Operand: "=",
  1898  							LTarget: "${device.model}",
  1899  							RTarget: "1080ti",
  1900  						},
  1901  						{
  1902  							Operand: ">",
  1903  							LTarget: "${device.attr.memory}",
  1904  							RTarget: "1320.5 MB",
  1905  						},
  1906  						{
  1907  							Operand: "<=",
  1908  							LTarget: "${device.attr.pci_bandwidth}",
  1909  							RTarget: ".98   GiB/s",
  1910  						},
  1911  						{
  1912  							Operand: "=",
  1913  							LTarget: "${device.attr.cores_clock}",
  1914  							RTarget: "800MHz",
  1915  						},
  1916  					},
  1917  				},
  1918  			},
  1919  		},
  1920  		{
  1921  			Name:        "meets constraint requirements but count exceeds available",
  1922  			Result:      false,
  1923  			NodeDevices: []*structs.NodeDeviceResource{nvidia},
  1924  			RequestedDevices: []*structs.RequestedDevice{
  1925  				{
  1926  					Name:  "nvidia/gpu",
  1927  					Count: 5,
  1928  					Constraints: []*structs.Constraint{
  1929  						{
  1930  							Operand: "=",
  1931  							LTarget: "${device.model}",
  1932  							RTarget: "1080ti",
  1933  						},
  1934  						{
  1935  							Operand: ">",
  1936  							LTarget: "${device.attr.memory}",
  1937  							RTarget: "1320.5 MB",
  1938  						},
  1939  						{
  1940  							Operand: "<=",
  1941  							LTarget: "${device.attr.pci_bandwidth}",
  1942  							RTarget: ".98   GiB/s",
  1943  						},
  1944  						{
  1945  							Operand: "=",
  1946  							LTarget: "${device.attr.cores_clock}",
  1947  							RTarget: "800MHz",
  1948  						},
  1949  					},
  1950  				},
  1951  			},
  1952  		},
  1953  		{
  1954  			Name:        "does not meet first constraint",
  1955  			Result:      false,
  1956  			NodeDevices: []*structs.NodeDeviceResource{nvidia},
  1957  			RequestedDevices: []*structs.RequestedDevice{
  1958  				{
  1959  					Name:  "nvidia/gpu",
  1960  					Count: 1,
  1961  					Constraints: []*structs.Constraint{
  1962  						{
  1963  							Operand: "=",
  1964  							LTarget: "${device.model}",
  1965  							RTarget: "2080ti",
  1966  						},
  1967  						{
  1968  							Operand: ">",
  1969  							LTarget: "${device.attr.memory}",
  1970  							RTarget: "1320.5 MB",
  1971  						},
  1972  						{
  1973  							Operand: "<=",
  1974  							LTarget: "${device.attr.pci_bandwidth}",
  1975  							RTarget: ".98   GiB/s",
  1976  						},
  1977  						{
  1978  							Operand: "=",
  1979  							LTarget: "${device.attr.cores_clock}",
  1980  							RTarget: "800MHz",
  1981  						},
  1982  					},
  1983  				},
  1984  			},
  1985  		},
  1986  		{
  1987  			Name:        "does not meet second constraint",
  1988  			Result:      false,
  1989  			NodeDevices: []*structs.NodeDeviceResource{nvidia},
  1990  			RequestedDevices: []*structs.RequestedDevice{
  1991  				{
  1992  					Name:  "nvidia/gpu",
  1993  					Count: 1,
  1994  					Constraints: []*structs.Constraint{
  1995  						{
  1996  							Operand: "=",
  1997  							LTarget: "${device.model}",
  1998  							RTarget: "1080ti",
  1999  						},
  2000  						{
  2001  							Operand: "<",
  2002  							LTarget: "${device.attr.memory}",
  2003  							RTarget: "1320.5 MB",
  2004  						},
  2005  						{
  2006  							Operand: "<=",
  2007  							LTarget: "${device.attr.pci_bandwidth}",
  2008  							RTarget: ".98   GiB/s",
  2009  						},
  2010  						{
  2011  							Operand: "=",
  2012  							LTarget: "${device.attr.cores_clock}",
  2013  							RTarget: "800MHz",
  2014  						},
  2015  					},
  2016  				},
  2017  			},
  2018  		},
  2019  	}
  2020  
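        	// Run each case as a subtest against a fresh context and checker.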
  2021  	for _, c := range cases {
  2022  		t.Run(c.Name, func(t *testing.T) {
  2023  			_, ctx := testContext(t)
  2024  			checker := NewDeviceChecker(ctx)
  2025  			checker.SetTaskGroup(getTg(c.RequestedDevices...))
  2026  			if act := checker.Feasible(getNode(c.NodeDevices...)); act != c.Result {
  2027  				t.Fatalf("got %v; want %v", act, c.Result)
  2028  			}
  2029  		})
  2030  	}
  2031  }
  2032  
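        // TestCheckAttributeConstraint covers the typed attribute comparison used by
        // device constraints: equality and its aliases, version, regex, lexical
        // ordering, the set operators, and the is_set/is_not_set presence checks.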
  2033  func TestCheckAttributeConstraint(t *testing.T) {
  2034  	type tcase struct {
  2035  		op         string
  2036  		lVal, rVal *psstructs.Attribute
  2037  		result     bool
  2038  	}
  2039  	cases := []tcase{
  2040  		{
  2041  			op:     "=",
  2042  			lVal:   psstructs.NewStringAttribute("foo"),
  2043  			rVal:   psstructs.NewStringAttribute("foo"),
  2044  			result: true,
  2045  		},
  2046  		{
  2047  			op:     "=",
  2048  			lVal:   nil,
  2049  			rVal:   nil,
  2050  			result: false,
  2051  		},
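        		// "is" and "==" are aliases for "=".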
  2052  		{
  2053  			op:     "is",
  2054  			lVal:   psstructs.NewStringAttribute("foo"),
  2055  			rVal:   psstructs.NewStringAttribute("foo"),
  2056  			result: true,
  2057  		},
  2058  		{
  2059  			op:     "==",
  2060  			lVal:   psstructs.NewStringAttribute("foo"),
  2061  			rVal:   psstructs.NewStringAttribute("foo"),
  2062  			result: true,
  2063  		},
  2064  		{
  2065  			op:     "!=",
  2066  			lVal:   psstructs.NewStringAttribute("foo"),
  2067  			rVal:   psstructs.NewStringAttribute("foo"),
  2068  			result: false,
  2069  		},
  2070  		{
  2071  			op:     "!=",
  2072  			lVal:   nil,
  2073  			rVal:   psstructs.NewStringAttribute("foo"),
  2074  			result: true,
  2075  		},
  2076  		{
  2077  			op:     "!=",
  2078  			lVal:   psstructs.NewStringAttribute("foo"),
  2079  			rVal:   nil,
  2080  			result: true,
  2081  		},
  2082  		{
  2083  			op:     "!=",
  2084  			lVal:   psstructs.NewStringAttribute("foo"),
  2085  			rVal:   psstructs.NewStringAttribute("bar"),
  2086  			result: true,
  2087  		},
  2088  		{
  2089  			op:     "not",
  2090  			lVal:   psstructs.NewStringAttribute("foo"),
  2091  			rVal:   psstructs.NewStringAttribute("bar"),
  2092  			result: true,
  2093  		},
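        		// Version constraints use go-version syntax, so "~> 1.0" admits 1.2.3.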
  2094  		{
  2095  			op:     structs.ConstraintVersion,
  2096  			lVal:   psstructs.NewStringAttribute("1.2.3"),
  2097  			rVal:   psstructs.NewStringAttribute("~> 1.0"),
  2098  			result: true,
  2099  		},
  2100  		{
  2101  			op:     structs.ConstraintRegex,
  2102  			lVal:   psstructs.NewStringAttribute("foobarbaz"),
  2103  			rVal:   psstructs.NewStringAttribute("[\\w]+"),
  2104  			result: true,
  2105  		},
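        		// Ordering operators compare string attributes lexicographically;
        		// "foo" < "bar" is false.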
  2106  		{
  2107  			op:     "<",
  2108  			lVal:   psstructs.NewStringAttribute("foo"),
  2109  			rVal:   psstructs.NewStringAttribute("bar"),
  2110  			result: false,
  2111  		},
  2112  		{
  2113  			op:     structs.ConstraintSetContains,
  2114  			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
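        			// Whitespace around set members is deliberate: items are trimmed
        			// before comparison.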
  2115  			rVal:   psstructs.NewStringAttribute("foo,  bar  "),
  2116  			result: true,
  2117  		},
  2118  		{
  2119  			op:     structs.ConstraintSetContainsAll,
  2120  			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
  2121  			rVal:   psstructs.NewStringAttribute("foo,  bar  "),
  2122  			result: true,
  2123  		},
  2124  		{
  2125  			op:     structs.ConstraintSetContains,
  2126  			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
  2127  			rVal:   psstructs.NewStringAttribute("foo,bam"),
  2128  			result: false,
  2129  		},
  2130  		{
  2131  			op:     structs.ConstraintSetContainsAny,
  2132  			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
  2133  			rVal:   psstructs.NewStringAttribute("foo,bam"),
  2134  			result: true,
  2135  		},
  2136  		{
  2137  			op:     structs.ConstraintAttributeIsSet,
  2138  			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
  2139  			result: true,
  2140  		},
  2141  		{
  2142  			op:     structs.ConstraintAttributeIsSet,
  2143  			lVal:   nil,
  2144  			result: false,
  2145  		},
  2146  		{
  2147  			op:     structs.ConstraintAttributeIsNotSet,
  2148  			lVal:   psstructs.NewStringAttribute("foo,bar,baz"),
  2149  			result: false,
  2150  		},
  2151  		{
  2152  			op:     structs.ConstraintAttributeIsNotSet,
  2153  			lVal:   nil,
  2154  			result: true,
  2155  		},
  2156  	}
  2157  
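        	// checkAttributeConstraint takes two booleans reporting whether each
        	// operand exists; nil attributes stand in for missing values.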
  2158  	_, ctx := testContext(t)
  2159  	for _, tc := range cases {
  2160  		if res := checkAttributeConstraint(ctx, tc.op, tc.lVal, tc.rVal, tc.lVal != nil, tc.rVal != nil); res != tc.result {
  2161  			t.Fatalf("case %#v: got %v, want %v", tc, res, tc.result)
  2162  		}
  2163  	}
  2164  }