github.com/banmanh482/nomad@v0.11.8/scheduler/rank_test.go

     1  package scheduler
     2  
     3  import (
     4  	"sort"
     5  	"testing"
     6  
     7  	"github.com/hashicorp/nomad/helper/uuid"
     8  	"github.com/hashicorp/nomad/nomad/mock"
     9  	"github.com/hashicorp/nomad/nomad/structs"
    10  	"github.com/stretchr/testify/require"
    11  )
    12  
    13  func TestFeasibleRankIterator(t *testing.T) {
    14  	_, ctx := testContext(t)
    15  	var nodes []*structs.Node
    16  	for i := 0; i < 10; i++ {
    17  		nodes = append(nodes, mock.Node())
    18  	}
    19  	static := NewStaticIterator(ctx, nodes)
    20  
    21  	feasible := NewFeasibleRankIterator(ctx, static)
    22  
    23  	out := collectRanked(feasible)
    24  	if len(out) != len(nodes) {
    25  		t.Fatalf("bad: %v", out)
    26  	}
    27  }
    28  
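        // TestBinPackIterator_NoExistingAlloc scores three empty nodes against a
        // single 1024 CPU / 1024 MB task: a perfect fit, an overloaded node that
        // should be filtered out, and a roughly 50% fit.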
    29  func TestBinPackIterator_NoExistingAlloc(t *testing.T) {
    30  	_, ctx := testContext(t)
    31  	nodes := []*RankedNode{
    32  		{
    33  			Node: &structs.Node{
    34  				// Perfect fit
    35  				NodeResources: &structs.NodeResources{
    36  					Cpu: structs.NodeCpuResources{
    37  						CpuShares: 2048,
    38  					},
    39  					Memory: structs.NodeMemoryResources{
    40  						MemoryMB: 2048,
    41  					},
    42  				},
    43  				ReservedResources: &structs.NodeReservedResources{
    44  					Cpu: structs.NodeReservedCpuResources{
    45  						CpuShares: 1024,
    46  					},
    47  					Memory: structs.NodeReservedMemoryResources{
    48  						MemoryMB: 1024,
    49  					},
    50  				},
    51  			},
    52  		},
    53  		{
    54  			Node: &structs.Node{
    55  				// Overloaded
    56  				NodeResources: &structs.NodeResources{
    57  					Cpu: structs.NodeCpuResources{
    58  						CpuShares: 1024,
    59  					},
    60  					Memory: structs.NodeMemoryResources{
    61  						MemoryMB: 1024,
    62  					},
    63  				},
    64  				ReservedResources: &structs.NodeReservedResources{
    65  					Cpu: structs.NodeReservedCpuResources{
    66  						CpuShares: 512,
    67  					},
    68  					Memory: structs.NodeReservedMemoryResources{
    69  						MemoryMB: 512,
    70  					},
    71  				},
    72  			},
    73  		},
    74  		{
    75  			Node: &structs.Node{
    76  				// 50% fit
    77  				NodeResources: &structs.NodeResources{
    78  					Cpu: structs.NodeCpuResources{
    79  						CpuShares: 4096,
    80  					},
    81  					Memory: structs.NodeMemoryResources{
    82  						MemoryMB: 4096,
    83  					},
    84  				},
    85  				ReservedResources: &structs.NodeReservedResources{
    86  					Cpu: structs.NodeReservedCpuResources{
    87  						CpuShares: 1024,
    88  					},
    89  					Memory: structs.NodeReservedMemoryResources{
    90  						MemoryMB: 1024,
    91  					},
    92  				},
    93  			},
    94  		},
    95  	}
    96  	static := NewStaticRankIterator(ctx, nodes)
    97  
    98  	taskGroup := &structs.TaskGroup{
    99  		EphemeralDisk: &structs.EphemeralDisk{},
   100  		Tasks: []*structs.Task{
   101  			{
   102  				Name: "web",
   103  				Resources: &structs.Resources{
   104  					CPU:      1024,
   105  					MemoryMB: 1024,
   106  				},
   107  			},
   108  		},
   109  	}
   110  	binp := NewBinPackIterator(ctx, static, false, 0, structs.SchedulerAlgorithmBinpack)
   111  	binp.SetTaskGroup(taskGroup)
   112  
   113  	scoreNorm := NewScoreNormalizationIterator(ctx, binp)
   114  
   115  	out := collectRanked(scoreNorm)
   116  	if len(out) != 2 {
   117  		t.Fatalf("Bad: %v", out)
   118  	}
   119  	if out[0] != nodes[0] || out[1] != nodes[2] {
   120  		t.Fatalf("Bad: %v", out)
   121  	}
   122  
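        	// Rough expected-score arithmetic (a sketch, assuming the binpack fit
        	// follows the best-fit curve 20 - (10^freeCpuFrac + 10^freeMemFrac),
        	// normalized against a maximum fit score of 18):
        	//   node 0: usable = 2048 - 1024 reserved = 1024; the 1024/1024 task uses
        	//           100%, so the normalized score is (20 - 2) / 18 = 1.0
        	//   node 2: usable = 4096 - 1024 = 3072; the task uses 1/3, leaving a free
        	//           fraction of 2/3, so roughly (20 - 2*10^(2/3)) / 18 ≈ 0.6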
   123  	if out[0].FinalScore != 1.0 {
   124  		t.Fatalf("Bad Score: %v", out[0].FinalScore)
   125  	}
   126  	if out[1].FinalScore < 0.50 || out[1].FinalScore > 0.60 {
   127  		t.Fatalf("Bad Score: %v", out[1].FinalScore)
   128  	}
   129  }
   130  
   131  // TestBinPackIterator_NoExistingAlloc_MixedReserve asserts that nodes with
   132  // reserved resources are scored as if they simply had that much less capacity
   133  // available.
   134  func TestBinPackIterator_NoExistingAlloc_MixedReserve(t *testing.T) {
   135  	_, ctx := testContext(t)
   136  	nodes := []*RankedNode{
   137  		{
   138  			// Best fit
   139  			Node: &structs.Node{
   140  				Name: "no-reserved",
   141  				NodeResources: &structs.NodeResources{
   142  					Cpu: structs.NodeCpuResources{
   143  						CpuShares: 1100,
   144  					},
   145  					Memory: structs.NodeMemoryResources{
   146  						MemoryMB: 1100,
   147  					},
   148  				},
   149  			},
   150  		},
   151  		{
   152  			// Not best fit if reserve is calculated properly
   153  			Node: &structs.Node{
   154  				Name: "reserved",
   155  				NodeResources: &structs.NodeResources{
   156  					Cpu: structs.NodeCpuResources{
   157  						CpuShares: 2000,
   158  					},
   159  					Memory: structs.NodeMemoryResources{
   160  						MemoryMB: 2000,
   161  					},
   162  				},
   163  				ReservedResources: &structs.NodeReservedResources{
   164  					Cpu: structs.NodeReservedCpuResources{
   165  						CpuShares: 800,
   166  					},
   167  					Memory: structs.NodeReservedMemoryResources{
   168  						MemoryMB: 800,
   169  					},
   170  				},
   171  			},
   172  		},
   173  		{
   174  			// Even worse fit due to reservations
   175  			Node: &structs.Node{
   176  				Name: "reserved2",
   177  				NodeResources: &structs.NodeResources{
   178  					Cpu: structs.NodeCpuResources{
   179  						CpuShares: 2000,
   180  					},
   181  					Memory: structs.NodeMemoryResources{
   182  						MemoryMB: 2000,
   183  					},
   184  				},
   185  				ReservedResources: &structs.NodeReservedResources{
   186  					Cpu: structs.NodeReservedCpuResources{
   187  						CpuShares: 500,
   188  					},
   189  					Memory: structs.NodeReservedMemoryResources{
   190  						MemoryMB: 500,
   191  					},
   192  				},
   193  			},
   194  		},
   195  		{
   196  			Node: &structs.Node{
   197  				Name: "overloaded",
   198  				NodeResources: &structs.NodeResources{
   199  					Cpu: structs.NodeCpuResources{
   200  						CpuShares: 900,
   201  					},
   202  					Memory: structs.NodeMemoryResources{
   203  						MemoryMB: 900,
   204  					},
   205  				},
   206  			},
   207  		},
   208  	}
   209  	static := NewStaticRankIterator(ctx, nodes)
   210  
   211  	taskGroup := &structs.TaskGroup{
   212  		EphemeralDisk: &structs.EphemeralDisk{},
   213  		Tasks: []*structs.Task{
   214  			{
   215  				Name: "web",
   216  				Resources: &structs.Resources{
   217  					CPU:      1000,
   218  					MemoryMB: 1000,
   219  				},
   220  			},
   221  		},
   222  	}
   223  	binp := NewBinPackIterator(ctx, static, false, 0, structs.SchedulerAlgorithmBinpack)
   224  	binp.SetTaskGroup(taskGroup)
   225  
   226  	scoreNorm := NewScoreNormalizationIterator(ctx, binp)
   227  
   228  	out := collectRanked(scoreNorm)
   229  
   230  	// Sort descending (highest score to lowest) and log for debugging
   231  	sort.Slice(out, func(i, j int) bool { return out[i].FinalScore > out[j].FinalScore })
   232  	for i := range out {
   233  		t.Logf("Node: %-12s Score: %-1.4f", out[i].Node.Name, out[i].FinalScore)
   234  	}
   235  
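        	// Effective capacity after subtracting reserved resources:
        	//   no-reserved: 1100              -> tightest feasible fit for the 1000/1000 task
        	//   reserved:    2000 - 800 = 1200 -> next best fit
        	//   reserved2:   2000 - 500 = 1500 -> worse fit (more left over)
        	//   overloaded:  900               -> infeasible, below the requested 1000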
   236  	// 3 nodes should be feasible
   237  	require.Len(t, out, 3)
   238  
   239  	// Node without reservations is the best fit
   240  	require.Equal(t, nodes[0].Node.Name, out[0].Node.Name)
   241  
   242  	// Node with smallest remaining resources ("best fit") should get a
   243  	// higher score than node with more remaining resources ("worse fit")
   244  	require.Equal(t, nodes[1].Node.Name, out[1].Node.Name)
   245  	require.Equal(t, nodes[2].Node.Name, out[2].Node.Name)
   246  }
   247  
   248  // Tests the bin packing iterator with network resources specified at both the task and task group level
   249  func TestBinPackIterator_Network_Success(t *testing.T) {
   250  	_, ctx := testContext(t)
   251  	nodes := []*RankedNode{
   252  		{
   253  			Node: &structs.Node{
   254  				// Perfect fit
   255  				NodeResources: &structs.NodeResources{
   256  					Cpu: structs.NodeCpuResources{
   257  						CpuShares: 2048,
   258  					},
   259  					Memory: structs.NodeMemoryResources{
   260  						MemoryMB: 2048,
   261  					},
   262  					Networks: []*structs.NetworkResource{
   263  						{
   264  							Mode:   "host",
   265  							Device: "eth0",
   266  							CIDR:   "192.168.0.100/32",
   267  							MBits:  1000,
   268  						},
   269  					},
   270  				},
   271  				ReservedResources: &structs.NodeReservedResources{
   272  					Cpu: structs.NodeReservedCpuResources{
   273  						CpuShares: 1024,
   274  					},
   275  					Memory: structs.NodeReservedMemoryResources{
   276  						MemoryMB: 1024,
   277  					},
   278  					Networks: structs.NodeReservedNetworkResources{
   279  						ReservedHostPorts: "1000-2000",
   280  					},
   281  				},
   282  			},
   283  		},
   284  		{
   285  			Node: &structs.Node{
   286  				// 50% fit
   287  				NodeResources: &structs.NodeResources{
   288  					Cpu: structs.NodeCpuResources{
   289  						CpuShares: 4096,
   290  					},
   291  					Memory: structs.NodeMemoryResources{
   292  						MemoryMB: 4096,
   293  					},
   294  					Networks: []*structs.NetworkResource{
   295  						{
   296  							Mode:   "host",
   297  							Device: "eth0",
   298  							CIDR:   "192.168.0.100/32",
   299  							MBits:  1000,
   300  						},
   301  					},
   302  				},
   303  				ReservedResources: &structs.NodeReservedResources{
   304  					Cpu: structs.NodeReservedCpuResources{
   305  						CpuShares: 1024,
   306  					},
   307  					Memory: structs.NodeReservedMemoryResources{
   308  						MemoryMB: 1024,
   309  					},
   310  					Networks: structs.NodeReservedNetworkResources{
   311  						ReservedHostPorts: "1000-2000",
   312  					},
   313  				},
   314  			},
   315  		},
   316  	}
   317  	static := NewStaticRankIterator(ctx, nodes)
   318  
   319  	// Create a task group with networks specified at task and task group level
   320  	taskGroup := &structs.TaskGroup{
   321  		EphemeralDisk: &structs.EphemeralDisk{},
   322  		Tasks: []*structs.Task{
   323  			{
   324  				Name: "web",
   325  				Resources: &structs.Resources{
   326  					CPU:      1024,
   327  					MemoryMB: 1024,
   328  					Networks: []*structs.NetworkResource{
   329  						{
   330  							Device: "eth0",
   331  							MBits:  300,
   332  						},
   333  					},
   334  				},
   335  			},
   336  		},
   337  		Networks: []*structs.NetworkResource{
   338  			{
   339  				Device: "eth0",
   340  				MBits:  500,
   341  			},
   342  		},
   343  	}
   344  	binp := NewBinPackIterator(ctx, static, false, 0, structs.SchedulerAlgorithmBinpack)
   345  	binp.SetTaskGroup(taskGroup)
   346  
   347  	scoreNorm := NewScoreNormalizationIterator(ctx, binp)
   348  
   349  	out := collectRanked(scoreNorm)
   350  	require := require.New(t)
   351  
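        	// Bandwidth accounting: the group asks for 500 MBits and the task for
        	// another 300 MBits on eth0, 800 MBits total against the 1000 MBits each
        	// node advertises, so neither node is bandwidth constrained.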
   352  	// We expect both nodes to be eligible to place
   353  	require.Len(out, 2)
   354  	require.Equal(out[0], nodes[0])
   355  	require.Equal(out[1], nodes[1])
   356  
   357  	// First node should have a perfect score
   358  	require.Equal(1.0, out[0].FinalScore)
   359  
   360  	if out[1].FinalScore < 0.50 || out[1].FinalScore > 0.60 {
   361  		t.Fatalf("Bad Score: %v", out[1].FinalScore)
   362  	}
   363  
   364  	// Verify network information at taskgroup level
   365  	require.Equal(500, out[0].AllocResources.Networks[0].MBits)
   366  	require.Equal(500, out[1].AllocResources.Networks[0].MBits)
   367  
   368  	// Verify network information at task level
   369  	require.Equal(300, out[0].TaskResources["web"].Networks[0].MBits)
   370  	require.Equal(300, out[1].TaskResources["web"].Networks[0].MBits)
   371  }
   372  
   373  // Tests that the bin packing iterator fails due to overprovisioned network
   374  // bandwidth, with network resources at both the task group and task level.
   375  func TestBinPackIterator_Network_Failure(t *testing.T) {
   376  	_, ctx := testContext(t)
   377  	nodes := []*RankedNode{
   378  		{
   379  			Node: &structs.Node{
   380  				// 50% fit
   381  				NodeResources: &structs.NodeResources{
   382  					Cpu: structs.NodeCpuResources{
   383  						CpuShares: 4096,
   384  					},
   385  					Memory: structs.NodeMemoryResources{
   386  						MemoryMB: 4096,
   387  					},
   388  					Networks: []*structs.NetworkResource{
   389  						{
   390  							Mode:   "host",
   391  							Device: "eth0",
   392  							CIDR:   "192.168.0.100/32",
   393  							MBits:  1000,
   394  						},
   395  					},
   396  				},
   397  				ReservedResources: &structs.NodeReservedResources{
   398  					Cpu: structs.NodeReservedCpuResources{
   399  						CpuShares: 1024,
   400  					},
   401  					Memory: structs.NodeReservedMemoryResources{
   402  						MemoryMB: 1024,
   403  					},
   404  					Networks: structs.NodeReservedNetworkResources{
   405  						ReservedHostPorts: "1000-2000",
   406  					},
   407  				},
   408  			},
   409  		},
   410  	}
   411  
   412  	// Add a planned alloc that takes up some network mbits at task and task group level
   413  	plan := ctx.Plan()
   414  	plan.NodeAllocation[nodes[0].Node.ID] = []*structs.Allocation{
   415  		{
   416  			AllocatedResources: &structs.AllocatedResources{
   417  				Tasks: map[string]*structs.AllocatedTaskResources{
   418  					"web": {
   419  						Cpu: structs.AllocatedCpuResources{
   420  							CpuShares: 2048,
   421  						},
   422  						Memory: structs.AllocatedMemoryResources{
   423  							MemoryMB: 2048,
   424  						},
   425  						Networks: []*structs.NetworkResource{
   426  							{
   427  								Device: "eth0",
   428  								IP:     "192.168.0.1",
   429  								MBits:  300,
   430  							},
   431  						},
   432  					},
   433  				},
   434  				Shared: structs.AllocatedSharedResources{
   435  					Networks: []*structs.NetworkResource{
   436  						{
   437  							Device: "eth0",
   438  							IP:     "192.168.0.1",
   439  							MBits:  400,
   440  						},
   441  					},
   442  				},
   443  			},
   444  		},
   445  	}
   446  	static := NewStaticRankIterator(ctx, nodes)
   447  
   448  	// Create a task group with networks specified at task and task group level
   449  	taskGroup := &structs.TaskGroup{
   450  		EphemeralDisk: &structs.EphemeralDisk{},
   451  		Tasks: []*structs.Task{
   452  			{
   453  				Name: "web",
   454  				Resources: &structs.Resources{
   455  					CPU:      1024,
   456  					MemoryMB: 1024,
   457  					Networks: []*structs.NetworkResource{
   458  						{
   459  							Device: "eth0",
   460  							MBits:  300,
   461  						},
   462  					},
   463  				},
   464  			},
   465  		},
   466  		Networks: []*structs.NetworkResource{
   467  			{
   468  				Device: "eth0",
   469  				MBits:  250,
   470  			},
   471  		},
   472  	}
   473  
   474  	binp := NewBinPackIterator(ctx, static, false, 0, structs.SchedulerAlgorithmBinpack)
   475  	binp.SetTaskGroup(taskGroup)
   476  
   477  	scoreNorm := NewScoreNormalizationIterator(ctx, binp)
   478  
   479  	out := collectRanked(scoreNorm)
   480  	require := require.New(t)
   481  
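        	// Remaining bandwidth on eth0: 1000 advertised - 300 (planned task) -
        	// 400 (planned group) = 300 MBits free, versus 300 + 250 = 550 requested.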
   482  	// We expect a placement failure because the group needs 550 mbits of
   483  	// network but only 300 mbits remain free
   484  	require.Len(out, 0)
   485  	require.Equal(1, ctx.metrics.DimensionExhausted["network: bandwidth exceeded"])
   486  }
   487  
   488  func TestBinPackIterator_PlannedAlloc(t *testing.T) {
   489  	_, ctx := testContext(t)
   490  	nodes := []*RankedNode{
   491  		{
   492  			Node: &structs.Node{
   493  				// Perfect fit
   494  				ID: uuid.Generate(),
   495  				NodeResources: &structs.NodeResources{
   496  					Cpu: structs.NodeCpuResources{
   497  						CpuShares: 2048,
   498  					},
   499  					Memory: structs.NodeMemoryResources{
   500  						MemoryMB: 2048,
   501  					},
   502  				},
   503  			},
   504  		},
   505  		{
   506  			Node: &structs.Node{
   507  				// Perfect fit
   508  				ID: uuid.Generate(),
   509  				NodeResources: &structs.NodeResources{
   510  					Cpu: structs.NodeCpuResources{
   511  						CpuShares: 2048,
   512  					},
   513  					Memory: structs.NodeMemoryResources{
   514  						MemoryMB: 2048,
   515  					},
   516  				},
   517  			},
   518  		},
   519  	}
   520  	static := NewStaticRankIterator(ctx, nodes)
   521  
   522  	// Add a planned alloc to node1 that fills it
   523  	plan := ctx.Plan()
   524  	plan.NodeAllocation[nodes[0].Node.ID] = []*structs.Allocation{
   525  		{
   526  			AllocatedResources: &structs.AllocatedResources{
   527  				Tasks: map[string]*structs.AllocatedTaskResources{
   528  					"web": {
   529  						Cpu: structs.AllocatedCpuResources{
   530  							CpuShares: 2048,
   531  						},
   532  						Memory: structs.AllocatedMemoryResources{
   533  							MemoryMB: 2048,
   534  						},
   535  					},
   536  				},
   537  			},
   538  		},
   539  	}
   540  
   541  	// Add a planned alloc to node2 that half fills it
   542  	plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{
   543  		{
   544  			AllocatedResources: &structs.AllocatedResources{
   545  				Tasks: map[string]*structs.AllocatedTaskResources{
   546  					"web": {
   547  						Cpu: structs.AllocatedCpuResources{
   548  							CpuShares: 1024,
   549  						},
   550  						Memory: structs.AllocatedMemoryResources{
   551  							MemoryMB: 1024,
   552  						},
   553  					},
   554  				},
   555  			},
   556  		},
   557  	}
   558  
   559  	taskGroup := &structs.TaskGroup{
   560  		EphemeralDisk: &structs.EphemeralDisk{},
   561  		Tasks: []*structs.Task{
   562  			{
   563  				Name: "web",
   564  				Resources: &structs.Resources{
   565  					CPU:      1024,
   566  					MemoryMB: 1024,
   567  				},
   568  			},
   569  		},
   570  	}
   571  
   572  	binp := NewBinPackIterator(ctx, static, false, 0, structs.SchedulerAlgorithmBinpack)
   573  	binp.SetTaskGroup(taskGroup)
   574  
   575  	scoreNorm := NewScoreNormalizationIterator(ctx, binp)
   576  
   577  	out := collectRanked(scoreNorm)
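        	// Only the second node should remain: the planned alloc on the first
        	// already consumes its full 2048/2048, while the second still has
        	// 1024 CPU / 1024 MB free, which the task fills exactly for a 1.0 score.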
   578  	if len(out) != 1 {
   579  		t.Fatalf("Bad: %#v", out)
   580  	}
   581  	if out[0] != nodes[1] {
   582  		t.Fatalf("Bad Score: %v", out)
   583  	}
   584  
   585  	if out[0].FinalScore != 1.0 {
   586  		t.Fatalf("Bad Score: %v", out[0].FinalScore)
   587  	}
   588  }
   589  
   590  func TestBinPackIterator_ExistingAlloc(t *testing.T) {
   591  	state, ctx := testContext(t)
   592  	nodes := []*RankedNode{
   593  		{
   594  			Node: &structs.Node{
   595  				// Perfect fit
   596  				ID: uuid.Generate(),
   597  				NodeResources: &structs.NodeResources{
   598  					Cpu: structs.NodeCpuResources{
   599  						CpuShares: 2048,
   600  					},
   601  					Memory: structs.NodeMemoryResources{
   602  						MemoryMB: 2048,
   603  					},
   604  				},
   605  			},
   606  		},
   607  		{
   608  			Node: &structs.Node{
   609  				// Perfect fit
   610  				ID: uuid.Generate(),
   611  				NodeResources: &structs.NodeResources{
   612  					Cpu: structs.NodeCpuResources{
   613  						CpuShares: 2048,
   614  					},
   615  					Memory: structs.NodeMemoryResources{
   616  						MemoryMB: 2048,
   617  					},
   618  				},
   619  			},
   620  		},
   621  	}
   622  	static := NewStaticRankIterator(ctx, nodes)
   623  
   624  	// Add existing allocations
   625  	j1, j2 := mock.Job(), mock.Job()
   626  	alloc1 := &structs.Allocation{
   627  		Namespace: structs.DefaultNamespace,
   628  		ID:        uuid.Generate(),
   629  		EvalID:    uuid.Generate(),
   630  		NodeID:    nodes[0].Node.ID,
   631  		JobID:     j1.ID,
   632  		Job:       j1,
   633  		AllocatedResources: &structs.AllocatedResources{
   634  			Tasks: map[string]*structs.AllocatedTaskResources{
   635  				"web": {
   636  					Cpu: structs.AllocatedCpuResources{
   637  						CpuShares: 2048,
   638  					},
   639  					Memory: structs.AllocatedMemoryResources{
   640  						MemoryMB: 2048,
   641  					},
   642  				},
   643  			},
   644  		},
   645  		DesiredStatus: structs.AllocDesiredStatusRun,
   646  		ClientStatus:  structs.AllocClientStatusPending,
   647  		TaskGroup:     "web",
   648  	}
   649  	alloc2 := &structs.Allocation{
   650  		Namespace: structs.DefaultNamespace,
   651  		ID:        uuid.Generate(),
   652  		EvalID:    uuid.Generate(),
   653  		NodeID:    nodes[1].Node.ID,
   654  		JobID:     j2.ID,
   655  		Job:       j2,
   656  		AllocatedResources: &structs.AllocatedResources{
   657  			Tasks: map[string]*structs.AllocatedTaskResources{
   658  				"web": {
   659  					Cpu: structs.AllocatedCpuResources{
   660  						CpuShares: 1024,
   661  					},
   662  					Memory: structs.AllocatedMemoryResources{
   663  						MemoryMB: 1024,
   664  					},
   665  				},
   666  			},
   667  		},
   668  		DesiredStatus: structs.AllocDesiredStatusRun,
   669  		ClientStatus:  structs.AllocClientStatusPending,
   670  		TaskGroup:     "web",
   671  	}
   672  	require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
   673  	require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
   674  	require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
   675  
   676  	taskGroup := &structs.TaskGroup{
   677  		EphemeralDisk: &structs.EphemeralDisk{},
   678  		Tasks: []*structs.Task{
   679  			{
   680  				Name: "web",
   681  				Resources: &structs.Resources{
   682  					CPU:      1024,
   683  					MemoryMB: 1024,
   684  				},
   685  			},
   686  		},
   687  	}
   688  	binp := NewBinPackIterator(ctx, static, false, 0, structs.SchedulerAlgorithmBinpack)
   689  	binp.SetTaskGroup(taskGroup)
   690  
   691  	scoreNorm := NewScoreNormalizationIterator(ctx, binp)
   692  
   693  	out := collectRanked(scoreNorm)
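        	// alloc1 consumes all of the first node (2048/2048), so only the second
        	// node is feasible; its remaining 1024 CPU / 1024 MB exactly fits the
        	// task, hence the perfect 1.0 score.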
   694  	if len(out) != 1 {
   695  		t.Fatalf("Bad: %#v", out)
   696  	}
   697  	if out[0] != nodes[1] {
   698  		t.Fatalf("Bad: %v", out)
   699  	}
   700  	if out[0].FinalScore != 1.0 {
   701  		t.Fatalf("Bad Score: %v", out[0].FinalScore)
   702  	}
   703  }
   704  
   705  func TestBinPackIterator_ExistingAlloc_PlannedEvict(t *testing.T) {
   706  	state, ctx := testContext(t)
   707  	nodes := []*RankedNode{
   708  		{
   709  			Node: &structs.Node{
   710  				// Perfect fit
   711  				ID: uuid.Generate(),
   712  				NodeResources: &structs.NodeResources{
   713  					Cpu: structs.NodeCpuResources{
   714  						CpuShares: 2048,
   715  					},
   716  					Memory: structs.NodeMemoryResources{
   717  						MemoryMB: 2048,
   718  					},
   719  				},
   720  			},
   721  		},
   722  		{
   723  			Node: &structs.Node{
   724  				// Perfect fit
   725  				ID: uuid.Generate(),
   726  				NodeResources: &structs.NodeResources{
   727  					Cpu: structs.NodeCpuResources{
   728  						CpuShares: 2048,
   729  					},
   730  					Memory: structs.NodeMemoryResources{
   731  						MemoryMB: 2048,
   732  					},
   733  				},
   734  			},
   735  		},
   736  	}
   737  	static := NewStaticRankIterator(ctx, nodes)
   738  
   739  	// Add existing allocations
   740  	j1, j2 := mock.Job(), mock.Job()
   741  	alloc1 := &structs.Allocation{
   742  		Namespace: structs.DefaultNamespace,
   743  		ID:        uuid.Generate(),
   744  		EvalID:    uuid.Generate(),
   745  		NodeID:    nodes[0].Node.ID,
   746  		JobID:     j1.ID,
   747  		Job:       j1,
   748  		AllocatedResources: &structs.AllocatedResources{
   749  			Tasks: map[string]*structs.AllocatedTaskResources{
   750  				"web": {
   751  					Cpu: structs.AllocatedCpuResources{
   752  						CpuShares: 2048,
   753  					},
   754  					Memory: structs.AllocatedMemoryResources{
   755  						MemoryMB: 2048,
   756  					},
   757  				},
   758  			},
   759  		},
   760  		DesiredStatus: structs.AllocDesiredStatusRun,
   761  		ClientStatus:  structs.AllocClientStatusPending,
   762  		TaskGroup:     "web",
   763  	}
   764  	alloc2 := &structs.Allocation{
   765  		Namespace: structs.DefaultNamespace,
   766  		ID:        uuid.Generate(),
   767  		EvalID:    uuid.Generate(),
   768  		NodeID:    nodes[1].Node.ID,
   769  		JobID:     j2.ID,
   770  		Job:       j2,
   771  		AllocatedResources: &structs.AllocatedResources{
   772  			Tasks: map[string]*structs.AllocatedTaskResources{
   773  				"web": {
   774  					Cpu: structs.AllocatedCpuResources{
   775  						CpuShares: 1024,
   776  					},
   777  					Memory: structs.AllocatedMemoryResources{
   778  						MemoryMB: 1024,
   779  					},
   780  				},
   781  			},
   782  		},
   783  		DesiredStatus: structs.AllocDesiredStatusRun,
   784  		ClientStatus:  structs.AllocClientStatusPending,
   785  		TaskGroup:     "web",
   786  	}
   787  	require.NoError(t, state.UpsertJobSummary(998, mock.JobSummary(alloc1.JobID)))
   788  	require.NoError(t, state.UpsertJobSummary(999, mock.JobSummary(alloc2.JobID)))
   789  	require.NoError(t, state.UpsertAllocs(1000, []*structs.Allocation{alloc1, alloc2}))
   790  
   791  	// Add a planned eviction to alloc1
   792  	plan := ctx.Plan()
   793  	plan.NodeUpdate[nodes[0].Node.ID] = []*structs.Allocation{alloc1}
   794  
   795  	taskGroup := &structs.TaskGroup{
   796  		EphemeralDisk: &structs.EphemeralDisk{},
   797  		Tasks: []*structs.Task{
   798  			{
   799  				Name: "web",
   800  				Resources: &structs.Resources{
   801  					CPU:      1024,
   802  					MemoryMB: 1024,
   803  				},
   804  			},
   805  		},
   806  	}
   807  
   808  	binp := NewBinPackIterator(ctx, static, false, 0, structs.SchedulerAlgorithmBinpack)
   809  	binp.SetTaskGroup(taskGroup)
   810  
   811  	scoreNorm := NewScoreNormalizationIterator(ctx, binp)
   812  
   813  	out := collectRanked(scoreNorm)
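        	// With alloc1 planned for eviction, the first node is empty again and the
        	// task uses only 1024 of its 2048 (roughly 50% utilization), so it scores
        	// somewhere in the middle of the range. The second node keeps alloc2's
        	// 1024, so adding the task fills it completely for a perfect 1.0.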
   814  	if len(out) != 2 {
   815  		t.Fatalf("Bad: %#v", out)
   816  	}
   817  	if out[0] != nodes[0] || out[1] != nodes[1] {
   818  		t.Fatalf("Bad: %v", out)
   819  	}
   820  	if out[0].FinalScore < 0.50 || out[0].FinalScore > 0.95 {
   821  		t.Fatalf("Bad Score: %v", out[0].FinalScore)
   822  	}
   823  	if out[1].FinalScore != 1 {
   824  		t.Fatalf("Bad Score: %v", out[1].FinalScore)
   825  	}
   826  }
   827  
   828  // This is a fairly high level test that asserts the bin packer uses the device
   829  // allocator properly. It is not intended to handle every possible device
   830  // request versus availability scenario. That should be covered in device
   831  // allocator tests.
   832  func TestBinPackIterator_Devices(t *testing.T) {
   833  	nvidiaNode := mock.NvidiaNode()
   834  	devs := nvidiaNode.NodeResources.Devices[0].Instances
   835  	nvidiaDevices := []string{devs[0].ID, devs[1].ID}
   836  
   837  	nvidiaDev0 := mock.Alloc()
   838  	nvidiaDev0.AllocatedResources.Tasks["web"].Devices = []*structs.AllocatedDeviceResource{
   839  		{
   840  			Type:      "gpu",
   841  			Vendor:    "nvidia",
   842  			Name:      "1080ti",
   843  			DeviceIDs: []string{nvidiaDevices[0]},
   844  		},
   845  	}
   846  
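        	// devPlacementTuple describes the expected placement for a device request:
        	// how many instances should be assigned and which specific device IDs must
        	// not be used because they are already claimed elsewhere.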
   847  	type devPlacementTuple struct {
   848  		Count      int
   849  		ExcludeIDs []string
   850  	}
   851  
   852  	cases := []struct {
   853  		Name               string
   854  		Node               *structs.Node
   855  		PlannedAllocs      []*structs.Allocation
   856  		ExistingAllocs     []*structs.Allocation
   857  		TaskGroup          *structs.TaskGroup
   858  		NoPlace            bool
   859  		ExpectedPlacements map[string]map[structs.DeviceIdTuple]devPlacementTuple
   860  		DeviceScore        float64
   861  	}{
   862  		{
   863  			Name: "single request, match",
   864  			Node: nvidiaNode,
   865  			TaskGroup: &structs.TaskGroup{
   866  				EphemeralDisk: &structs.EphemeralDisk{},
   867  				Tasks: []*structs.Task{
   868  					{
   869  						Name: "web",
   870  						Resources: &structs.Resources{
   871  							CPU:      1024,
   872  							MemoryMB: 1024,
   873  							Devices: []*structs.RequestedDevice{
   874  								{
   875  									Name:  "nvidia/gpu",
   876  									Count: 1,
   877  								},
   878  							},
   879  						},
   880  					},
   881  				},
   882  			},
   883  			ExpectedPlacements: map[string]map[structs.DeviceIdTuple]devPlacementTuple{
   884  				"web": {
   885  					{
   886  						Vendor: "nvidia",
   887  						Type:   "gpu",
   888  						Name:   "1080ti",
   889  					}: {
   890  						Count: 1,
   891  					},
   892  				},
   893  			},
   894  		},
   895  		{
   896  			Name: "single request multiple count, match",
   897  			Node: nvidiaNode,
   898  			TaskGroup: &structs.TaskGroup{
   899  				EphemeralDisk: &structs.EphemeralDisk{},
   900  				Tasks: []*structs.Task{
   901  					{
   902  						Name: "web",
   903  						Resources: &structs.Resources{
   904  							CPU:      1024,
   905  							MemoryMB: 1024,
   906  							Devices: []*structs.RequestedDevice{
   907  								{
   908  									Name:  "nvidia/gpu",
   909  									Count: 2,
   910  								},
   911  							},
   912  						},
   913  					},
   914  				},
   915  			},
   916  			ExpectedPlacements: map[string]map[structs.DeviceIdTuple]devPlacementTuple{
   917  				"web": {
   918  					{
   919  						Vendor: "nvidia",
   920  						Type:   "gpu",
   921  						Name:   "1080ti",
   922  					}: {
   923  						Count: 2,
   924  					},
   925  				},
   926  			},
   927  		},
   928  		{
   929  			Name: "single request, with affinities",
   930  			Node: nvidiaNode,
   931  			TaskGroup: &structs.TaskGroup{
   932  				EphemeralDisk: &structs.EphemeralDisk{},
   933  				Tasks: []*structs.Task{
   934  					{
   935  						Name: "web",
   936  						Resources: &structs.Resources{
   937  							CPU:      1024,
   938  							MemoryMB: 1024,
   939  							Devices: []*structs.RequestedDevice{
   940  								{
   941  									Name:  "nvidia/gpu",
   942  									Count: 1,
   943  									Affinities: []*structs.Affinity{
   944  										{
   945  											LTarget: "${device.attr.graphics_clock}",
   946  											Operand: ">",
   947  											RTarget: "1.4 GHz",
   948  											Weight:  90,
   949  										},
   950  									},
   951  								},
   952  							},
   953  						},
   954  					},
   955  				},
   956  			},
   957  			ExpectedPlacements: map[string]map[structs.DeviceIdTuple]devPlacementTuple{
   958  				"web": {
   959  					{
   960  						Vendor: "nvidia",
   961  						Type:   "gpu",
   962  						Name:   "1080ti",
   963  					}: {
   964  						Count: 1,
   965  					},
   966  				},
   967  			},
   968  			DeviceScore: 1.0,
   969  		},
   970  		{
   971  			Name: "single request over count, no match",
   972  			Node: nvidiaNode,
   973  			TaskGroup: &structs.TaskGroup{
   974  				EphemeralDisk: &structs.EphemeralDisk{},
   975  				Tasks: []*structs.Task{
   976  					{
   977  						Name: "web",
   978  						Resources: &structs.Resources{
   979  							CPU:      1024,
   980  							MemoryMB: 1024,
   981  							Devices: []*structs.RequestedDevice{
   982  								{
   983  									Name:  "nvidia/gpu",
   984  									Count: 6,
   985  								},
   986  							},
   987  						},
   988  					},
   989  				},
   990  			},
   991  			NoPlace: true,
   992  		},
   993  		{
   994  			Name: "single request no device of matching type",
   995  			Node: nvidiaNode,
   996  			TaskGroup: &structs.TaskGroup{
   997  				EphemeralDisk: &structs.EphemeralDisk{},
   998  				Tasks: []*structs.Task{
   999  					{
  1000  						Name: "web",
  1001  						Resources: &structs.Resources{
  1002  							CPU:      1024,
  1003  							MemoryMB: 1024,
  1004  							Devices: []*structs.RequestedDevice{
  1005  								{
  1006  									Name:  "fpga",
  1007  									Count: 1,
  1008  								},
  1009  							},
  1010  						},
  1011  					},
  1012  				},
  1013  			},
  1014  			NoPlace: true,
  1015  		},
  1016  		{
  1017  			Name: "single request with previous uses",
  1018  			Node: nvidiaNode,
  1019  			TaskGroup: &structs.TaskGroup{
  1020  				EphemeralDisk: &structs.EphemeralDisk{},
  1021  				Tasks: []*structs.Task{
  1022  					{
  1023  						Name: "web",
  1024  						Resources: &structs.Resources{
  1025  							CPU:      1024,
  1026  							MemoryMB: 1024,
  1027  							Devices: []*structs.RequestedDevice{
  1028  								{
  1029  									Name:  "nvidia/gpu",
  1030  									Count: 1,
  1031  								},
  1032  							},
  1033  						},
  1034  					},
  1035  				},
  1036  			},
  1037  			ExpectedPlacements: map[string]map[structs.DeviceIdTuple]devPlacementTuple{
  1038  				"web": {
  1039  					{
  1040  						Vendor: "nvidia",
  1041  						Type:   "gpu",
  1042  						Name:   "1080ti",
  1043  					}: {
  1044  						Count:      1,
  1045  						ExcludeIDs: []string{nvidiaDevices[0]},
  1046  					},
  1047  				},
  1048  			},
  1049  			ExistingAllocs: []*structs.Allocation{nvidiaDev0},
  1050  		},
  1051  		{
  1052  			Name: "single request with planned uses",
  1053  			Node: nvidiaNode,
  1054  			TaskGroup: &structs.TaskGroup{
  1055  				EphemeralDisk: &structs.EphemeralDisk{},
  1056  				Tasks: []*structs.Task{
  1057  					{
  1058  						Name: "web",
  1059  						Resources: &structs.Resources{
  1060  							CPU:      1024,
  1061  							MemoryMB: 1024,
  1062  							Devices: []*structs.RequestedDevice{
  1063  								{
  1064  									Name:  "nvidia/gpu",
  1065  									Count: 1,
  1066  								},
  1067  							},
  1068  						},
  1069  					},
  1070  				},
  1071  			},
  1072  			ExpectedPlacements: map[string]map[structs.DeviceIdTuple]devPlacementTuple{
  1073  				"web": {
  1074  					{
  1075  						Vendor: "nvidia",
  1076  						Type:   "gpu",
  1077  						Name:   "1080ti",
  1078  					}: {
  1079  						Count:      1,
  1080  						ExcludeIDs: []string{nvidiaDevices[0]},
  1081  					},
  1082  				},
  1083  			},
  1084  			PlannedAllocs: []*structs.Allocation{nvidiaDev0},
  1085  		},
  1086  	}
  1087  
  1088  	for _, c := range cases {
  1089  		t.Run(c.Name, func(t *testing.T) {
  1090  			require := require.New(t)
  1091  
  1092  			// Setup the context
  1093  			state, ctx := testContext(t)
  1094  
  1095  			// Add the planned allocs
  1096  			if len(c.PlannedAllocs) != 0 {
  1097  				for _, alloc := range c.PlannedAllocs {
  1098  					alloc.NodeID = c.Node.ID
  1099  				}
  1100  				plan := ctx.Plan()
  1101  				plan.NodeAllocation[c.Node.ID] = c.PlannedAllocs
  1102  			}
  1103  
  1104  			// Add the existing allocs
  1105  			if len(c.ExistingAllocs) != 0 {
  1106  				for _, alloc := range c.ExistingAllocs {
  1107  					alloc.NodeID = c.Node.ID
  1108  				}
  1109  				require.NoError(state.UpsertAllocs(1000, c.ExistingAllocs))
  1110  			}
  1111  
  1112  			static := NewStaticRankIterator(ctx, []*RankedNode{{Node: c.Node}})
  1113  			binp := NewBinPackIterator(ctx, static, false, 0, structs.SchedulerAlgorithmBinpack)
  1114  			binp.SetTaskGroup(c.TaskGroup)
  1115  
  1116  			out := binp.Next()
  1117  			if out == nil && !c.NoPlace {
  1118  				t.Fatalf("expected placement")
  1119  			}
  1120  
  1121  			// Check we got the placements we are expecting
  1122  			for tname, devices := range c.ExpectedPlacements {
  1123  				tr, ok := out.TaskResources[tname]
  1124  				require.True(ok)
  1125  
  1126  				want := len(devices)
  1127  				got := 0
  1128  				for _, placed := range tr.Devices {
  1129  					got++
  1130  
  1131  					expected, ok := devices[*placed.ID()]
  1132  					require.True(ok)
  1133  					require.Equal(expected.Count, len(placed.DeviceIDs))
  1134  					for _, id := range expected.ExcludeIDs {
  1135  						require.NotContains(placed.DeviceIDs, id)
  1136  					}
  1137  				}
  1138  
  1139  				require.Equal(want, got)
  1140  			}
  1141  
  1142  			// Check potential affinity scores
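        			// (the binpack fit is expected to be Scores[0] and the device
        			// affinity score Scores[1], hence the length-2 assertion)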
  1143  			if c.DeviceScore != 0.0 {
  1144  				require.Len(out.Scores, 2)
  1145  				require.Equal(c.DeviceScore, out.Scores[1])
  1146  			}
  1147  		})
  1148  	}
  1149  }
  1150  
  1151  func TestJobAntiAffinity_PlannedAlloc(t *testing.T) {
  1152  	_, ctx := testContext(t)
  1153  	nodes := []*RankedNode{
  1154  		{
  1155  			Node: &structs.Node{
  1156  				ID: uuid.Generate(),
  1157  			},
  1158  		},
  1159  		{
  1160  			Node: &structs.Node{
  1161  				ID: uuid.Generate(),
  1162  			},
  1163  		},
  1164  	}
  1165  	static := NewStaticRankIterator(ctx, nodes)
  1166  
  1167  	job := mock.Job()
  1168  	job.ID = "foo"
  1169  	tg := job.TaskGroups[0]
  1170  	tg.Count = 4
  1171  
  1172  	// Add two planned allocs for the same job "foo" to node1
  1173  	plan := ctx.Plan()
  1174  	plan.NodeAllocation[nodes[0].Node.ID] = []*structs.Allocation{
  1175  		{
  1176  			ID:        uuid.Generate(),
  1177  			JobID:     "foo",
  1178  			TaskGroup: tg.Name,
  1179  		},
  1180  		{
  1181  			ID:        uuid.Generate(),
  1182  			JobID:     "foo",
  1183  			TaskGroup: tg.Name,
  1184  		},
  1185  	}
  1186  
  1187  	// Add a planned alloc for a different job to node2 so it draws no penalty
  1188  	plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{
  1189  		{
  1190  			JobID: "bar",
  1191  		},
  1192  	}
  1193  
  1194  	jobAntiAff := NewJobAntiAffinityIterator(ctx, static, "foo")
  1195  	jobAntiAff.SetJob(job)
  1196  	jobAntiAff.SetTaskGroup(tg)
  1197  
  1198  	scoreNorm := NewScoreNormalizationIterator(ctx, jobAntiAff)
  1199  
  1200  	out := collectRanked(scoreNorm)
  1201  	if len(out) != 2 {
  1202  		t.Fatalf("Bad: %#v", out)
  1203  	}
  1204  	if out[0] != nodes[0] {
  1205  		t.Fatalf("Bad: %v", out)
  1206  	}
  1207  	// Score should be -((collisions + 1) / desired_count) => -(3/4)
  1208  	if out[0].FinalScore != -0.75 {
  1209  		t.Fatalf("Bad Score: %#v", out[0].FinalScore)
  1210  	}
  1211  
  1212  	if out[1] != nodes[1] {
  1213  		t.Fatalf("Bad: %v", out)
  1214  	}
  1215  	if out[1].FinalScore != 0.0 {
  1216  		t.Fatalf("Bad Score: %v", out[1].FinalScore)
  1217  	}
  1218  }
  1219  
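        // collectRanked drains the iterator and returns every ranked node it yields.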
  1220  func collectRanked(iter RankIterator) (out []*RankedNode) {
  1221  	for {
  1222  		next := iter.Next()
  1223  		if next == nil {
  1224  			break
  1225  		}
  1226  		out = append(out, next)
  1227  	}
  1228  	return
  1229  }
  1230  
  1231  func TestNodeAntiAffinity_PenaltyNodes(t *testing.T) {
  1232  	_, ctx := testContext(t)
  1233  	node1 := &structs.Node{
  1234  		ID: uuid.Generate(),
  1235  	}
  1236  	node2 := &structs.Node{
  1237  		ID: uuid.Generate(),
  1238  	}
  1239  
  1240  	nodes := []*RankedNode{
  1241  		{
  1242  			Node: node1,
  1243  		},
  1244  		{
  1245  			Node: node2,
  1246  		},
  1247  	}
  1248  	static := NewStaticRankIterator(ctx, nodes)
  1249  
  1250  	nodeAntiAffIter := NewNodeReschedulingPenaltyIterator(ctx, static)
  1251  	nodeAntiAffIter.SetPenaltyNodes(map[string]struct{}{node1.ID: {}})
  1252  
  1253  	scoreNorm := NewScoreNormalizationIterator(ctx, nodeAntiAffIter)
  1254  
  1255  	out := collectRanked(scoreNorm)
  1256  
  1257  	require := require.New(t)
  1258  	require.Equal(2, len(out))
  1259  	require.Equal(node1.ID, out[0].Node.ID)
  1260  	require.Equal(-1.0, out[0].FinalScore)
  1261  
  1262  	require.Equal(node2.ID, out[1].Node.ID)
  1263  	require.Equal(0.0, out[1].FinalScore)
  1264  
  1265  }
  1266  
  1267  func TestScoreNormalizationIterator(t *testing.T) {
  1268  	// Test normalized scores when there is more than one scorer
  1269  	_, ctx := testContext(t)
  1270  	nodes := []*RankedNode{
  1271  		{
  1272  			Node: &structs.Node{
  1273  				ID: uuid.Generate(),
  1274  			},
  1275  		},
  1276  		{
  1277  			Node: &structs.Node{
  1278  				ID: uuid.Generate(),
  1279  			},
  1280  		},
  1281  	}
  1282  	static := NewStaticRankIterator(ctx, nodes)
  1283  
  1284  	job := mock.Job()
  1285  	job.ID = "foo"
  1286  	tg := job.TaskGroups[0]
  1287  	tg.Count = 4
  1288  
  1289  	// Add two planned allocs for the same job "foo" to node1
  1290  	plan := ctx.Plan()
  1291  	plan.NodeAllocation[nodes[0].Node.ID] = []*structs.Allocation{
  1292  		{
  1293  			ID:        uuid.Generate(),
  1294  			JobID:     "foo",
  1295  			TaskGroup: tg.Name,
  1296  		},
  1297  		{
  1298  			ID:        uuid.Generate(),
  1299  			JobID:     "foo",
  1300  			TaskGroup: tg.Name,
  1301  		},
  1302  	}
  1303  
  1304  	// Add a planned alloc for a different job to node2 so it draws no penalty
  1305  	plan.NodeAllocation[nodes[1].Node.ID] = []*structs.Allocation{
  1306  		{
  1307  			JobID: "bar",
  1308  		},
  1309  	}
  1310  
  1311  	jobAntiAff := NewJobAntiAffinityIterator(ctx, static, "foo")
  1312  	jobAntiAff.SetJob(job)
  1313  	jobAntiAff.SetTaskGroup(tg)
  1314  
  1315  	nodeReschedulePenaltyIter := NewNodeReschedulingPenaltyIterator(ctx, jobAntiAff)
  1316  	nodeReschedulePenaltyIter.SetPenaltyNodes(map[string]struct{}{nodes[0].Node.ID: {}})
  1317  
  1318  	scoreNorm := NewScoreNormalizationIterator(ctx, nodeReschedulePenaltyIter)
  1319  
  1320  	out := collectRanked(scoreNorm)
  1321  	require := require.New(t)
  1322  
  1323  	require.Equal(2, len(out))
  1324  	require.Equal(out[0], nodes[0])
  1325  	// The final score is the average of the two scorers:
  1326  	// (-0.75 job anti-affinity + -1.0 node rescheduling penalty) / 2 = -0.875
  1327  	require.Equal(-0.875, out[0].FinalScore)
  1328  	require.Equal(out[1], nodes[1])
  1329  	require.Equal(out[1].FinalScore, 0.0)
  1330  }
  1331  
  1332  func TestNodeAffinityIterator(t *testing.T) {
  1333  	_, ctx := testContext(t)
  1334  	nodes := []*RankedNode{
  1335  		{Node: mock.Node()},
  1336  		{Node: mock.Node()},
  1337  		{Node: mock.Node()},
  1338  		{Node: mock.Node()},
  1339  	}
  1340  
  1341  	nodes[0].Node.Attributes["kernel.version"] = "4.9"
  1342  	nodes[1].Node.Datacenter = "dc2"
  1343  	nodes[2].Node.Datacenter = "dc2"
  1344  	nodes[2].Node.NodeClass = "large"
  1345  
  1346  	affinities := []*structs.Affinity{
  1347  		{
  1348  			Operand: "=",
  1349  			LTarget: "${node.datacenter}",
  1350  			RTarget: "dc1",
  1351  			Weight:  100,
  1352  		},
  1353  		{
  1354  			Operand: "=",
  1355  			LTarget: "${node.datacenter}",
  1356  			RTarget: "dc2",
  1357  			Weight:  -100,
  1358  		},
  1359  		{
  1360  			Operand: "version",
  1361  			LTarget: "${attr.kernel.version}",
  1362  			RTarget: ">4.0",
  1363  			Weight:  50,
  1364  		},
  1365  		{
  1366  			Operand: "is",
  1367  			LTarget: "${node.class}",
  1368  			RTarget: "large",
  1369  			Weight:  50,
  1370  		},
  1371  	}
  1372  
  1373  	static := NewStaticRankIterator(ctx, nodes)
  1374  
  1375  	job := mock.Job()
  1376  	job.ID = "foo"
  1377  	tg := job.TaskGroups[0]
  1378  	tg.Affinities = affinities
  1379  
  1380  	nodeAffinity := NewNodeAffinityIterator(ctx, static)
  1381  	nodeAffinity.SetTaskGroup(tg)
  1382  
  1383  	scoreNorm := NewScoreNormalizationIterator(ctx, nodeAffinity)
  1384  
  1385  	out := collectRanked(scoreNorm)
  1386  	expectedScores := make(map[string]float64)
  1387  	// Total absolute weight = 100 + 100 + 50 + 50 = 300
  1388  	// Node 0 matches two affinities (dc1 and kernel version), total weight = 150
  1389  	expectedScores[nodes[0].Node.ID] = 0.5
  1390  
  1391  	// Node 1 matches the dc2 anti-affinity, weight = -100
  1392  	expectedScores[nodes[1].Node.ID] = -(1.0 / 3.0)
  1393  
  1394  	// Node 2 matches the dc2 anti-affinity (-100) and the node class affinity (+50), net weight = -50
  1395  	expectedScores[nodes[2].Node.ID] = -(1.0 / 6.0)
  1396  
  1397  	// Node 3 matches one affinity (dc) with weight = 100
  1398  	expectedScores[nodes[3].Node.ID] = 1.0 / 3.0
  1399  
  1400  	require := require.New(t)
  1401  	for _, n := range out {
  1402  		require.Equal(expectedScores[n.Node.ID], n.FinalScore)
  1403  	}
  1404  
  1405  }