github.com/hernad/nomad@v1.6.112/nomad/structs/funcs_test.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package structs

import (
	"encoding/base64"
	"fmt"
	"testing"

	"github.com/hernad/nomad/acl"
	"github.com/hernad/nomad/ci"
	"github.com/hernad/nomad/helper/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

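// TestRemoveAllocs asserts that RemoveAllocs filters the given allocations
// out of the input slice.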
func TestRemoveAllocs(t *testing.T) {
	ci.Parallel(t)

	l := []*Allocation{
		{ID: "foo"},
		{ID: "bar"},
		{ID: "baz"},
		{ID: "zip"},
	}

	out := RemoveAllocs(l, []*Allocation{l[1], l[3]})
	if len(out) != 2 {
		t.Fatalf("bad: %#v", out)
	}
	if out[0].ID != "foo" || out[1].ID != "baz" {
		t.Fatalf("bad: %#v", out)
	}
}

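// TestFilterTerminalAllocs asserts that FilterTerminalAllocs filters out
// terminal allocations and returns, per allocation name, the terminal
// allocation with the highest CreateIndex.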
func TestFilterTerminalAllocs(t *testing.T) {
	ci.Parallel(t)

	l := []*Allocation{
		{
			ID:            "bar",
			Name:          "myname1",
			DesiredStatus: AllocDesiredStatusEvict,
		},
		{ID: "baz", DesiredStatus: AllocDesiredStatusStop},
		{
			ID:            "foo",
			DesiredStatus: AllocDesiredStatusRun,
			ClientStatus:  AllocClientStatusPending,
		},
		{
			ID:            "bam",
			Name:          "myname",
			DesiredStatus: AllocDesiredStatusRun,
			ClientStatus:  AllocClientStatusComplete,
			CreateIndex:   5,
		},
		{
			ID:            "lol",
			Name:          "myname",
			DesiredStatus: AllocDesiredStatusRun,
			ClientStatus:  AllocClientStatusComplete,
			CreateIndex:   2,
		},
	}

	out, terminalAllocs := FilterTerminalAllocs(l)
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}
	if out[0].ID != "foo" {
		t.Fatalf("bad: %#v", out)
	}

	if len(terminalAllocs) != 3 {
		for _, o := range terminalAllocs {
			t.Logf("%#v", o)
		}

		t.Fatalf("bad: %#v", terminalAllocs)
	}

	if terminalAllocs["myname"].ID != "bam" {
		t.Fatalf("bad: %#v", terminalAllocs["myname"])
	}
}

// COMPAT(0.11): Remove in 0.11
func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) {
	ci.Parallel(t)

	n := &Node{
		Resources: &Resources{
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
	}

	a1 := &Allocation{
		Job: &Job{
			TaskGroups: []*TaskGroup{
				{
					Name:          "web",
					EphemeralDisk: DefaultEphemeralDisk(),
				},
			},
		},
		TaskResources: map[string]*Resources{
			"web": {
				Networks: []*NetworkResource{
					{
						Device:        "eth0",
						IP:            "10.0.0.1",
						MBits:         50,
						ReservedPorts: []Port{{"main", 8000, 80, ""}},
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, dim, _, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !fit {
		t.Fatalf("Bad: %s", dim)
	}

	// Should not fit second allocation
	fit, _, _, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if fit {
		t.Fatalf("expected second allocation not to fit")
	}
}

// COMPAT(0.11): Remove in 0.11
func TestAllocsFit_Old(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := &Node{
		Resources: &Resources{
			CPU:      2000,
			MemoryMB: 2048,
			DiskMB:   10000,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
		Reserved: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 80, 0, ""}},
				},
			},
		},
	}

	a1 := &Allocation{
		Resources: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 8000, 80, ""}},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(err)
	require.False(fit)
	require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(2048, used.Flattened.Memory.MemoryMB)
}

// COMPAT(0.11): Remove in 0.11
func TestAllocsFit_TerminalAlloc_Old(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := &Node{
		Resources: &Resources{
			CPU:      2000,
			MemoryMB: 2048,
			DiskMB:   10000,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
		Reserved: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 80, 0, ""}},
				},
			},
		},
	}

	a1 := &Allocation{
		Resources: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 8000, 0, ""}},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should fit second allocation since it is terminal
	a2 := a1.Copy()
	a2.DesiredStatus = AllocDesiredStatusStop
	a2.ClientStatus = AllocClientStatusComplete
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}

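// TestAllocsFit asserts that AllocsFit accounts for CPU shares, reserved
// cores, memory, disk, and ports when deciding whether allocations fit on a
// node.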
func TestAllocsFit(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares:          2000,
				TotalCpuCores:      2,
				ReservableCpuCores: []uint16{0, 1},
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
			NodeNetworks: []*NodeNetworkResource{
				{
					Mode:   "host",
					Device: "eth0",
					Addresses: []NodeNetworkAddress{
						{
							Address: "10.0.0.1",
						},
					},
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares:     1000,
						ReservedCores: []uint16{},
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
				Networks: Networks{
					{
						Mode:          "host",
						IP:            "10.0.0.1",
						ReservedPorts: []Port{{"main", 8000, 0, ""}},
					},
				},
				Ports: AllocatedPorts{
					{
						Label:  "main",
						Value:  8000,
						HostIP: "10.0.0.1",
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, dim, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit, "failed for dimension %q", dim)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(err)
	require.False(fit)
	require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(2048, used.Flattened.Memory.MemoryMB)

	a2 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares:     500,
						ReservedCores: []uint16{0},
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 512,
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 1000,
				Networks: Networks{
					{
						Mode: "host",
						IP:   "10.0.0.1",
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, dim, used, err = AllocsFit(n, []*Allocation{a2}, nil, false)
	require.NoError(err)
	require.True(fit, "failed for dimension %q", dim)
	require.EqualValues(500, used.Flattened.Cpu.CpuShares)
	require.EqualValues([]uint16{0}, used.Flattened.Cpu.ReservedCores)
	require.EqualValues(512, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation
	fit, dim, used, err = AllocsFit(n, []*Allocation{a2, a2}, nil, false)
	require.NoError(err)
	require.False(fit)
	require.EqualValues("cores", dim)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues([]uint16{0}, used.Flattened.Cpu.ReservedCores)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}

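// TestAllocsFit_TerminalAlloc asserts that terminal allocations do not count
// against node capacity.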
func TestAllocsFit_TerminalAlloc(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					IP:     "10.0.0.1",
					MBits:  100,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "10.0.0.1",
							MBits:         50,
							ReservedPorts: []Port{{"main", 8000, 80, ""}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should fit second allocation since it is terminal
	a2 := a1.Copy()
	a2.DesiredStatus = AllocDesiredStatusStop
	a2.ClientStatus = AllocClientStatusComplete
	fit, dim, used, err := AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit, dim)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}

// TestAllocsFit_ClientTerminalAlloc asserts that allocs which have a terminal
// ClientStatus *do not* have their resources counted as in-use.
func TestAllocsFit_ClientTerminalAlloc(t *testing.T) {
	ci.Parallel(t)

	n := &Node{
		ID: "test-node",
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					IP:     "10.0.0.1",
					MBits:  100,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	liveAlloc := &Allocation{
		ID:            "test-alloc-live",
		ClientStatus:  AllocClientStatusPending,
		DesiredStatus: AllocDesiredStatusRun,
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "10.0.0.1",
							MBits:         50,
							ReservedPorts: []Port{{"main", 8000, 80, ""}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}

	deadAlloc := liveAlloc.Copy()
	deadAlloc.ID = "test-alloc-dead"
	deadAlloc.ClientStatus = AllocClientStatusFailed
	deadAlloc.DesiredStatus = AllocDesiredStatusRun

	// *Should* fit both allocations since deadAlloc is not running on the
	// client
	fit, _, used, err := AllocsFit(n, []*Allocation{liveAlloc, deadAlloc}, nil, false)
	require.NoError(t, err)
	require.True(t, fit)
	require.EqualValues(t, 1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 1024, used.Flattened.Memory.MemoryMB)
}

// TestAllocsFit_ServerTerminalAlloc asserts that allocs which have a terminal
// DesiredStatus but are still running on clients *do* have their resources
// counted as in-use.
func TestAllocsFit_ServerTerminalAlloc(t *testing.T) {
	ci.Parallel(t)

	n := &Node{
		ID: "test-node",
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					IP:     "10.0.0.1",
					MBits:  100,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	liveAlloc := &Allocation{
		ID:            "test-alloc-live",
		ClientStatus:  AllocClientStatusPending,
		DesiredStatus: AllocDesiredStatusRun,
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "10.0.0.1",
							MBits:         50,
							ReservedPorts: []Port{{"main", 8000, 80, ""}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}

	deadAlloc := liveAlloc.Copy()
	deadAlloc.ID = "test-alloc-dead"
	deadAlloc.ClientStatus = AllocClientStatusRunning
	deadAlloc.DesiredStatus = AllocDesiredStatusStop

	// Should *not* fit both allocations since deadAlloc is still running
	fit, _, used, err := AllocsFit(n, []*Allocation{liveAlloc, deadAlloc}, nil, false)
	require.NoError(t, err)
	require.False(t, fit)
	require.EqualValues(t, 2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 2048, used.Flattened.Memory.MemoryMB)
}

// TestAllocsFit_Devices asserts that AllocsFit detects device collisions.
func TestAllocsFit_Devices(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := MockNvidiaNode()
	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Devices: []*AllocatedDeviceResource{
						{
							Type:      "gpu",
							Vendor:    "nvidia",
							Name:      "1080ti",
							DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}
	a2 := a1.Copy()
	a2.AllocatedResources.Tasks["web"] = &AllocatedTaskResources{
		Cpu: AllocatedCpuResources{
			CpuShares: 1000,
		},
		Memory: AllocatedMemoryResources{
			MemoryMB: 1024,
		},
		Devices: []*AllocatedDeviceResource{
			{
				Type:      "gpu",
				Vendor:    "nvidia",
				Name:      "1080ti",
				DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID}, // Use the same ID
			},
		},
	}

	// Should fit one allocation
	fit, _, _, err := AllocsFit(n, []*Allocation{a1}, nil, true)
	require.NoError(err)
	require.True(fit)

	// Should not fit second allocation
	fit, msg, _, err := AllocsFit(n, []*Allocation{a1, a2}, nil, true)
	require.NoError(err)
	require.False(fit)
	require.Equal("device oversubscribed", msg)

	// Should not fit second allocation but won't detect since we disabled
	// devices
	fit, _, _, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit)
}

// TestAllocsFit_MemoryOversubscription asserts that only the scheduled memory
// (MemoryMB), not the oversubscription limit (MemoryMaxMB), counts against
// node capacity.
func TestAllocsFit_MemoryOversubscription(t *testing.T) {
	ci.Parallel(t)

	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
		},
	}

	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 100,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB:    1000,
						MemoryMaxMB: 4000,
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(t, err)
	require.True(t, fit)
	require.EqualValues(t, 100, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 1000, used.Flattened.Memory.MemoryMB)
	require.EqualValues(t, 4000, used.Flattened.Memory.MemoryMaxMB)

	// Should fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(t, err)
	require.True(t, fit)
	require.EqualValues(t, 200, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 2000, used.Flattened.Memory.MemoryMB)
	require.EqualValues(t, 8000, used.Flattened.Memory.MemoryMaxMB)

	// Should not fit a third allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1, a1}, nil, false)
	require.NoError(t, err)
	require.False(t, fit)
	require.EqualValues(t, 300, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 3000, used.Flattened.Memory.MemoryMB)
	require.EqualValues(t, 12000, used.Flattened.Memory.MemoryMaxMB)
}

// COMPAT(0.11): Remove in 0.11
func TestScoreFitBinPack_Old(t *testing.T) {
	ci.Parallel(t)

	node := &Node{}
	node.Resources = &Resources{
		CPU:      4096,
		MemoryMB: 8192,
	}
	node.Reserved = &Resources{
		CPU:      2048,
		MemoryMB: 4096,
	}

	// Test a perfect fit
	util := &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 2048,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 4096,
			},
		},
	}
	score := ScoreFitBinPack(node, util)
	if score != 18.0 {
		t.Fatalf("bad: %v", score)
	}

	// Test the worst fit
	util = &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 0,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 0,
			},
		},
	}
	score = ScoreFitBinPack(node, util)
	if score != 0.0 {
		t.Fatalf("bad: %v", score)
	}

	// Test a mid-case scenario
	util = &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 1024,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 2048,
			},
		},
	}
	score = ScoreFitBinPack(node, util)
	if score < 10.0 || score > 16.0 {
		t.Fatalf("bad: %v", score)
	}
}

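// TestScoreFitBinPack asserts that ScoreFitBinPack and ScoreFitSpread are
// complementary: for a given utilization the two scores sum to 18.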
func TestScoreFitBinPack(t *testing.T) {
	ci.Parallel(t)

	node := &Node{}
	node.NodeResources = &NodeResources{
		Cpu: NodeCpuResources{
			CpuShares: 4096,
		},
		Memory: NodeMemoryResources{
			MemoryMB: 8192,
		},
	}
	node.ReservedResources = &NodeReservedResources{
		Cpu: NodeReservedCpuResources{
			CpuShares: 2048,
		},
		Memory: NodeReservedMemoryResources{
			MemoryMB: 4096,
		},
	}

	cases := []struct {
		name         string
		flattened    AllocatedTaskResources
		binPackScore float64
		spreadScore  float64
	}{
		{
			name: "almost filled node, but with just enough hole",
			flattened: AllocatedTaskResources{
				Cpu:    AllocatedCpuResources{CpuShares: 2048},
				Memory: AllocatedMemoryResources{MemoryMB: 4096},
			},
			binPackScore: 18,
			spreadScore:  0,
		},
		{
			name: "unutilized node",
			flattened: AllocatedTaskResources{
				Cpu:    AllocatedCpuResources{CpuShares: 0},
				Memory: AllocatedMemoryResources{MemoryMB: 0},
			},
			binPackScore: 0,
			spreadScore:  18,
		},
		{
			name: "mid-case scenario",
			flattened: AllocatedTaskResources{
				Cpu:    AllocatedCpuResources{CpuShares: 1024},
				Memory: AllocatedMemoryResources{MemoryMB: 2048},
			},
			binPackScore: 13.675,
			spreadScore:  4.325,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			util := &ComparableResources{Flattened: c.flattened}

			binPackScore := ScoreFitBinPack(node, util)
			require.InDelta(t, c.binPackScore, binPackScore, 0.001, "binpack score")

			spreadScore := ScoreFitSpread(node, util)
			require.InDelta(t, c.spreadScore, spreadScore, 0.001, "spread score")

			require.InDelta(t, 18, binPackScore+spreadScore, 0.001, "score sum")
		})
	}
}

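// TestACLPolicyListHash asserts that ACLPolicyListHash changes whenever the
// set of policies or a policy's ModifyIndex changes.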
func TestACLPolicyListHash(t *testing.T) {
	ci.Parallel(t)

	h1 := ACLPolicyListHash(nil)
	assert.NotEqual(t, "", h1)

	p1 := &ACLPolicy{
		Name:        fmt.Sprintf("policy-%s", uuid.Generate()),
		Description: "Super cool policy!",
		Rules: `
		namespace "default" {
			policy = "write"
		}
		node {
			policy = "read"
		}
		agent {
			policy = "read"
		}
		`,
		CreateIndex: 10,
		ModifyIndex: 20,
	}

	h2 := ACLPolicyListHash([]*ACLPolicy{p1})
	assert.NotEqual(t, "", h2)
	assert.NotEqual(t, h1, h2)

	// Create P2 as copy of P1 with new name
	p2 := &ACLPolicy{}
	*p2 = *p1
	p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())

	h3 := ACLPolicyListHash([]*ACLPolicy{p1, p2})
	assert.NotEqual(t, "", h3)
	assert.NotEqual(t, h2, h3)

	h4 := ACLPolicyListHash([]*ACLPolicy{p2})
	assert.NotEqual(t, "", h4)
	assert.NotEqual(t, h3, h4)

	// ModifyIndex should change the hash
	p2.ModifyIndex++
	h5 := ACLPolicyListHash([]*ACLPolicy{p2})
	assert.NotEqual(t, "", h5)
	assert.NotEqual(t, h4, h5)
}

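// TestCompileACLObject asserts that CompileACLObject caches compiled policy
// sets and is independent of policy order.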
func TestCompileACLObject(t *testing.T) {
	ci.Parallel(t)

	p1 := &ACLPolicy{
		Name:        fmt.Sprintf("policy-%s", uuid.Generate()),
		Description: "Super cool policy!",
		Rules: `
		namespace "default" {
			policy = "write"
		}
		node {
			policy = "read"
		}
		agent {
			policy = "read"
		}
		`,
		CreateIndex: 10,
		ModifyIndex: 20,
	}

	// Create P2 as copy of P1 with new name
	p2 := &ACLPolicy{}
	*p2 = *p1
	p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())

	// Create a small cache
	cache := NewACLCache[*acl.ACL](10)

	// Test compilation
	aclObj, err := CompileACLObject(cache, []*ACLPolicy{p1})
	assert.Nil(t, err)
	assert.NotNil(t, aclObj)

	// Should get the same object
	aclObj2, err := CompileACLObject(cache, []*ACLPolicy{p1})
	assert.Nil(t, err)
	if aclObj != aclObj2 {
		t.Fatalf("expected the same object")
	}

	// Should get another object
	aclObj3, err := CompileACLObject(cache, []*ACLPolicy{p1, p2})
	assert.Nil(t, err)
	assert.NotNil(t, aclObj3)
	if aclObj == aclObj3 {
		t.Fatalf("unexpected same object")
	}

	// Should be order independent
	aclObj4, err := CompileACLObject(cache, []*ACLPolicy{p2, p1})
	assert.Nil(t, err)
	assert.NotNil(t, aclObj4)
	if aclObj3 != aclObj4 {
		t.Fatalf("expected same object")
	}
}

// TestGenerateMigrateToken asserts the migrate token is valid for use in HTTP
// headers and CompareMigrateToken works as expected.
func TestGenerateMigrateToken(t *testing.T) {
	ci.Parallel(t)

	assert := assert.New(t)
	allocID := uuid.Generate()
	nodeSecret := uuid.Generate()
	token, err := GenerateMigrateToken(allocID, nodeSecret)
	assert.Nil(err)
	_, err = base64.URLEncoding.DecodeString(token)
	assert.Nil(err)

	assert.True(CompareMigrateToken(allocID, nodeSecret, token))
	assert.False(CompareMigrateToken("x", nodeSecret, token))
	assert.False(CompareMigrateToken(allocID, "x", token))
	assert.False(CompareMigrateToken(allocID, nodeSecret, "x"))

	token2, err := GenerateMigrateToken("x", nodeSecret)
	assert.Nil(err)
	assert.False(CompareMigrateToken(allocID, nodeSecret, token2))
	assert.True(CompareMigrateToken("x", nodeSecret, token2))
}

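// TestVaultPoliciesSet asserts that VaultPoliciesSet returns the deduplicated
// set of Vault policies used across all task groups and tasks.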
func TestVaultPoliciesSet(t *testing.T) {
	input := map[string]map[string]*Vault{
		"tg1": {
			"task1": {
				Policies: []string{"policy1-1"},
			},
			"task2": {
				Policies: []string{"policy1-2"},
			},
		},
		"tg2": {
			"task1": {
				Policies: []string{"policy2"},
			},
			"task2": {
				Policies: []string{"policy2"},
			},
		},
		"tg3": {
			"task1": {
				Policies: []string{"policy3-1"},
			},
		},
		"tg4": {
			"task1": nil,
		},
		"tg5": {
			"task1": {
				Policies: []string{"policy2"},
			},
		},
		"tg6": {
			"task1": {},
		},
		"tg7": {
			"task1": {
				Policies: []string{"policy7", "policy7"},
			},
		},
		"tg8": {
			"task1": {
				Policies: []string{"policy8-1-1", "policy8-1-2"},
			},
		},
	}
	expected := []string{
		"policy1-1",
		"policy1-2",
		"policy2",
		"policy3-1",
		"policy7",
		"policy8-1-1",
		"policy8-1-2",
	}
	got := VaultPoliciesSet(input)
	require.ElementsMatch(t, expected, got)
}

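// TestVaultNamespaceSet asserts that VaultNamespaceSet returns the
// deduplicated set of Vault namespaces used across all task groups and tasks.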
func TestVaultNamespaceSet(t *testing.T) {
	input := map[string]map[string]*Vault{
		"tg1": {
			"task1": {
				Namespace: "ns1-1",
			},
			"task2": {
				Namespace: "ns1-2",
			},
		},
		"tg2": {
			"task1": {
				Namespace: "ns2",
			},
			"task2": {
				Namespace: "ns2",
			},
		},
		"tg3": {
			"task1": {
				Namespace: "ns3-1",
			},
		},
		"tg4": {
			"task1": nil,
		},
		"tg5": {
			"task1": {
				Namespace: "ns2",
			},
		},
		"tg6": {
			"task1": {},
		},
	}
	expected := []string{
		"ns1-1",
		"ns1-2",
		"ns2",
		"ns3-1",
	}
	got := VaultNamespaceSet(input)
	require.ElementsMatch(t, expected, got)
}

// TestParsePortRanges asserts ParsePortRanges errors on invalid port ranges.
func TestParsePortRanges(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		name string
		spec string
		err  string
	}{
		{
			name: "UnmatchedDash",
			spec: "-1",
			err:  `strconv.ParseUint: parsing "": invalid syntax`,
		},
		{
			name: "Zero",
			spec: "0",
			err:  "port must be > 0",
		},
		{
			name: "TooBig",
			spec: fmt.Sprintf("1-%d", MaxValidPort+1),
			err:  "port must be < 65536 but found 65537",
		},
		{
			name: "WayTooBig",           // would OOM if not caught early enough
			spec: "9223372036854775807", // (2**63)-1
			err:  "port must be < 65536 but found 9223372036854775807",
		},
	}

	for i := range cases {
		tc := cases[i]
		t.Run(tc.name, func(t *testing.T) {
			results, err := ParsePortRanges(tc.spec)
			require.Nil(t, results)
			require.EqualError(t, err, tc.err)
		})
	}
}