github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/nomad/structs/funcs_test.go

package structs

import (
	"encoding/base64"
	"errors"
	"fmt"
	"testing"

	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

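// TestRemoveAllocs asserts that RemoveAllocs returns only the allocations
// not present in the removal list, in their original order.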
func TestRemoveAllocs(t *testing.T) {
	ci.Parallel(t)

	l := []*Allocation{
		{ID: "foo"},
		{ID: "bar"},
		{ID: "baz"},
		{ID: "zip"},
	}

	out := RemoveAllocs(l, []*Allocation{l[1], l[3]})
	if len(out) != 2 {
		t.Fatalf("bad: %#v", out)
	}
	if out[0].ID != "foo" || out[1].ID != "baz" {
		t.Fatalf("bad: %#v", out)
	}
}

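// TestFilterTerminalAllocs asserts that FilterTerminalAllocs splits
// allocations into live and terminal sets, keeping only the terminal
// allocation with the highest CreateIndex for each name.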
func TestFilterTerminalAllocs(t *testing.T) {
	ci.Parallel(t)

	l := []*Allocation{
		{
			ID:            "bar",
			Name:          "myname1",
			DesiredStatus: AllocDesiredStatusEvict,
		},
		{ID: "baz", DesiredStatus: AllocDesiredStatusStop},
		{
			ID:            "foo",
			DesiredStatus: AllocDesiredStatusRun,
			ClientStatus:  AllocClientStatusPending,
		},
		{
			ID:            "bam",
			Name:          "myname",
			DesiredStatus: AllocDesiredStatusRun,
			ClientStatus:  AllocClientStatusComplete,
			CreateIndex:   5,
		},
		{
			ID:            "lol",
			Name:          "myname",
			DesiredStatus: AllocDesiredStatusRun,
			ClientStatus:  AllocClientStatusComplete,
			CreateIndex:   2,
		},
	}

	out, terminalAllocs := FilterTerminalAllocs(l)
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}
	if out[0].ID != "foo" {
		t.Fatalf("bad: %#v", out)
	}

	if len(terminalAllocs) != 3 {
		for _, o := range terminalAllocs {
			t.Logf("%#v", o)
		}

		t.Fatalf("bad: %#v", terminalAllocs)
	}

	if terminalAllocs["myname"].ID != "bam" {
		t.Fatalf("bad: %#v", terminalAllocs["myname"])
	}
}

// COMPAT(0.11): Remove in 0.11
func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) {
	ci.Parallel(t)

	n := &Node{
		Resources: &Resources{
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
	}

	a1 := &Allocation{
		Job: &Job{
			TaskGroups: []*TaskGroup{
				{
					Name:          "web",
					EphemeralDisk: DefaultEphemeralDisk(),
				},
			},
		},
		TaskResources: map[string]*Resources{
			"web": {
				Networks: []*NetworkResource{
					{
						Device:        "eth0",
						IP:            "10.0.0.1",
						MBits:         50,
						ReservedPorts: []Port{{"main", 8000, 80, ""}},
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, dim, _, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !fit {
		t.Fatalf("Bad: %s", dim)
	}

	// Should not fit second allocation
	fit, _, _, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if fit {
		t.Fatalf("Bad")
	}
}

// COMPAT(0.11): Remove in 0.11
func TestAllocsFit_Old(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := &Node{
		Resources: &Resources{
			CPU:      2000,
			MemoryMB: 2048,
			DiskMB:   10000,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
		Reserved: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 80, 0, ""}},
				},
			},
		},
	}

	a1 := &Allocation{
		Resources: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 8000, 80, ""}},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(err)
	require.False(fit)
	require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(2048, used.Flattened.Memory.MemoryMB)
}

// COMPAT(0.11): Remove in 0.11
func TestAllocsFit_TerminalAlloc_Old(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := &Node{
		Resources: &Resources{
			CPU:      2000,
			MemoryMB: 2048,
			DiskMB:   10000,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
		Reserved: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 80, 0, ""}},
				},
			},
		},
	}

	a1 := &Allocation{
		Resources: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 8000, 0, ""}},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should fit second allocation since it is terminal
	a2 := a1.Copy()
	a2.DesiredStatus = AllocDesiredStatusStop
	a2.ClientStatus = AllocClientStatusComplete
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}

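// TestAllocsFit asserts that AllocsFit accounts for CPU shares, reserved
// cores, and memory when deciding whether allocations fit on a node, and
// that it reports the exhausted dimension when they do not.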
func TestAllocsFit(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares:          2000,
				TotalCpuCores:      2,
				ReservableCpuCores: []uint16{0, 1},
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
			NodeNetworks: []*NodeNetworkResource{
				{
					Mode:   "host",
					Device: "eth0",
					Addresses: []NodeNetworkAddress{
						{
							Address: "10.0.0.1",
						},
					},
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares:     1000,
						ReservedCores: []uint16{},
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
				Networks: Networks{
					{
						Mode:          "host",
						IP:            "10.0.0.1",
						ReservedPorts: []Port{{"main", 8000, 0, ""}},
					},
				},
				Ports: AllocatedPorts{
					{
						Label:  "main",
						Value:  8000,
						HostIP: "10.0.0.1",
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, dim, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit, "failed for dimension %q", dim)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(err)
	require.False(fit)
	require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(2048, used.Flattened.Memory.MemoryMB)

	a2 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares:     500,
						ReservedCores: []uint16{0},
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 512,
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 1000,
				Networks: Networks{
					{
						Mode: "host",
						IP:   "10.0.0.1",
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, dim, used, err = AllocsFit(n, []*Allocation{a2}, nil, false)
	require.NoError(err)
	require.True(fit, "failed for dimension %q", dim)
	require.EqualValues(500, used.Flattened.Cpu.CpuShares)
	require.EqualValues([]uint16{0}, used.Flattened.Cpu.ReservedCores)
	require.EqualValues(512, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation since core 0 is already reserved
	fit, dim, used, err = AllocsFit(n, []*Allocation{a2, a2}, nil, false)
	require.NoError(err)
	require.False(fit)
	require.EqualValues("cores", dim)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues([]uint16{0}, used.Flattened.Cpu.ReservedCores)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}

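// TestAllocsFit_TerminalAlloc asserts that an allocation which is terminal
// on both the server and the client does not count against node capacity.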
func TestAllocsFit_TerminalAlloc(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					IP:     "10.0.0.1",
					MBits:  100,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "10.0.0.1",
							MBits:         50,
							ReservedPorts: []Port{{"main", 8000, 80, ""}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should fit second allocation since it is terminal
	a2 := a1.Copy()
	a2.DesiredStatus = AllocDesiredStatusStop
	a2.ClientStatus = AllocClientStatusComplete
	fit, dim, used, err := AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit, dim)
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}

// TestAllocsFit_ClientTerminalAlloc asserts that allocs which have a terminal
// ClientStatus *do not* have their resources counted as in-use.
func TestAllocsFit_ClientTerminalAlloc(t *testing.T) {
	ci.Parallel(t)

	n := &Node{
		ID: "test-node",
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					IP:     "10.0.0.1",
					MBits:  100,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	liveAlloc := &Allocation{
		ID:            "test-alloc-live",
		ClientStatus:  AllocClientStatusPending,
		DesiredStatus: AllocDesiredStatusRun,
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "10.0.0.1",
							MBits:         50,
							ReservedPorts: []Port{{"main", 8000, 80, ""}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}

	deadAlloc := liveAlloc.Copy()
	deadAlloc.ID = "test-alloc-dead"
	deadAlloc.ClientStatus = AllocClientStatusFailed
	deadAlloc.DesiredStatus = AllocDesiredStatusRun

	// *Should* fit both allocations since deadAlloc is not running on the
	// client
	fit, _, used, err := AllocsFit(n, []*Allocation{liveAlloc, deadAlloc}, nil, false)
	require.NoError(t, err)
	require.True(t, fit)
	require.EqualValues(t, 1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 1024, used.Flattened.Memory.MemoryMB)
}

// TestAllocsFit_ServerTerminalAlloc asserts that allocs which have a terminal
// DesiredStatus but are still running on clients *do* have their resources
// counted as in-use.
func TestAllocsFit_ServerTerminalAlloc(t *testing.T) {
	ci.Parallel(t)

	n := &Node{
		ID: "test-node",
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					IP:     "10.0.0.1",
					MBits:  100,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	liveAlloc := &Allocation{
		ID:            "test-alloc-live",
		ClientStatus:  AllocClientStatusPending,
		DesiredStatus: AllocDesiredStatusRun,
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "10.0.0.1",
							MBits:         50,
							ReservedPorts: []Port{{"main", 8000, 80, ""}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}

	deadAlloc := liveAlloc.Copy()
	deadAlloc.ID = "test-alloc-dead"
	deadAlloc.ClientStatus = AllocClientStatusRunning
	deadAlloc.DesiredStatus = AllocDesiredStatusStop

	// Should *not* fit both allocations since deadAlloc is still running
	fit, _, used, err := AllocsFit(n, []*Allocation{liveAlloc, deadAlloc}, nil, false)
	require.NoError(t, err)
	require.False(t, fit)
	require.EqualValues(t, 2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 2048, used.Flattened.Memory.MemoryMB)
}

// Tests that AllocsFit detects device collisions
func TestAllocsFit_Devices(t *testing.T) {
	ci.Parallel(t)

	require := require.New(t)

	n := MockNvidiaNode()
	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Devices: []*AllocatedDeviceResource{
						{
							Type:      "gpu",
							Vendor:    "nvidia",
							Name:      "1080ti",
							DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}
	a2 := a1.Copy()
	a2.AllocatedResources.Tasks["web"] = &AllocatedTaskResources{
		Cpu: AllocatedCpuResources{
			CpuShares: 1000,
		},
		Memory: AllocatedMemoryResources{
			MemoryMB: 1024,
		},
		Devices: []*AllocatedDeviceResource{
			{
				Type:      "gpu",
				Vendor:    "nvidia",
				Name:      "1080ti",
				DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID}, // Use the same ID
			},
		},
	}

	// Should fit one allocation
	fit, _, _, err := AllocsFit(n, []*Allocation{a1}, nil, true)
	require.NoError(err)
	require.True(fit)

	// Should not fit second allocation
	fit, msg, _, err := AllocsFit(n, []*Allocation{a1, a2}, nil, true)
	require.NoError(err)
	require.False(fit)
	require.Equal("device oversubscribed", msg)

	// Should not fit second allocation but won't detect since we disabled
	// devices
	fit, _, _, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit)
}

// TestAllocsFit_MemoryOversubscription asserts that only reserved memory is
// used for capacity
func TestAllocsFit_MemoryOversubscription(t *testing.T) {
	ci.Parallel(t)

	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
		},
	}

	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 100,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB:    1000,
						MemoryMaxMB: 4000,
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(t, err)
	require.True(t, fit)
	require.EqualValues(t, 100, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 1000, used.Flattened.Memory.MemoryMB)
	require.EqualValues(t, 4000, used.Flattened.Memory.MemoryMaxMB)

	// Should fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(t, err)
	require.True(t, fit)
	require.EqualValues(t, 200, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 2000, used.Flattened.Memory.MemoryMB)
	require.EqualValues(t, 8000, used.Flattened.Memory.MemoryMaxMB)

	// Should not fit a third allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1, a1}, nil, false)
	require.NoError(t, err)
	require.False(t, fit)
	require.EqualValues(t, 300, used.Flattened.Cpu.CpuShares)
	require.EqualValues(t, 3000, used.Flattened.Memory.MemoryMB)
	require.EqualValues(t, 12000, used.Flattened.Memory.MemoryMaxMB)
}

// COMPAT(0.11): Remove in 0.11
func TestScoreFitBinPack_Old(t *testing.T) {
	ci.Parallel(t)

	node := &Node{}
	node.Resources = &Resources{
		CPU:      4096,
		MemoryMB: 8192,
	}
	node.Reserved = &Resources{
		CPU:      2048,
		MemoryMB: 4096,
	}

	// Test a perfect fit
	util := &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 2048,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 4096,
			},
		},
	}
	score := ScoreFitBinPack(node, util)
	if score != 18.0 {
		t.Fatalf("bad: %v", score)
	}

	// Test the worst fit
	util = &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 0,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 0,
			},
		},
	}
	score = ScoreFitBinPack(node, util)
	if score != 0.0 {
		t.Fatalf("bad: %v", score)
	}

	// Test a mid-case scenario
	util = &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 1024,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 2048,
			},
		},
	}
	score = ScoreFitBinPack(node, util)
	if score < 10.0 || score > 16.0 {
		t.Fatalf("bad: %v", score)
	}
}

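// TestScoreFitBinPack asserts that ScoreFitBinPack and ScoreFitSpread are
// complementary: for any given utilization the two scores sum to 18, the
// maximum score.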
func TestScoreFitBinPack(t *testing.T) {
	ci.Parallel(t)

	node := &Node{}
	node.NodeResources = &NodeResources{
		Cpu: NodeCpuResources{
			CpuShares: 4096,
		},
		Memory: NodeMemoryResources{
			MemoryMB: 8192,
		},
	}
	node.ReservedResources = &NodeReservedResources{
		Cpu: NodeReservedCpuResources{
			CpuShares: 2048,
		},
		Memory: NodeReservedMemoryResources{
			MemoryMB: 4096,
		},
	}

	cases := []struct {
		name         string
		flattened    AllocatedTaskResources
		binPackScore float64
		spreadScore  float64
	}{
		{
			name: "almost filled node, but with just enough hole",
			flattened: AllocatedTaskResources{
				Cpu:    AllocatedCpuResources{CpuShares: 2048},
				Memory: AllocatedMemoryResources{MemoryMB: 4096},
			},
			binPackScore: 18,
			spreadScore:  0,
		},
		{
			name: "unutilized node",
			flattened: AllocatedTaskResources{
				Cpu:    AllocatedCpuResources{CpuShares: 0},
				Memory: AllocatedMemoryResources{MemoryMB: 0},
			},
			binPackScore: 0,
			spreadScore:  18,
		},
		{
			name: "mid-case scenario",
			flattened: AllocatedTaskResources{
				Cpu:    AllocatedCpuResources{CpuShares: 1024},
				Memory: AllocatedMemoryResources{MemoryMB: 2048},
			},
			binPackScore: 13.675,
			spreadScore:  4.325,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			util := &ComparableResources{Flattened: c.flattened}

			binPackScore := ScoreFitBinPack(node, util)
			require.InDelta(t, c.binPackScore, binPackScore, 0.001, "binpack score")

			spreadScore := ScoreFitSpread(node, util)
			require.InDelta(t, c.spreadScore, spreadScore, 0.001, "spread score")

			require.InDelta(t, 18, binPackScore+spreadScore, 0.001, "score sum")
		})
	}
}

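// TestACLPolicyListHash asserts that ACLPolicyListHash returns a non-empty
// hash that changes whenever the policy list membership or a policy's
// ModifyIndex changes.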
func TestACLPolicyListHash(t *testing.T) {
	ci.Parallel(t)

	h1 := ACLPolicyListHash(nil)
	assert.NotEqual(t, "", h1)

	p1 := &ACLPolicy{
		Name:        fmt.Sprintf("policy-%s", uuid.Generate()),
		Description: "Super cool policy!",
		Rules: `
		namespace "default" {
			policy = "write"
		}
		node {
			policy = "read"
		}
		agent {
			policy = "read"
		}
		`,
		CreateIndex: 10,
		ModifyIndex: 20,
	}

	h2 := ACLPolicyListHash([]*ACLPolicy{p1})
	assert.NotEqual(t, "", h2)
	assert.NotEqual(t, h1, h2)

	// Create P2 as copy of P1 with new name
	p2 := &ACLPolicy{}
	*p2 = *p1
	p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())

	h3 := ACLPolicyListHash([]*ACLPolicy{p1, p2})
	assert.NotEqual(t, "", h3)
	assert.NotEqual(t, h2, h3)

	h4 := ACLPolicyListHash([]*ACLPolicy{p2})
	assert.NotEqual(t, "", h4)
	assert.NotEqual(t, h3, h4)

	// ModifyIndex should change the hash
	p2.ModifyIndex++
	h5 := ACLPolicyListHash([]*ACLPolicy{p2})
	assert.NotEqual(t, "", h5)
	assert.NotEqual(t, h4, h5)
}

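// TestCompileACLObject asserts that CompileACLObject caches compiled ACL
// objects, returning the identical object for the same policy set regardless
// of policy order.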
func TestCompileACLObject(t *testing.T) {
	ci.Parallel(t)

	p1 := &ACLPolicy{
		Name:        fmt.Sprintf("policy-%s", uuid.Generate()),
		Description: "Super cool policy!",
		Rules: `
		namespace "default" {
			policy = "write"
		}
		node {
			policy = "read"
		}
		agent {
			policy = "read"
		}
		`,
		CreateIndex: 10,
		ModifyIndex: 20,
	}

	// Create P2 as copy of P1 with new name
	p2 := &ACLPolicy{}
	*p2 = *p1
	p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())

	// Create a small cache
	cache, err := lru.New2Q(16)
	assert.Nil(t, err)

	// Test compilation
	aclObj, err := CompileACLObject(cache, []*ACLPolicy{p1})
	assert.Nil(t, err)
	assert.NotNil(t, aclObj)

	// Should get the same object
	aclObj2, err := CompileACLObject(cache, []*ACLPolicy{p1})
	assert.Nil(t, err)
	if aclObj != aclObj2 {
		t.Fatalf("expected the same object")
	}

	// Should get another object
	aclObj3, err := CompileACLObject(cache, []*ACLPolicy{p1, p2})
	assert.Nil(t, err)
	assert.NotNil(t, aclObj3)
	if aclObj == aclObj3 {
		t.Fatalf("unexpected same object")
	}

	// Should be order independent
	aclObj4, err := CompileACLObject(cache, []*ACLPolicy{p2, p1})
	assert.Nil(t, err)
	assert.NotNil(t, aclObj4)
	if aclObj3 != aclObj4 {
		t.Fatalf("expected same object")
	}
}

// TestGenerateMigrateToken asserts the migrate token is valid for use in HTTP
// headers and CompareMigrateToken works as expected.
func TestGenerateMigrateToken(t *testing.T) {
	ci.Parallel(t)

	assert := assert.New(t)
	allocID := uuid.Generate()
	nodeSecret := uuid.Generate()
	token, err := GenerateMigrateToken(allocID, nodeSecret)
	assert.Nil(err)
	_, err = base64.URLEncoding.DecodeString(token)
	assert.Nil(err)

	assert.True(CompareMigrateToken(allocID, nodeSecret, token))
	assert.False(CompareMigrateToken("x", nodeSecret, token))
	assert.False(CompareMigrateToken(allocID, "x", token))
	assert.False(CompareMigrateToken(allocID, nodeSecret, "x"))

	token2, err := GenerateMigrateToken("x", nodeSecret)
	assert.Nil(err)
	assert.False(CompareMigrateToken(allocID, nodeSecret, token2))
	assert.True(CompareMigrateToken("x", nodeSecret, token2))
}

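// TestMergeMultierrorWarnings asserts that MergeMultierrorWarnings returns
// an empty string for no errors and otherwise a counted, bulleted summary
// that skips nil errors.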
func TestMergeMultierrorWarnings(t *testing.T) {
	ci.Parallel(t)

	var errs []error

	// empty
	str := MergeMultierrorWarnings(errs...)
	require.Equal(t, "", str)

	// non-empty
	errs = []error{
		errors.New("foo"),
		nil,
		errors.New("bar"),
	}

	str = MergeMultierrorWarnings(errs...)

	require.Equal(t, "2 warning(s):\n\n* foo\n* bar", str)
}

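// TestVaultPoliciesSet asserts that VaultPoliciesSet flattens the group and
// task Vault blocks into a deduplicated set of policy names, ignoring nil
// and empty blocks.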
func TestVaultPoliciesSet(t *testing.T) {
	ci.Parallel(t)

	input := map[string]map[string]*Vault{
		"tg1": {
			"task1": {
				Policies: []string{"policy1-1"},
			},
			"task2": {
				Policies: []string{"policy1-2"},
			},
		},
		"tg2": {
			"task1": {
				Policies: []string{"policy2"},
			},
			"task2": {
				Policies: []string{"policy2"},
			},
		},
		"tg3": {
			"task1": {
				Policies: []string{"policy3-1"},
			},
		},
		"tg4": {
			"task1": nil,
		},
		"tg5": {
			"task1": {
				Policies: []string{"policy2"},
			},
		},
		"tg6": {
			"task1": {},
		},
		"tg7": {
			"task1": {
				Policies: []string{"policy7", "policy7"},
			},
		},
		"tg8": {
			"task1": {
				Policies: []string{"policy8-1-1", "policy8-1-2"},
			},
		},
	}
	expected := []string{
		"policy1-1",
		"policy1-2",
		"policy2",
		"policy3-1",
		"policy7",
		"policy8-1-1",
		"policy8-1-2",
	}
	got := VaultPoliciesSet(input)
	require.ElementsMatch(t, expected, got)
}

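// TestVaultNamespaceSet asserts that VaultNamespaceSet returns the
// deduplicated set of Vault namespaces across all task groups and tasks,
// ignoring nil and empty blocks.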
func TestVaultNamespaceSet(t *testing.T) {
	ci.Parallel(t)

	input := map[string]map[string]*Vault{
		"tg1": {
			"task1": {
				Namespace: "ns1-1",
			},
			"task2": {
				Namespace: "ns1-2",
			},
		},
		"tg2": {
			"task1": {
				Namespace: "ns2",
			},
			"task2": {
				Namespace: "ns2",
			},
		},
		"tg3": {
			"task1": {
				Namespace: "ns3-1",
			},
		},
		"tg4": {
			"task1": nil,
		},
		"tg5": {
			"task1": {
				Namespace: "ns2",
			},
		},
		"tg6": {
			"task1": {},
		},
	}
	expected := []string{
		"ns1-1",
		"ns1-2",
		"ns2",
		"ns3-1",
	}
	got := VaultNamespaceSet(input)
	require.ElementsMatch(t, expected, got)
}

// TestParsePortRanges asserts ParsePortRanges errors on invalid port ranges.
func TestParsePortRanges(t *testing.T) {
	ci.Parallel(t)

	cases := []struct {
		name string
		spec string
		err  string
	}{
		{
			name: "UnmatchedDash",
			spec: "-1",
			err:  `strconv.ParseUint: parsing "": invalid syntax`,
		},
		{
			name: "Zero",
			spec: "0",
			err:  "port must be > 0",
		},
		{
			name: "TooBig",
			spec: fmt.Sprintf("1-%d", MaxValidPort+1),
			err:  "port must be < 65536 but found 65537",
		},
		{
			name: "WayTooBig",           // would OOM if not caught early enough
			spec: "9223372036854775807", // (2**63)-1
			err:  "port must be < 65536 but found 9223372036854775807",
		},
	}

	for i := range cases {
		tc := cases[i]
		t.Run(tc.name, func(t *testing.T) {
			results, err := ParsePortRanges(tc.spec)
			require.Nil(t, results)
			require.EqualError(t, err, tc.err)
		})
	}
}