github.com/uchennaokeke444/nomad@v0.11.8/nomad/structs/funcs_test.go

package structs

import (
	"encoding/base64"
	"fmt"
	"testing"

	lru "github.com/hashicorp/golang-lru"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

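// TestRemoveAllocs verifies that RemoveAllocs filters the given allocations
// out of the list while preserving the order of the remaining entries.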
func TestRemoveAllocs(t *testing.T) {
	l := []*Allocation{
		{ID: "foo"},
		{ID: "bar"},
		{ID: "baz"},
		{ID: "zip"},
	}

	out := RemoveAllocs(l, []*Allocation{l[1], l[3]})
	if len(out) != 2 {
		t.Fatalf("bad: %#v", out)
	}
	if out[0].ID != "foo" || out[1].ID != "baz" {
		t.Fatalf("bad: %#v", out)
	}
}

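// TestFilterTerminalAllocs verifies that FilterTerminalAllocs splits a list
// into non-terminal allocations and a map of terminal allocations keyed by
// name, keeping only the terminal allocation with the highest CreateIndex
// for each name.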
func TestFilterTerminalAllocs(t *testing.T) {
	l := []*Allocation{
		{
			ID:            "bar",
			Name:          "myname1",
			DesiredStatus: AllocDesiredStatusEvict,
		},
		{ID: "baz", DesiredStatus: AllocDesiredStatusStop},
		{
			ID:            "foo",
			DesiredStatus: AllocDesiredStatusRun,
			ClientStatus:  AllocClientStatusPending,
		},
		{
			ID:            "bam",
			Name:          "myname",
			DesiredStatus: AllocDesiredStatusRun,
			ClientStatus:  AllocClientStatusComplete,
			CreateIndex:   5,
		},
		{
			ID:            "lol",
			Name:          "myname",
			DesiredStatus: AllocDesiredStatusRun,
			ClientStatus:  AllocClientStatusComplete,
			CreateIndex:   2,
		},
	}

	out, terminalAllocs := FilterTerminalAllocs(l)
	if len(out) != 1 {
		t.Fatalf("bad: %#v", out)
	}
	if out[0].ID != "foo" {
		t.Fatalf("bad: %#v", out)
	}

	if len(terminalAllocs) != 3 {
		for _, o := range terminalAllocs {
			fmt.Printf("%#v \n", o)
		}

		t.Fatalf("bad: %#v", terminalAllocs)
	}

	if terminalAllocs["myname"].ID != "bam" {
		t.Fatalf("bad: %#v", terminalAllocs["myname"])
	}
}

// COMPAT(0.11): Remove in 0.11
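// TestAllocsFit_PortsOvercommitted_Old exercises the legacy Resources-based
// fit path: a single allocation reserving port 8000 fits the node, but
// scheduling two copies overcommits the reserved port and is rejected.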
func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) {
	n := &Node{
		Resources: &Resources{
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
	}

	a1 := &Allocation{
		Job: &Job{
			TaskGroups: []*TaskGroup{
				{
					Name:          "web",
					EphemeralDisk: DefaultEphemeralDisk(),
				},
			},
		},
		TaskResources: map[string]*Resources{
			"web": {
				Networks: []*NetworkResource{
					{
						Device:        "eth0",
						IP:            "10.0.0.1",
						MBits:         50,
						ReservedPorts: []Port{{"main", 8000, 80}},
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, dim, _, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !fit {
		t.Fatalf("Bad: %s", dim)
	}

	// Should not fit second allocation
	fit, _, _, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if fit {
		t.Fatalf("Bad")
	}
}

// COMPAT(0.11): Remove in 0.11
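// TestAllocsFit_Old checks fit accounting against the legacy Resources
// structs: one allocation exactly consumes the node's unreserved CPU and
// memory, and a second identical allocation no longer fits.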
func TestAllocsFit_Old(t *testing.T) {
	require := require.New(t)

	n := &Node{
		Resources: &Resources{
			CPU:      2000,
			MemoryMB: 2048,
			DiskMB:   10000,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
		Reserved: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 80, 0}},
				},
			},
		},
	}

	a1 := &Allocation{
		Resources: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 8000, 80}},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(err)
	require.False(fit)

	// Sanity check the used resources
	require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(2048, used.Flattened.Memory.MemoryMB)
}

// COMPAT(0.11): Remove in 0.11
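// TestAllocsFit_TerminalAlloc_Old verifies that an allocation with a terminal
// desired status does not count against node capacity in the legacy fit path.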
func TestAllocsFit_TerminalAlloc_Old(t *testing.T) {
	require := require.New(t)

	n := &Node{
		Resources: &Resources{
			CPU:      2000,
			MemoryMB: 2048,
			DiskMB:   10000,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
		Reserved: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 80, 0}},
				},
			},
		},
	}

	a1 := &Allocation{
		Resources: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 8000, 0}},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should fit second allocation since it is terminal
	a2 := a1.Copy()
	a2.DesiredStatus = AllocDesiredStatusStop
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}

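// TestAllocsFit checks the same fit accounting against the current
// NodeResources/AllocatedResources structs: one allocation exactly consumes
// the node's unreserved capacity and a second identical one is rejected.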
func TestAllocsFit(t *testing.T) {
	require := require.New(t)

	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "10.0.0.1",
							MBits:         50,
							ReservedPorts: []Port{{"main", 8000, 0}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(err)
	require.False(fit)

	// Sanity check the used resources
	require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(2048, used.Flattened.Memory.MemoryMB)
}

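// TestAllocsFit_TerminalAlloc verifies that a stopped copy of an allocation
// does not count against node capacity: used resources stay at one
// allocation's worth even with the terminal copy included.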
func TestAllocsFit_TerminalAlloc(t *testing.T) {
	require := require.New(t)

	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					IP:     "10.0.0.1",
					MBits:  100,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "10.0.0.1",
							MBits:         50,
							ReservedPorts: []Port{{"main", 8000, 80}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should fit second allocation since it is terminal
	a2 := a1.Copy()
	a2.DesiredStatus = AllocDesiredStatusStop
	fit, dim, used, err := AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit, dim)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}

// TestAllocsFit_Devices tests that AllocsFit detects device collisions.
func TestAllocsFit_Devices(t *testing.T) {
	require := require.New(t)

	n := MockNvidiaNode()
	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Devices: []*AllocatedDeviceResource{
						{
							Type:      "gpu",
							Vendor:    "nvidia",
							Name:      "1080ti",
							DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}
	a2 := a1.Copy()
	a2.AllocatedResources.Tasks["web"] = &AllocatedTaskResources{
		Cpu: AllocatedCpuResources{
			CpuShares: 1000,
		},
		Memory: AllocatedMemoryResources{
			MemoryMB: 1024,
		},
		Devices: []*AllocatedDeviceResource{
			{
				Type:      "gpu",
				Vendor:    "nvidia",
				Name:      "1080ti",
				DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID}, // Use the same ID
			},
		},
	}

	// Should fit one allocation
	fit, _, _, err := AllocsFit(n, []*Allocation{a1}, nil, true)
	require.NoError(err)
	require.True(fit)

	// Should not fit second allocation
	fit, msg, _, err := AllocsFit(n, []*Allocation{a1, a2}, nil, true)
	require.NoError(err)
	require.False(fit)
	require.Equal("device oversubscribed", msg)

	// The second allocation still should not fit, but the collision goes
	// undetected because device accounting is disabled
	fit, _, _, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit)
}

// COMPAT(0.11): Remove in 0.11
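// TestScoreFitBinPack_Old exercises the bin-packing score on the legacy
// Resources structs: a perfect fit scores the full 18 points, an idle node
// scores 0, and a half-utilized node lands in between.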
func TestScoreFitBinPack_Old(t *testing.T) {
	node := &Node{}
	node.Resources = &Resources{
		CPU:      4096,
		MemoryMB: 8192,
	}
	node.Reserved = &Resources{
		CPU:      2048,
		MemoryMB: 4096,
	}

	// Test a perfect fit
	util := &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 2048,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 4096,
			},
		},
	}
	score := ScoreFitBinPack(node, util)
	if score != 18.0 {
		t.Fatalf("bad: %v", score)
	}

	// Test the worst fit
	util = &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 0,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 0,
			},
		},
	}
	score = ScoreFitBinPack(node, util)
	if score != 0.0 {
		t.Fatalf("bad: %v", score)
	}

	// Test a mid-case scenario
	util = &ComparableResources{
		Flattened: AllocatedTaskResources{
			Cpu: AllocatedCpuResources{
				CpuShares: 1024,
			},
			Memory: AllocatedMemoryResources{
				MemoryMB: 2048,
			},
		},
	}
	score = ScoreFitBinPack(node, util)
	if score < 10.0 || score > 16.0 {
		t.Fatalf("bad: %v", score)
	}
}

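// TestScoreFitBinPack runs table-driven cases against both ScoreFitBinPack
// and ScoreFitSpread and asserts the two scores are complementary: for any
// utilization they sum to the maximum score of 18.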
func TestScoreFitBinPack(t *testing.T) {
	node := &Node{}
	node.NodeResources = &NodeResources{
		Cpu: NodeCpuResources{
			CpuShares: 4096,
		},
		Memory: NodeMemoryResources{
			MemoryMB: 8192,
		},
	}
	node.ReservedResources = &NodeReservedResources{
		Cpu: NodeReservedCpuResources{
			CpuShares: 2048,
		},
		Memory: NodeReservedMemoryResources{
			MemoryMB: 4096,
		},
	}

	cases := []struct {
		name         string
		flattened    AllocatedTaskResources
		binPackScore float64
		spreadScore  float64
	}{
		{
			name: "almost filled node, but with just enough hole",
			flattened: AllocatedTaskResources{
				Cpu:    AllocatedCpuResources{CpuShares: 2048},
				Memory: AllocatedMemoryResources{MemoryMB: 4096},
			},
			binPackScore: 18,
			spreadScore:  0,
		},
		{
			name: "unutilized node",
			flattened: AllocatedTaskResources{
				Cpu:    AllocatedCpuResources{CpuShares: 0},
				Memory: AllocatedMemoryResources{MemoryMB: 0},
			},
			binPackScore: 0,
			spreadScore:  18,
		},
		{
			name: "mid-case scenario",
			flattened: AllocatedTaskResources{
				Cpu:    AllocatedCpuResources{CpuShares: 1024},
				Memory: AllocatedMemoryResources{MemoryMB: 2048},
			},
			binPackScore: 13.675,
			spreadScore:  4.325,
		},
	}

	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			util := &ComparableResources{Flattened: c.flattened}

			binPackScore := ScoreFitBinPack(node, util)
			require.InDelta(t, c.binPackScore, binPackScore, 0.001, "binpack score")

			spreadScore := ScoreFitSpread(node, util)
			require.InDelta(t, c.spreadScore, spreadScore, 0.001, "spread score")

			require.InDelta(t, 18, binPackScore+spreadScore, 0.001, "score sum")
		})
	}
}

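// TestACLPolicyListHash asserts that the policy list hash is never empty and
// changes whenever the set of policies or a policy's ModifyIndex changes.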
func TestACLPolicyListHash(t *testing.T) {
	h1 := ACLPolicyListHash(nil)
	assert.NotEqual(t, "", h1)

	p1 := &ACLPolicy{
		Name:        fmt.Sprintf("policy-%s", uuid.Generate()),
		Description: "Super cool policy!",
		Rules: `
		namespace "default" {
			policy = "write"
		}
		node {
			policy = "read"
		}
		agent {
			policy = "read"
		}
		`,
		CreateIndex: 10,
		ModifyIndex: 20,
	}

	h2 := ACLPolicyListHash([]*ACLPolicy{p1})
	assert.NotEqual(t, "", h2)
	assert.NotEqual(t, h1, h2)

	// Create P2 as copy of P1 with new name
	p2 := &ACLPolicy{}
	*p2 = *p1
	p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())

	h3 := ACLPolicyListHash([]*ACLPolicy{p1, p2})
	assert.NotEqual(t, "", h3)
	assert.NotEqual(t, h2, h3)

	h4 := ACLPolicyListHash([]*ACLPolicy{p2})
	assert.NotEqual(t, "", h4)
	assert.NotEqual(t, h3, h4)

	// ModifyIndex should change the hash
	p2.ModifyIndex++
	h5 := ACLPolicyListHash([]*ACLPolicy{p2})
	assert.NotEqual(t, "", h5)
	assert.NotEqual(t, h4, h5)
}

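// TestCompileACLObject asserts that compiled ACL objects are cached: the same
// policy set (in any order) returns the same object, while a different set
// compiles a new one.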
func TestCompileACLObject(t *testing.T) {
	p1 := &ACLPolicy{
		Name:        fmt.Sprintf("policy-%s", uuid.Generate()),
		Description: "Super cool policy!",
		Rules: `
		namespace "default" {
			policy = "write"
		}
		node {
			policy = "read"
		}
		agent {
			policy = "read"
		}
		`,
		CreateIndex: 10,
		ModifyIndex: 20,
	}

	// Create P2 as copy of P1 with new name
	p2 := &ACLPolicy{}
	*p2 = *p1
	p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())

	// Create a small cache
	cache, err := lru.New2Q(16)
	assert.Nil(t, err)

	// Test compilation
	aclObj, err := CompileACLObject(cache, []*ACLPolicy{p1})
	assert.Nil(t, err)
	assert.NotNil(t, aclObj)

	// Should get the same object
	aclObj2, err := CompileACLObject(cache, []*ACLPolicy{p1})
	assert.Nil(t, err)
	if aclObj != aclObj2 {
		t.Fatalf("expected the same object")
	}

	// Should get another object
	aclObj3, err := CompileACLObject(cache, []*ACLPolicy{p1, p2})
	assert.Nil(t, err)
	assert.NotNil(t, aclObj3)
	if aclObj == aclObj3 {
		t.Fatalf("unexpected same object")
	}

	// Should be order independent
	aclObj4, err := CompileACLObject(cache, []*ACLPolicy{p2, p1})
	assert.Nil(t, err)
	assert.NotNil(t, aclObj4)
	if aclObj3 != aclObj4 {
		t.Fatalf("expected same object")
	}
}

// TestGenerateMigrateToken asserts the migrate token is valid for use in HTTP
// headers and CompareMigrateToken works as expected.
func TestGenerateMigrateToken(t *testing.T) {
	assert := assert.New(t)
	allocID := uuid.Generate()
	nodeSecret := uuid.Generate()
	token, err := GenerateMigrateToken(allocID, nodeSecret)
	assert.Nil(err)
	_, err = base64.URLEncoding.DecodeString(token)
	assert.Nil(err)

	assert.True(CompareMigrateToken(allocID, nodeSecret, token))
	assert.False(CompareMigrateToken("x", nodeSecret, token))
	assert.False(CompareMigrateToken(allocID, "x", token))
	assert.False(CompareMigrateToken(allocID, nodeSecret, "x"))

	token2, err := GenerateMigrateToken("x", nodeSecret)
	assert.Nil(err)
	assert.False(CompareMigrateToken(allocID, nodeSecret, token2))
	assert.True(CompareMigrateToken("x", nodeSecret, token2))
}