github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/nomad/structs/funcs_test.go (about)

     1  package structs
     2  
     3  import (
     4  	"encoding/base64"
     5  	"errors"
     6  	"fmt"
     7  	"testing"
     8  
     9  	lru "github.com/hashicorp/golang-lru"
    10  	"github.com/hashicorp/nomad/helper/uuid"
    11  	"github.com/stretchr/testify/assert"
    12  	"github.com/stretchr/testify/require"
    13  )
    14  
    15  func TestRemoveAllocs(t *testing.T) {
    16  	l := []*Allocation{
    17  		{ID: "foo"},
    18  		{ID: "bar"},
    19  		{ID: "baz"},
    20  		{ID: "zip"},
    21  	}
    22  
    23  	out := RemoveAllocs(l, []*Allocation{l[1], l[3]})
    24  	if len(out) != 2 {
    25  		t.Fatalf("bad: %#v", out)
    26  	}
    27  	if out[0].ID != "foo" && out[1].ID != "baz" {
    28  		t.Fatalf("bad: %#v", out)
    29  	}
    30  }
    31  
    32  func TestFilterTerminalAllocs(t *testing.T) {
    33  	l := []*Allocation{
    34  		{
    35  			ID:            "bar",
    36  			Name:          "myname1",
    37  			DesiredStatus: AllocDesiredStatusEvict,
    38  		},
    39  		{ID: "baz", DesiredStatus: AllocDesiredStatusStop},
    40  		{
    41  			ID:            "foo",
    42  			DesiredStatus: AllocDesiredStatusRun,
    43  			ClientStatus:  AllocClientStatusPending,
    44  		},
    45  		{
    46  			ID:            "bam",
    47  			Name:          "myname",
    48  			DesiredStatus: AllocDesiredStatusRun,
    49  			ClientStatus:  AllocClientStatusComplete,
    50  			CreateIndex:   5,
    51  		},
    52  		{
    53  			ID:            "lol",
    54  			Name:          "myname",
    55  			DesiredStatus: AllocDesiredStatusRun,
    56  			ClientStatus:  AllocClientStatusComplete,
    57  			CreateIndex:   2,
    58  		},
    59  	}
    60  
    61  	out, terminalAllocs := FilterTerminalAllocs(l)
    62  	if len(out) != 1 {
    63  		t.Fatalf("bad: %#v", out)
    64  	}
    65  	if out[0].ID != "foo" {
    66  		t.Fatalf("bad: %#v", out)
    67  	}
    68  
    69  	if len(terminalAllocs) != 3 {
    70  		for _, o := range terminalAllocs {
    71  			fmt.Printf("%#v \n", o)
    72  		}
    73  
    74  		t.Fatalf("bad: %#v", terminalAllocs)
    75  	}
    76  
    77  	if terminalAllocs["myname"].ID != "bam" {
    78  		t.Fatalf("bad: %#v", terminalAllocs["myname"])
    79  	}
    80  }
    81  
// COMPAT(0.11): Remove in 0.11
// TestAllocsFit_PortsOvercommitted_Old asserts that AllocsFit rejects two
// allocations reserving the same static port on the same IP, using the
// legacy (pre-0.9) Resources fields on Node and Allocation.
func TestAllocsFit_PortsOvercommitted_Old(t *testing.T) {
	// Node with a single network device and no reservations.
	n := &Node{
		Resources: &Resources{
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
	}

	// Allocation reserving static port 8000 on 10.0.0.1 for task "web".
	a1 := &Allocation{
		Job: &Job{
			TaskGroups: []*TaskGroup{
				{
					Name:          "web",
					EphemeralDisk: DefaultEphemeralDisk(),
				},
			},
		},
		TaskResources: map[string]*Resources{
			"web": {
				Networks: []*NetworkResource{
					{
						Device:        "eth0",
						IP:            "10.0.0.1",
						MBits:         50,
						ReservedPorts: []Port{{"main", 8000, 80, ""}},
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, dim, _, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if !fit {
		t.Fatalf("Bad: %s", dim)
	}

	// Should not fit second allocation: the same alloc twice collides on
	// the reserved port.
	fit, _, _, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if fit {
		t.Fatalf("Bad")
	}
}
   137  
// COMPAT(0.11): Remove in 0.11
// TestAllocsFit_Old checks CPU/memory accounting in AllocsFit using the
// legacy Resources/Reserved node fields: one allocation fits exactly into
// the unreserved half of the node, a second identical one does not.
func TestAllocsFit_Old(t *testing.T) {
	require := require.New(t)

	// Node with 2000 CPU / 2048 MB / 10000 MB disk, half of each reserved.
	n := &Node{
		Resources: &Resources{
			CPU:      2000,
			MemoryMB: 2048,
			DiskMB:   10000,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
		Reserved: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 80, 0, ""}},
				},
			},
		},
	}

	// Allocation consuming exactly the unreserved half of the node.
	a1 := &Allocation{
		Resources: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 8000, 80, ""}},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(err)
	require.False(fit)

	// Sanity check the used resources: both allocs are still summed even
	// though the pair does not fit.
	require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(2048, used.Flattened.Memory.MemoryMB)
}
   204  
// COMPAT(0.11): Remove in 0.11
// TestAllocsFit_TerminalAlloc_Old asserts that a terminal (desired-stop)
// allocation is excluded from resource accounting by AllocsFit, using the
// legacy Resources/Reserved node fields.
func TestAllocsFit_TerminalAlloc_Old(t *testing.T) {
	require := require.New(t)

	// Node with half of its CPU/memory/disk reserved.
	n := &Node{
		Resources: &Resources{
			CPU:      2000,
			MemoryMB: 2048,
			DiskMB:   10000,
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
		},
		Reserved: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 80, 0, ""}},
				},
			},
		},
	}

	// Allocation consuming exactly the unreserved half of the node.
	a1 := &Allocation{
		Resources: &Resources{
			CPU:      1000,
			MemoryMB: 1024,
			DiskMB:   5000,
			Networks: []*NetworkResource{
				{
					Device:        "eth0",
					IP:            "10.0.0.1",
					MBits:         50,
					ReservedPorts: []Port{{"main", 8000, 0, ""}},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should fit second allocation since it is terminal
	a2 := a1.Copy()
	a2.DesiredStatus = AllocDesiredStatusStop
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources: the terminal alloc contributes
	// nothing, so usage matches a single allocation.
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}
   273  
// TestAllocsFit checks CPU/memory accounting in AllocsFit using the
// current NodeResources/ReservedResources and AllocatedResources fields:
// one allocation fits exactly into the unreserved half of the node, a
// second identical one does not.
func TestAllocsFit(t *testing.T) {
	require := require.New(t)

	// Node with 2000 CPU / 2048 MB / 10000 MB disk, half of each reserved,
	// plus host port 80 reserved on the node network.
	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					MBits:  100,
				},
			},
			NodeNetworks: []*NodeNetworkResource{
				{
					Mode:   "host",
					Device: "eth0",
					Addresses: []NodeNetworkAddress{
						{
							Address: "10.0.0.1",
						},
					},
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	// Allocation consuming exactly the unreserved half of the node, and
	// claiming host port 8000 on 10.0.0.1.
	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
				Networks: Networks{
					{
						Mode:          "host",
						IP:            "10.0.0.1",
						ReservedPorts: []Port{{"main", 8000, 0, ""}},
					},
				},
				Ports: AllocatedPorts{
					{
						Label:  "main",
						Value:  8000,
						HostIP: "10.0.0.1",
					},
				},
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should not fit second allocation
	fit, _, used, err = AllocsFit(n, []*Allocation{a1, a1}, nil, false)
	require.NoError(err)
	require.False(fit)

	// Sanity check the used resources: both allocs are still summed even
	// though the pair does not fit.
	require.EqualValues(2000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(2048, used.Flattened.Memory.MemoryMB)
}
   373  
// TestAllocsFit_TerminalAlloc asserts that a terminal (desired-stop)
// allocation is excluded from resource accounting by AllocsFit, using the
// current NodeResources/ReservedResources fields.
func TestAllocsFit_TerminalAlloc(t *testing.T) {
	require := require.New(t)

	// Node with half of its CPU/memory/disk reserved and host port 80
	// reserved on the node network.
	n := &Node{
		NodeResources: &NodeResources{
			Cpu: NodeCpuResources{
				CpuShares: 2000,
			},
			Memory: NodeMemoryResources{
				MemoryMB: 2048,
			},
			Disk: NodeDiskResources{
				DiskMB: 10000,
			},
			Networks: []*NetworkResource{
				{
					Device: "eth0",
					CIDR:   "10.0.0.0/8",
					IP:     "10.0.0.1",
					MBits:  100,
				},
			},
		},
		ReservedResources: &NodeReservedResources{
			Cpu: NodeReservedCpuResources{
				CpuShares: 1000,
			},
			Memory: NodeReservedMemoryResources{
				MemoryMB: 1024,
			},
			Disk: NodeReservedDiskResources{
				DiskMB: 5000,
			},
			Networks: NodeReservedNetworkResources{
				ReservedHostPorts: "80",
			},
		},
	}

	// Allocation consuming exactly the unreserved half of the node.
	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Networks: []*NetworkResource{
						{
							Device:        "eth0",
							IP:            "10.0.0.1",
							MBits:         50,
							ReservedPorts: []Port{{"main", 8000, 80, ""}},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}

	// Should fit one allocation
	fit, _, used, err := AllocsFit(n, []*Allocation{a1}, nil, false)
	require.NoError(err)
	require.True(fit)

	// Sanity check the used resources
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)

	// Should fit second allocation since it is terminal
	a2 := a1.Copy()
	a2.DesiredStatus = AllocDesiredStatusStop
	fit, dim, used, err := AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit, dim)

	// Sanity check the used resources: the terminal alloc contributes
	// nothing, so usage matches a single allocation.
	require.EqualValues(1000, used.Flattened.Cpu.CpuShares)
	require.EqualValues(1024, used.Flattened.Memory.MemoryMB)
}
   459  
// Tests that AllocsFit detects device collisions: two allocations claiming
// the same device instance ID must not fit when device checking is enabled,
// and the collision goes undetected when it is disabled.
func TestAllocsFit_Devices(t *testing.T) {
	require := require.New(t)

	n := MockNvidiaNode()
	// Allocation claiming the first instance of the node's nvidia GPU.
	a1 := &Allocation{
		AllocatedResources: &AllocatedResources{
			Tasks: map[string]*AllocatedTaskResources{
				"web": {
					Cpu: AllocatedCpuResources{
						CpuShares: 1000,
					},
					Memory: AllocatedMemoryResources{
						MemoryMB: 1024,
					},
					Devices: []*AllocatedDeviceResource{
						{
							Type:      "gpu",
							Vendor:    "nvidia",
							Name:      "1080ti",
							DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID},
						},
					},
				},
			},
			Shared: AllocatedSharedResources{
				DiskMB: 5000,
			},
		},
	}
	// Second allocation claiming the SAME device instance ID.
	a2 := a1.Copy()
	a2.AllocatedResources.Tasks["web"] = &AllocatedTaskResources{
		Cpu: AllocatedCpuResources{
			CpuShares: 1000,
		},
		Memory: AllocatedMemoryResources{
			MemoryMB: 1024,
		},
		Devices: []*AllocatedDeviceResource{
			{
				Type:      "gpu",
				Vendor:    "nvidia",
				Name:      "1080ti",
				DeviceIDs: []string{n.NodeResources.Devices[0].Instances[0].ID}, // Use the same ID
			},
		},
	}

	// Should fit one allocation (device checking enabled)
	fit, _, _, err := AllocsFit(n, []*Allocation{a1}, nil, true)
	require.NoError(err)
	require.True(fit)

	// Should not fit second allocation
	fit, msg, _, err := AllocsFit(n, []*Allocation{a1, a2}, nil, true)
	require.NoError(err)
	require.False(fit)
	require.Equal("device oversubscribed", msg)

	// Should not fit second allocation but won't detect since we disabled
	// devices
	fit, _, _, err = AllocsFit(n, []*Allocation{a1, a2}, nil, false)
	require.NoError(err)
	require.True(fit)
}
   525  
   526  // COMPAT(0.11): Remove in 0.11
   527  func TestScoreFitBinPack_Old(t *testing.T) {
   528  	node := &Node{}
   529  	node.Resources = &Resources{
   530  		CPU:      4096,
   531  		MemoryMB: 8192,
   532  	}
   533  	node.Reserved = &Resources{
   534  		CPU:      2048,
   535  		MemoryMB: 4096,
   536  	}
   537  
   538  	// Test a perfect fit
   539  	util := &ComparableResources{
   540  		Flattened: AllocatedTaskResources{
   541  			Cpu: AllocatedCpuResources{
   542  				CpuShares: 2048,
   543  			},
   544  			Memory: AllocatedMemoryResources{
   545  				MemoryMB: 4096,
   546  			},
   547  		},
   548  	}
   549  	score := ScoreFitBinPack(node, util)
   550  	if score != 18.0 {
   551  		t.Fatalf("bad: %v", score)
   552  	}
   553  
   554  	// Test the worst fit
   555  	util = &ComparableResources{
   556  		Flattened: AllocatedTaskResources{
   557  			Cpu: AllocatedCpuResources{
   558  				CpuShares: 0,
   559  			},
   560  			Memory: AllocatedMemoryResources{
   561  				MemoryMB: 0,
   562  			},
   563  		},
   564  	}
   565  	score = ScoreFitBinPack(node, util)
   566  	if score != 0.0 {
   567  		t.Fatalf("bad: %v", score)
   568  	}
   569  
   570  	// Test a mid-case scenario
   571  	util = &ComparableResources{
   572  		Flattened: AllocatedTaskResources{
   573  			Cpu: AllocatedCpuResources{
   574  				CpuShares: 1024,
   575  			},
   576  			Memory: AllocatedMemoryResources{
   577  				MemoryMB: 2048,
   578  			},
   579  		},
   580  	}
   581  	score = ScoreFitBinPack(node, util)
   582  	if score < 10.0 || score > 16.0 {
   583  		t.Fatalf("bad: %v", score)
   584  	}
   585  }
   586  
   587  func TestScoreFitBinPack(t *testing.T) {
   588  	node := &Node{}
   589  	node.NodeResources = &NodeResources{
   590  		Cpu: NodeCpuResources{
   591  			CpuShares: 4096,
   592  		},
   593  		Memory: NodeMemoryResources{
   594  			MemoryMB: 8192,
   595  		},
   596  	}
   597  	node.ReservedResources = &NodeReservedResources{
   598  		Cpu: NodeReservedCpuResources{
   599  			CpuShares: 2048,
   600  		},
   601  		Memory: NodeReservedMemoryResources{
   602  			MemoryMB: 4096,
   603  		},
   604  	}
   605  
   606  	cases := []struct {
   607  		name         string
   608  		flattened    AllocatedTaskResources
   609  		binPackScore float64
   610  		spreadScore  float64
   611  	}{
   612  		{
   613  			name: "almost filled node, but with just enough hole",
   614  			flattened: AllocatedTaskResources{
   615  				Cpu:    AllocatedCpuResources{CpuShares: 2048},
   616  				Memory: AllocatedMemoryResources{MemoryMB: 4096},
   617  			},
   618  			binPackScore: 18,
   619  			spreadScore:  0,
   620  		},
   621  		{
   622  			name: "unutilized node",
   623  			flattened: AllocatedTaskResources{
   624  				Cpu:    AllocatedCpuResources{CpuShares: 0},
   625  				Memory: AllocatedMemoryResources{MemoryMB: 0},
   626  			},
   627  			binPackScore: 0,
   628  			spreadScore:  18,
   629  		},
   630  		{
   631  			name: "mid-case scnario",
   632  			flattened: AllocatedTaskResources{
   633  				Cpu:    AllocatedCpuResources{CpuShares: 1024},
   634  				Memory: AllocatedMemoryResources{MemoryMB: 2048},
   635  			},
   636  			binPackScore: 13.675,
   637  			spreadScore:  4.325,
   638  		},
   639  	}
   640  
   641  	for _, c := range cases {
   642  		t.Run(c.name, func(t *testing.T) {
   643  			util := &ComparableResources{Flattened: c.flattened}
   644  
   645  			binPackScore := ScoreFitBinPack(node, util)
   646  			require.InDelta(t, c.binPackScore, binPackScore, 0.001, "binpack score")
   647  
   648  			spreadScore := ScoreFitSpread(node, util)
   649  			require.InDelta(t, c.spreadScore, spreadScore, 0.001, "spread score")
   650  
   651  			require.InDelta(t, 18, binPackScore+spreadScore, 0.001, "score sum")
   652  		})
   653  	}
   654  }
   655  
// TestACLPolicyListHash checks that ACLPolicyListHash produces a non-empty
// hash that changes whenever the policy set, a policy name, or a policy's
// ModifyIndex changes.
func TestACLPolicyListHash(t *testing.T) {
	// Even an empty list hashes to a non-empty string.
	h1 := ACLPolicyListHash(nil)
	assert.NotEqual(t, "", h1)

	p1 := &ACLPolicy{
		Name:        fmt.Sprintf("policy-%s", uuid.Generate()),
		Description: "Super cool policy!",
		Rules: `
		namespace "default" {
			policy = "write"
		}
		node {
			policy = "read"
		}
		agent {
			policy = "read"
		}
		`,
		CreateIndex: 10,
		ModifyIndex: 20,
	}

	// Adding a policy changes the hash.
	h2 := ACLPolicyListHash([]*ACLPolicy{p1})
	assert.NotEqual(t, "", h2)
	assert.NotEqual(t, h1, h2)

	// Create P2 as copy of P1 with new name
	p2 := &ACLPolicy{}
	*p2 = *p1
	p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())

	// A different policy set yields a different hash.
	h3 := ACLPolicyListHash([]*ACLPolicy{p1, p2})
	assert.NotEqual(t, "", h3)
	assert.NotEqual(t, h2, h3)

	h4 := ACLPolicyListHash([]*ACLPolicy{p2})
	assert.NotEqual(t, "", h4)
	assert.NotEqual(t, h3, h4)

	// ModifyIndex should change the hash
	p2.ModifyIndex++
	h5 := ACLPolicyListHash([]*ACLPolicy{p2})
	assert.NotEqual(t, "", h5)
	assert.NotEqual(t, h4, h5)
}
   701  
   702  func TestCompileACLObject(t *testing.T) {
   703  	p1 := &ACLPolicy{
   704  		Name:        fmt.Sprintf("policy-%s", uuid.Generate()),
   705  		Description: "Super cool policy!",
   706  		Rules: `
   707  		namespace "default" {
   708  			policy = "write"
   709  		}
   710  		node {
   711  			policy = "read"
   712  		}
   713  		agent {
   714  			policy = "read"
   715  		}
   716  		`,
   717  		CreateIndex: 10,
   718  		ModifyIndex: 20,
   719  	}
   720  
   721  	// Create P2 as copy of P1 with new name
   722  	p2 := &ACLPolicy{}
   723  	*p2 = *p1
   724  	p2.Name = fmt.Sprintf("policy-%s", uuid.Generate())
   725  
   726  	// Create a small cache
   727  	cache, err := lru.New2Q(16)
   728  	assert.Nil(t, err)
   729  
   730  	// Test compilation
   731  	aclObj, err := CompileACLObject(cache, []*ACLPolicy{p1})
   732  	assert.Nil(t, err)
   733  	assert.NotNil(t, aclObj)
   734  
   735  	// Should get the same object
   736  	aclObj2, err := CompileACLObject(cache, []*ACLPolicy{p1})
   737  	assert.Nil(t, err)
   738  	if aclObj != aclObj2 {
   739  		t.Fatalf("expected the same object")
   740  	}
   741  
   742  	// Should get another object
   743  	aclObj3, err := CompileACLObject(cache, []*ACLPolicy{p1, p2})
   744  	assert.Nil(t, err)
   745  	assert.NotNil(t, aclObj3)
   746  	if aclObj == aclObj3 {
   747  		t.Fatalf("unexpected same object")
   748  	}
   749  
   750  	// Should be order independent
   751  	aclObj4, err := CompileACLObject(cache, []*ACLPolicy{p2, p1})
   752  	assert.Nil(t, err)
   753  	assert.NotNil(t, aclObj4)
   754  	if aclObj3 != aclObj4 {
   755  		t.Fatalf("expected same object")
   756  	}
   757  }
   758  
// TestGenerateMigrateToken asserts the migrate token is valid for use in HTTP
// headers and CompareMigrateToken works as expected.
func TestGenerateMigrateToken(t *testing.T) {
	assert := assert.New(t)
	allocID := uuid.Generate()
	nodeSecret := uuid.Generate()
	token, err := GenerateMigrateToken(allocID, nodeSecret)
	assert.Nil(err)
	// Token must be URL-safe base64 so it can travel in an HTTP header.
	_, err = base64.URLEncoding.DecodeString(token)
	assert.Nil(err)

	// Comparison succeeds only with the exact allocID/secret/token triple.
	assert.True(CompareMigrateToken(allocID, nodeSecret, token))
	assert.False(CompareMigrateToken("x", nodeSecret, token))
	assert.False(CompareMigrateToken(allocID, "x", token))
	assert.False(CompareMigrateToken(allocID, nodeSecret, "x"))

	// A token generated for a different alloc ID only matches that ID.
	token2, err := GenerateMigrateToken("x", nodeSecret)
	assert.Nil(err)
	assert.False(CompareMigrateToken(allocID, nodeSecret, token2))
	assert.True(CompareMigrateToken("x", nodeSecret, token2))
}
   780  
   781  func TestMergeMultierrorWarnings(t *testing.T) {
   782  	var errs []error
   783  
   784  	// empty
   785  	str := MergeMultierrorWarnings(errs...)
   786  	require.Equal(t, "", str)
   787  
   788  	// non-empty
   789  	errs = []error{
   790  		errors.New("foo"),
   791  		nil,
   792  		errors.New("bar"),
   793  	}
   794  
   795  	str = MergeMultierrorWarnings(errs...)
   796  
   797  	require.Equal(t, "2 warning(s):\n\n* foo\n* bar", str)
   798  }