github.com/hspak/nomad@v0.7.2-0.20180309000617-bc4ae22a39a5/client/client_test.go

package client

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/driver"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	nconfig "github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	ctestutil "github.com/hashicorp/nomad/client/testutil"
)

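// testACLServer starts an in-process Nomad test server with ACLs enabled and
// returns the server, its RPC address, and the bootstrap ACL token.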
func testACLServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string, *structs.ACLToken) {
	server, token := nomad.TestACLServer(t, cb)
	return server, server.GetConfig().RPCAddr.String(), token
}

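// testServer starts an in-process Nomad test server and returns it along with
// its RPC address.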
func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
	server := nomad.TestServer(t, cb)
	return server, server.GetConfig().RPCAddr.String()
}

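// TestClient_StartStop verifies that a client can be created and shut down
// cleanly.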
func TestClient_StartStop(t *testing.T) {
	t.Parallel()
	client := TestClient(t, nil)
	if err := client.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
}

// Certain labels for metrics are dependent on client initial setup. This tests
// that the client has properly initialized before we assign values to labels.
func TestClient_BaseLabels(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	client := TestClient(t, nil)
	if err := client.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// directly invoke this function, as otherwise this will fail on a CI build
	// due to a race condition
	client.emitStats()

	baseLabels := client.baseLabels
	assert.NotEqual(0, len(baseLabels))

	nodeID := client.Node().ID
	for _, e := range baseLabels {
		if e.Name == "node_id" {
			assert.Equal(nodeID, e.Value)
		}
	}
}

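// TestClient_RPC verifies that a client configured with a server address can
// successfully forward RPCs to that server.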
func TestClient_RPC(t *testing.T) {
	t.Parallel()
	s1, addr := testServer(t, nil)
	defer s1.Shutdown()

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

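// TestClient_RPC_Passthrough verifies that RPCs succeed when the client is
// wired directly to a server via an in-process RPC handler.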
func TestClient_RPC_Passthrough(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

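// TestClient_Fingerprint verifies that fingerprinting populates the default
// node attributes, including the mock driver.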
func TestClient_Fingerprint(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	driver.CheckForMockDriver(t)

	c := TestClient(t, nil)
	defer c.Shutdown()

	// Ensure default values are present
	node := c.Node()
	require.NotEqual("", node.Attributes["kernel.name"])
	require.NotEqual("", node.Attributes["cpu.arch"])
	require.NotEqual("", node.Attributes["driver.mock_driver"])
}

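// TestClient_Fingerprint_Periodic verifies that periodic fingerprinting first
// detects the mock driver and later removes its attribute after the driver's
// configured shutdown period.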
func TestClient_Fingerprint_Periodic(t *testing.T) {
	driver.CheckForMockDriver(t)
	t.Parallel()

	// these constants are only defined when nomad_test is enabled, so these fail
	// our linter without explicit disabling.
	c1 := TestClient(t, func(c *config.Config) {
		c.Options = map[string]string{
			driver.ShutdownPeriodicAfter:    "true",
			driver.ShutdownPeriodicDuration: "3",
		}
	})
	defer c1.Shutdown()

	node := c1.config.Node
	mockDriverName := "driver.mock_driver"

	// Ensure the mock driver is registered on the client
	testutil.WaitForResult(func() (bool, error) {
		mockDriverStatus := node.Attributes[mockDriverName]
		if mockDriverStatus == "" {
			return false, fmt.Errorf("mock driver attribute should be set on the client")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Ensure that the client fingerprinter eventually removes this attribute
	testutil.WaitForResult(func() (bool, error) {
		mockDriverStatus := node.Attributes[mockDriverName]
		if mockDriverStatus != "" {
			return false, fmt.Errorf("mock driver attribute should not be set on the client")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

// TestClient_MixedTLS asserts that when a server is running with TLS enabled
// it will reject any RPC connections from clients that lack TLS. See #2525
func TestClient_MixedTLS(t *testing.T) {
	t.Parallel()
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)
	s1, addr := testServer(t, func(c *nomad.Config) {
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse
	testutil.AssertUntil(100*time.Millisecond,
		func() (bool, error) {
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
		func(err error) {
			t.Fatalf(err.Error())
		},
	)
}

// TestClient_BadTLS asserts that when a client and server are running with TLS
// enabled -- but their certificates are signed by different CAs -- they're
// unable to communicate.
func TestClient_BadTLS(t *testing.T) {
	t.Parallel()
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
		badca   = "../helper/tlsutil/testdata/ca-bad.pem"
		badcert = "../helper/tlsutil/testdata/nomad-bad.pem"
		badkey  = "../helper/tlsutil/testdata/nomad-bad-key.pem"
	)
	s1, addr := testServer(t, func(c *nomad.Config) {
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               badca,
			CertFile:             badcert,
			KeyFile:              badkey,
		}
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse
	testutil.AssertUntil(100*time.Millisecond,
		func() (bool, error) {
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
		func(err error) {
			t.Fatalf(err.Error())
		},
	)
}

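// TestClient_Register verifies that the client registers itself with the
// server and can be looked up by node ID.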
func TestClient_Register(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// Register should succeed
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.ID == req.NodeID, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

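// TestClient_Heartbeat verifies that a registered client heartbeats and the
// server marks its node as ready.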
func TestClient_Heartbeat(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, func(c *nomad.Config) {
		c.MinHeartbeatTTL = 50 * time.Millisecond
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// Register should succeed
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.Status == structs.NodeStatusReady, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

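// TestClient_UpdateAllocStatus verifies that the client pushes allocation
// client status updates back to the server's state store.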
func TestClient_UpdateAllocStatus(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait til the node is ready
	waitTilNodeReady(c1, t)

	job := mock.Job()
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job = job
	alloc.JobID = job.ID
	originalStatus := "foo"
	alloc.ClientStatus = originalStatus

	// Insert at zero so they are pulled
	state := s1.State()
	if err := state.UpsertJob(0, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(100, mock.JobSummary(alloc.JobID)); err != nil {
		t.Fatal(err)
	}
	state.UpsertAllocs(101, []*structs.Allocation{alloc})

	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("no such alloc")
		}
		if out.ClientStatus == originalStatus {
			return false, fmt.Errorf("Alloc client status not updated; got %v", out.ClientStatus)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

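// TestClient_WatchAllocs verifies that the client tracks allocations assigned
// to its node, garbage collects allocations that are removed server-side, and
// picks up updates to the remaining allocations.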
func TestClient_WatchAllocs(t *testing.T) {
	t.Parallel()
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait til the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.JobID = job.ID
	alloc1.Job = job
	alloc1.NodeID = c1.Node().ID
	alloc2 := mock.Alloc()
	alloc2.NodeID = c1.Node().ID
	alloc2.JobID = job.ID
	alloc2.Job = job

	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 2, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Delete one allocation
	if err := state.DeleteEval(103, nil, []string{alloc1.ID}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the other allocation. Have to make a copy because the allocs are
	// shared in memory in the test and the modify index would be updated in the
	// alloc runner.
	alloc2_2 := alloc2.Copy()
	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(104, []*structs.Allocation{alloc2_2}); err != nil {
		t.Fatalf("err upserting stopped alloc: %v", err)
	}

	// One allocation should get GC'd and removed
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 1, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// One allocation should get updated
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc2.ID]
		c1.allocLock.RUnlock()
		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

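// waitTilNodeReady blocks until the client's node reports the ready status,
// failing the test if it does not.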
func waitTilNodeReady(client *Client, t *testing.T) {
	testutil.WaitForResult(func() (bool, error) {
		n := client.Node()
		if n.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("node not registered")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

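// TestClient_SaveRestoreState verifies that a non-dev-mode client persists its
// allocation state on shutdown and that a new client created with the same
// configuration restores the running allocation.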
func TestClient_SaveRestoreState(t *testing.T) {
	t.Parallel()
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.DevMode = false
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait til the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.NodeID = c1.Node().ID
	alloc1.Job = job
	alloc1.JobID = job.ID
	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	task := alloc1.Job.TaskGroups[0].Tasks[0]
	task.Config["run_for"] = "10s"

	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc1.ID]
		c1.allocLock.RUnlock()
		if ar == nil {
			return false, fmt.Errorf("nil alloc runner")
		}
		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Shutdown the client, saves state
	if err := c1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new client
	logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
	catalog := consul.NewMockCatalog(logger)
	mockService := newMockConsulServiceClient(t)
	mockService.logger = logger
	c2, err := NewClient(c1.config, catalog, mockService, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer c2.Shutdown()

	// Ensure the allocation is running
	testutil.WaitForResult(func() (bool, error) {
		c2.allocLock.RLock()
		ar := c2.allocs[alloc1.ID]
		c2.allocLock.RUnlock()
		status := ar.Alloc().ClientStatus
		alive := status == structs.AllocClientStatusRunning || status == structs.AllocClientStatusPending
		if !alive {
			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	for _, ar := range c2.getAllocRunners() {
		ar.Destroy()
	}

	for _, ar := range c2.getAllocRunners() {
		<-ar.WaitCh()
	}
}

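// TestClient_Init verifies that client initialization creates the configured
// allocation directory.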
func TestClient_Init(t *testing.T) {
	t.Parallel()
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.RemoveAll(dir)
	allocDir := filepath.Join(dir, "alloc")

	client := &Client{
		config: &config.Config{
			AllocDir: allocDir,
		},
		logger: log.New(os.Stderr, "", log.LstdFlags),
	}
	if err := client.init(); err != nil {
		t.Fatalf("err: %s", err)
	}

	if _, err := os.Stat(allocDir); err != nil {
		t.Fatalf("err: %s", err)
	}
}

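// TestClient_BlockedAllocations verifies that an allocation chained to a still
// running previous allocation is tracked as blocked, and that it is unblocked
// once the previous allocation is stopped.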
func TestClient_BlockedAllocations(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait for the node to be ready
	state := s1.State()
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.NodeByID(ws, c1.Node().ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("bad node: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add an allocation
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
		"kill_after":  "1s",
		"run_for":     "100s",
		"exit_code":   0,
		"exit_signal": 0,
		"exit_err":    "",
	}

	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	state.UpsertAllocs(100, []*structs.Allocation{alloc})

	// Wait until the client downloads and starts the allocation
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("bad alloc: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add a new chained alloc
	alloc2 := alloc.Copy()
	alloc2.ID = uuid.Generate()
	alloc2.Job = alloc.Job
	alloc2.JobID = alloc.JobID
	alloc2.PreviousAllocation = alloc.ID
	if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the chained allocation is being tracked as blocked
	testutil.WaitForResult(func() (bool, error) {
		ar := c1.getAllocRunners()[alloc2.ID]
		if ar == nil {
			return false, fmt.Errorf("alloc 2's alloc runner does not exist")
		}
		if !ar.IsWaiting() {
			return false, fmt.Errorf("alloc 2 is not blocked")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Change the desired state of the parent alloc to stop
	alloc1 := alloc.Copy()
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that there are no blocked allocations
	testutil.WaitForResult(func() (bool, error) {
		for id, ar := range c1.getAllocRunners() {
			if ar.IsWaiting() {
				return false, fmt.Errorf("%q still blocked", id)
			}
			if ar.IsMigrating() {
				return false, fmt.Errorf("%q still migrating", id)
			}
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	for _, ar := range c1.getAllocRunners() {
		ar.Destroy()
	}

	for _, ar := range c1.getAllocRunners() {
		<-ar.WaitCh()
	}
}

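// TestClient_ValidateMigrateToken_ValidToken verifies that a migrate token
// generated with the client's secret node ID is accepted when ACLs are
// enabled.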
func TestClient_ValidateMigrateToken_ValidToken(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	c := TestClient(t, func(c *config.Config) {
		c.ACLEnabled = true
	})
	defer c.Shutdown()

	alloc := mock.Alloc()
	validToken, err := structs.GenerateMigrateToken(alloc.ID, c.secretNodeID())
	assert.Nil(err)

	assert.Equal(c.ValidateMigrateToken(alloc.ID, validToken), true)
}

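// TestClient_ValidateMigrateToken_InvalidToken verifies that empty or
// malformed migrate tokens are rejected when ACLs are enabled.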
func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	c := TestClient(t, func(c *config.Config) {
		c.ACLEnabled = true
	})
	defer c.Shutdown()

	assert.Equal(c.ValidateMigrateToken("", ""), false)

	alloc := mock.Alloc()
	assert.Equal(c.ValidateMigrateToken(alloc.ID, alloc.ID), false)
	assert.Equal(c.ValidateMigrateToken(alloc.ID, ""), false)
}

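// TestClient_ValidateMigrateToken_ACLDisabled verifies that migrate token
// validation always passes when ACLs are disabled.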
func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	c := TestClient(t, func(c *config.Config) {})
	defer c.Shutdown()

	assert.Equal(c.ValidateMigrateToken("", ""), true)
}

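// TestClient_ReloadTLS_UpgradePlaintextToTLS verifies that reloading the
// client's TLS configuration upgrades its RPC connections from plaintext to
// TLS, after which RPCs to the still-plaintext server fail.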
func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1, addr := testServer(t, func(c *nomad.Config) {
		c.Region = "regionFoo"
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	// Registering a node over plaintext should succeed
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}

		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err != nil {
				return false, fmt.Errorf("client RPC failed when it should have succeeded:\n%+v", err)
			}
			return true, nil
		},
			func(err error) {
				t.Fatalf(err.Error())
			},
		)
	}

	newConfig := &nconfig.TLSConfig{
		EnableHTTP:           true,
		EnableRPC:            true,
		VerifyServerHostname: true,
		CAFile:               cafile,
		CertFile:             foocert,
		KeyFile:              fookey,
	}

	err := c1.reloadTLSConnections(newConfig)
	assert.Nil(err)

	// Registering a node over plaintext should fail after the node has upgraded
	// to TLS
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}
		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", err)
			}
			return true, nil
		},
			func(err error) {
				t.Fatalf(err.Error())
			},
		)
	}
}

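// TestClient_ReloadTLS_DowngradeTLSToPlaintext verifies that reloading the
// client's TLS configuration downgrades it from TLS to plaintext, after which
// RPCs to the plaintext server succeed.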
func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1, addr := testServer(t, func(c *nomad.Config) {
		c.Region = "regionFoo"
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer c1.Shutdown()

	// assert that when one node is running in encrypted mode, an RPC request to a
	// node running in plaintext mode should fail
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}
		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed :\n%+v", err)
			}
			return true, nil
		}, func(err error) {
			t.Fatalf(err.Error())
		},
		)
	}

	newConfig := &nconfig.TLSConfig{}

	err := c1.reloadTLSConnections(newConfig)
	assert.Nil(err)

	// assert that when both nodes are in plaintext mode, an RPC request should
	// succeed
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}
		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err != nil {
				return false, fmt.Errorf("client RPC failed when it should have succeeded:\n%+v", err)
			}
			return true, nil
		}, func(err error) {
			t.Fatalf(err.Error())
		},
		)
	}
}

// TestClient_ServerList tests client methods that interact with the internal
// nomad server list.
func TestClient_ServerList(t *testing.T) {
	t.Parallel()
	client := TestClient(t, func(c *config.Config) {})

	if s := client.GetServers(); len(s) != 0 {
		t.Fatalf("expected server list to be empty but found: %+q", s)
	}
	if err := client.SetServers(nil); err != noServersErr {
		t.Fatalf("expected setting an empty list to return a 'no servers' error but received %v", err)
	}
	if err := client.SetServers([]string{"123.456.13123.123.13:80"}); err == nil {
		t.Fatalf("expected setting a bad server to return an error")
	}
	if err := client.SetServers([]string{"123.456.13123.123.13:80", "127.0.0.1:1234", "127.0.0.1"}); err == nil {
		t.Fatalf("expected setting at least one good server to succeed but received: %v", err)
	}
	s := client.GetServers()
	if len(s) != 0 {
		t.Fatalf("expected 2 servers but received: %+q", s)
	}
}