github.com/djenriquez/nomad-1@v0.8.1/client/client_test.go

package client

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/driver"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	nconfig "github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"

	ctestutil "github.com/hashicorp/nomad/client/testutil"
)

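// testACLServer starts a test server with ACLs enabled and returns the
// server, its RPC address, and the bootstrap ACL token.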
func testACLServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string, *structs.ACLToken) {
	server, token := nomad.TestACLServer(t, cb)
	return server, server.GetConfig().RPCAddr.String(), token
}

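// testServer starts a plain test server and returns it along with its RPC
// address.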
func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
	server := nomad.TestServer(t, cb)
	return server, server.GetConfig().RPCAddr.String()
}

func TestClient_StartStop(t *testing.T) {
	t.Parallel()
	client := TestClient(t, nil)
	if err := client.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
}

// Certain labels for metrics are dependent on client initial setup. This tests
// that the client has properly initialized before we assign values to labels.
func TestClient_BaseLabels(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	client := TestClient(t, nil)
	if err := client.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Invoke this function directly; otherwise it can fail on a CI build due
	// to a race condition.
	client.emitStats()

	baseLabels := client.baseLabels
	assert.NotEqual(0, len(baseLabels))

	nodeID := client.Node().ID
	for _, e := range baseLabels {
		if e.Name == "node_id" {
			assert.Equal(nodeID, e.Value)
		}
	}
}

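// TestClient_RPC asserts that a client configured with a server address can
// reach that server over the network RPC path.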
func TestClient_RPC(t *testing.T) {
	t.Parallel()
	s1, addr := testServer(t, nil)
	defer s1.Shutdown()

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

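// TestClient_RPC_FireRetryWatchers asserts that a watcher obtained from
// rpcRetryWatcher has fired by the time an RPC succeeds.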
func TestClient_RPC_FireRetryWatchers(t *testing.T) {
	t.Parallel()
	s1, addr := testServer(t, nil)
	defer s1.Shutdown()

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	watcher := c1.rpcRetryWatcher()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	select {
	case <-watcher:
	default:
		t.Fatal("watcher should have fired")
	}
}

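// TestClient_RPC_Passthrough asserts that RPCs are served in-process when the
// client is configured with a direct RPCHandler rather than a server address.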
func TestClient_RPC_Passthrough(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

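// TestClient_Fingerprint asserts that basic node attributes such as
// kernel.name and cpu.arch get fingerprinted on startup.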
func TestClient_Fingerprint(t *testing.T) {
	t.Parallel()
	c := TestClient(t, nil)
	defer c.Shutdown()

	// Ensure we are fingerprinting
	testutil.WaitForResult(func() (bool, error) {
		node := c.Node()
		if _, ok := node.Attributes["kernel.name"]; !ok {
			return false, fmt.Errorf("Expected value for kernel.name")
		}
		if _, ok := node.Attributes["cpu.arch"]; !ok {
			return false, fmt.Errorf("Expected value for cpu.arch")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

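// TestClient_Fingerprint_Periodic asserts that driver fingerprinting is re-run
// over time: the mock driver is configured (via ShutdownPeriodicAfter and
// ShutdownPeriodicDuration) to shut itself down after a second, so it should
// first report detected and healthy and later undetected and unhealthy.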
func TestClient_Fingerprint_Periodic(t *testing.T) {
	driver.CheckForMockDriver(t)
	t.Parallel()

	// These constants are only defined when the nomad_test build tag is
	// enabled, so they fail the linter without explicit disabling.
	c1 := TestClient(t, func(c *config.Config) {
		c.Options = map[string]string{
			driver.ShutdownPeriodicAfter:    "true",
			driver.ShutdownPeriodicDuration: "1",
		}
	})
	defer c1.Shutdown()

	node := c1.config.Node
	{
		// Ensure the mock driver is registered on the client
		testutil.WaitForResult(func() (bool, error) {
			c1.configLock.Lock()
			defer c1.configLock.Unlock()

			// assert that the driver is set on the node attributes
			mockDriverInfoAttr := node.Attributes["driver.mock_driver"]
			if mockDriverInfoAttr == "" {
				return false, fmt.Errorf("mock driver is empty when it should be set on the node attributes")
			}

			mockDriverInfo := node.Drivers["mock_driver"]

			// assert that the Driver information for the node is also set correctly
			if mockDriverInfo == nil {
				return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers")
			}
			if !mockDriverInfo.Detected {
				return false, fmt.Errorf("mock driver should be set as detected")
			}
			if !mockDriverInfo.Healthy {
				return false, fmt.Errorf("mock driver should be set as healthy")
			}
			if mockDriverInfo.HealthDescription == "" {
				return false, fmt.Errorf("mock driver description should not be empty")
			}
			return true, nil
		}, func(err error) {
			t.Fatalf("err: %v", err)
		})
	}

	{
		testutil.WaitForResult(func() (bool, error) {
			c1.configLock.Lock()
			defer c1.configLock.Unlock()
			mockDriverInfo := node.Drivers["mock_driver"]
			// assert that the Driver information for the node is also set correctly
			if mockDriverInfo == nil {
				return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers")
			}
			if mockDriverInfo.Detected {
				return false, fmt.Errorf("mock driver should no longer be detected")
			}
			if mockDriverInfo.Healthy {
				return false, fmt.Errorf("mock driver should no longer be healthy")
			}
			if mockDriverInfo.HealthDescription == "" {
				return false, fmt.Errorf("mock driver description should not be empty")
			}
			return true, nil
		}, func(err error) {
			t.Fatalf("err: %v", err)
		})
	}
}

// TestClient_MixedTLS asserts that when a server is running with TLS enabled
// it will reject any RPC connections from clients that lack TLS. See #2525
func TestClient_MixedTLS(t *testing.T) {
	t.Parallel()
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)
	s1, addr := testServer(t, func(c *nomad.Config) {
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse
	testutil.AssertUntil(100*time.Millisecond,
		func() (bool, error) {
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
		func(err error) {
			t.Fatalf("err: %v", err)
		},
	)
}

// TestClient_BadTLS asserts that when a client and server are running with TLS
// enabled -- but their certificates are signed by different CAs -- they're
// unable to communicate.
func TestClient_BadTLS(t *testing.T) {
	t.Parallel()
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
		badca   = "../helper/tlsutil/testdata/ca-bad.pem"
		badcert = "../helper/tlsutil/testdata/nomad-bad.pem"
		badkey  = "../helper/tlsutil/testdata/nomad-bad-key.pem"
	)
	s1, addr := testServer(t, func(c *nomad.Config) {
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               badca,
			CertFile:             badcert,
			KeyFile:              badkey,
		}
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse
	testutil.AssertUntil(100*time.Millisecond,
		func() (bool, error) {
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
		func(err error) {
			t.Fatalf("err: %v", err)
		},
	)
}

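// TestClient_Register asserts that a client registers its node with the
// server on startup.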
func TestClient_Register(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// Register should succeed
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.ID == req.NodeID, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

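// TestClient_Heartbeat asserts that a registered node transitions to the
// ready status once the client heartbeats the server.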
func TestClient_Heartbeat(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, func(c *nomad.Config) {
		c.MinHeartbeatTTL = 50 * time.Millisecond
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// The node should register and become ready
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.Status == structs.NodeStatusReady, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

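// TestClient_UpdateAllocStatus asserts that the client reports allocation
// client-status changes back to the server state.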
func TestClient_UpdateAllocStatus(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	job := mock.Job()
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job = job
	alloc.JobID = job.ID
	originalStatus := "foo"
	alloc.ClientStatus = originalStatus

	// Insert at zero so they are pulled
	state := s1.State()
	if err := state.UpsertJob(0, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(100, mock.JobSummary(alloc.JobID)); err != nil {
		t.Fatal(err)
	}
	state.UpsertAllocs(101, []*structs.Allocation{alloc})

	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("no such alloc")
		}
		if out.ClientStatus == originalStatus {
			return false, fmt.Errorf("Alloc client status not updated; got %v", out.ClientStatus)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

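// TestClient_WatchAllocs asserts that the client tracks allocations assigned
// to its node, removes allocations deleted on the server, and picks up
// updates to the allocations that remain.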
func TestClient_WatchAllocs(t *testing.T) {
	t.Parallel()
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.JobID = job.ID
	alloc1.Job = job
	alloc1.NodeID = c1.Node().ID
	alloc2 := mock.Alloc()
	alloc2.NodeID = c1.Node().ID
	alloc2.JobID = job.ID
	alloc2.Job = job

	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 2, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Delete one allocation
	if err := state.DeleteEval(103, nil, []string{alloc1.ID}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the other allocation. Have to make a copy because the allocs are
	// shared in memory in the test and the modify index would be updated in the
	// alloc runner.
	alloc2_2 := alloc2.Copy()
	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(104, []*structs.Allocation{alloc2_2}); err != nil {
		t.Fatalf("err upserting stopped alloc: %v", err)
	}

	// One allocation should get GC'd and removed
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 1, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// The other allocation should get updated
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc2.ID]
		c1.allocLock.RUnlock()
		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

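// waitTilNodeReady blocks until the client's node reaches the ready status,
// failing the test on timeout.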
func waitTilNodeReady(client *Client, t *testing.T) {
	testutil.WaitForResult(func() (bool, error) {
		n := client.Node()
		if n.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("node not registered")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

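// TestClient_SaveRestoreState asserts that a non-dev-mode client persists its
// allocation state on shutdown and that a new client created with the same
// config restores the still-running allocation.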
func TestClient_SaveRestoreState(t *testing.T) {
	t.Parallel()
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.DevMode = false
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.NodeID = c1.Node().ID
	alloc1.Job = job
	alloc1.JobID = job.ID
	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	task := alloc1.Job.TaskGroups[0].Tasks[0]
	task.Config["run_for"] = "10s"

	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc1.ID]
		c1.allocLock.RUnlock()
		if ar == nil {
			return false, fmt.Errorf("nil alloc runner")
		}
		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Shut down the client; this saves its state
	if err := c1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new client
	logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
	catalog := consul.NewMockCatalog(logger)
	mockService := newMockConsulServiceClient(t)
	mockService.logger = logger
	c2, err := NewClient(c1.config, catalog, mockService, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer c2.Shutdown()

	// Ensure the allocation is running
	testutil.WaitForResult(func() (bool, error) {
		c2.allocLock.RLock()
		ar := c2.allocs[alloc1.ID]
		c2.allocLock.RUnlock()
		status := ar.Alloc().ClientStatus
		alive := status == structs.AllocClientStatusRunning || status == structs.AllocClientStatusPending
		if !alive {
			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	for _, ar := range c2.getAllocRunners() {
		ar.Destroy()
	}

	for _, ar := range c2.getAllocRunners() {
		<-ar.WaitCh()
	}
}

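// TestClient_Init asserts that client.init creates the allocation directory.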
func TestClient_Init(t *testing.T) {
	t.Parallel()
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.RemoveAll(dir)
	allocDir := filepath.Join(dir, "alloc")

	client := &Client{
		config: &config.Config{
			AllocDir: allocDir,
		},
		logger: log.New(os.Stderr, "", log.LstdFlags),
	}
	if err := client.init(); err != nil {
		t.Fatalf("err: %s", err)
	}

	if _, err := os.Stat(allocDir); err != nil {
		t.Fatalf("err: %s", err)
	}
}

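// TestClient_BlockedAllocations asserts that an allocation whose
// PreviousAllocation is still running is tracked as blocked, and unblocks
// once the previous allocation is stopped.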
func TestClient_BlockedAllocations(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait for the node to be ready
	state := s1.State()
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.NodeByID(ws, c1.Node().ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("bad node: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add an allocation
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
		"kill_after":  "1s",
		"run_for":     "100s",
		"exit_code":   0,
		"exit_signal": 0,
		"exit_err":    "",
	}

	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	state.UpsertAllocs(100, []*structs.Allocation{alloc})

	// Wait until the client downloads and starts the allocation
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("bad alloc: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add a new chained alloc
	alloc2 := alloc.Copy()
	alloc2.ID = uuid.Generate()
	alloc2.Job = alloc.Job
	alloc2.JobID = alloc.JobID
	alloc2.PreviousAllocation = alloc.ID
	if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the chained allocation is being tracked as blocked
	testutil.WaitForResult(func() (bool, error) {
		ar := c1.getAllocRunners()[alloc2.ID]
		if ar == nil {
			return false, fmt.Errorf("alloc 2's alloc runner does not exist")
		}
		if !ar.IsWaiting() {
			return false, fmt.Errorf("alloc 2 is not blocked")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Change the desired state of the parent alloc to stop
	alloc1 := alloc.Copy()
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that there are no blocked allocations
	testutil.WaitForResult(func() (bool, error) {
		for id, ar := range c1.getAllocRunners() {
			if ar.IsWaiting() {
				return false, fmt.Errorf("%q still blocked", id)
			}
			if ar.IsMigrating() {
				return false, fmt.Errorf("%q still migrating", id)
			}
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	for _, ar := range c1.getAllocRunners() {
		ar.Destroy()
	}

	for _, ar := range c1.getAllocRunners() {
		<-ar.WaitCh()
	}
}

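// TestClient_ValidateMigrateToken_ValidToken asserts that a migrate token
// generated against the node's secret ID validates when ACLs are enabled.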
func TestClient_ValidateMigrateToken_ValidToken(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	c := TestClient(t, func(c *config.Config) {
		c.ACLEnabled = true
	})
	defer c.Shutdown()

	alloc := mock.Alloc()
	validToken, err := structs.GenerateMigrateToken(alloc.ID, c.secretNodeID())
	assert.Nil(err)

	assert.Equal(c.ValidateMigrateToken(alloc.ID, validToken), true)
}

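// TestClient_ValidateMigrateToken_InvalidToken asserts that empty or
// malformed migrate tokens are rejected when ACLs are enabled.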
func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	c := TestClient(t, func(c *config.Config) {
		c.ACLEnabled = true
	})
	defer c.Shutdown()

	assert.Equal(c.ValidateMigrateToken("", ""), false)

	alloc := mock.Alloc()
	assert.Equal(c.ValidateMigrateToken(alloc.ID, alloc.ID), false)
	assert.Equal(c.ValidateMigrateToken(alloc.ID, ""), false)
}

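// TestClient_ValidateMigrateToken_ACLDisabled asserts that migrate token
// validation is skipped entirely when ACLs are disabled.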
func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	c := TestClient(t, func(c *config.Config) {})
	defer c.Shutdown()

	assert.Equal(c.ValidateMigrateToken("", ""), true)
}

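// TestClient_ReloadTLS_UpgradePlaintextToTLS asserts that a client can
// upgrade from plaintext to TLS at runtime, after which plaintext RPCs to the
// server fail.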
func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1, addr := testServer(t, func(c *nomad.Config) {
		c.Region = "regionFoo"
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	// Registering a node over plaintext should succeed
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}

		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err != nil {
				return false, fmt.Errorf("client RPC failed when it should have succeeded:\n%+v", err)
			}
			return true, nil
		},
			func(err error) {
				t.Fatalf("err: %v", err)
			},
		)
	}

	newConfig := &nconfig.TLSConfig{
		EnableHTTP:           true,
		EnableRPC:            true,
		VerifyServerHostname: true,
		CAFile:               cafile,
		CertFile:             foocert,
		KeyFile:              fookey,
	}

	err := c1.reloadTLSConnections(newConfig)
	assert.Nil(err)

	// Registering a node over plaintext should fail after the node has upgraded
	// to TLS
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}
		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
			func(err error) {
				t.Fatalf("err: %v", err)
			},
		)
	}
}

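// TestClient_ReloadTLS_DowngradeTLSToPlaintext asserts the reverse: a TLS
// client cannot reach a plaintext server until its TLS configuration is
// reloaded away, after which RPCs succeed.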
func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1, addr := testServer(t, func(c *nomad.Config) {
		c.Region = "regionFoo"
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer c1.Shutdown()

	// assert that when one node is running in encrypted mode, an RPC request to
	// a node running in plaintext mode should fail
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}
		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		}, func(err error) {
			t.Fatalf("err: %v", err)
		},
		)
	}

	newConfig := &nconfig.TLSConfig{}

	err := c1.reloadTLSConnections(newConfig)
	assert.Nil(err)

	// assert that when both nodes are in plaintext mode, an RPC request should
	// succeed
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}
		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err != nil {
				return false, fmt.Errorf("client RPC failed when it should have succeeded:\n%+v", err)
			}
			return true, nil
		}, func(err error) {
			t.Fatalf("err: %v", err)
		},
		)
	}
}

// TestClient_ServerList tests client methods that interact with the internal
// nomad server list.
func TestClient_ServerList(t *testing.T) {
	t.Parallel()
	client := TestClient(t, func(c *config.Config) {})

	if s := client.GetServers(); len(s) != 0 {
		t.Fatalf("expected server list to be empty but found: %+q", s)
	}
	if err := client.SetServers(nil); err != noServersErr {
		t.Fatalf("expected setting an empty list to return a 'no servers' error but received %v", err)
	}
	if err := client.SetServers([]string{"123.456.13123.123.13:80"}); err == nil {
		t.Fatalf("expected setting a bad server to return an error")
	}
	if err := client.SetServers([]string{"123.456.13123.123.13:80", "127.0.0.1:1234", "127.0.0.1"}); err == nil {
		t.Fatalf("expected an error setting servers that cannot be pinged")
	}
	s := client.GetServers()
	if len(s) != 0 {
		t.Fatalf("expected server list to remain empty but received: %+q", s)
	}
}