github.com/emate/nomad@v0.8.2-wo-binpacking/client/client_test.go

package client

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"
	"testing"
	"time"

	memdb "github.com/hashicorp/go-memdb"
	"github.com/hashicorp/nomad/client/config"
	"github.com/hashicorp/nomad/client/driver"
	"github.com/hashicorp/nomad/command/agent/consul"
	"github.com/hashicorp/nomad/helper/uuid"
	"github.com/hashicorp/nomad/nomad"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	nconfig "github.com/hashicorp/nomad/nomad/structs/config"
	"github.com/hashicorp/nomad/testutil"
	"github.com/stretchr/testify/assert"

	ctestutil "github.com/hashicorp/nomad/client/testutil"
)

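// testACLServer starts a test Nomad server with ACLs enabled and returns the
// server, its RPC address, and the bootstrap ACL token.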
func testACLServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string, *structs.ACLToken) {
	server, token := nomad.TestACLServer(t, cb)
	return server, server.GetConfig().RPCAddr.String(), token
}

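// testServer starts a plain test Nomad server and returns the server and its
// RPC address.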
func testServer(t *testing.T, cb func(*nomad.Config)) (*nomad.Server, string) {
	server := nomad.TestServer(t, cb)
	return server, server.GetConfig().RPCAddr.String()
}

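// TestClient_StartStop verifies that a client can be created and shut down
// cleanly.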
func TestClient_StartStop(t *testing.T) {
	t.Parallel()
	client := TestClient(t, nil)
	if err := client.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}
}

// Certain labels for metrics are dependent on the client's initial setup. This
// test verifies that the client has properly initialized before we assign
// values to those labels.
func TestClient_BaseLabels(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	client := TestClient(t, nil)
	if err := client.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// directly invoke this function, as otherwise this will fail on a CI build
	// due to a race condition
	client.emitStats()

	baseLabels := client.baseLabels
	assert.NotEqual(0, len(baseLabels))

	nodeID := client.Node().ID
	for _, e := range baseLabels {
		if e.Name == "node_id" {
			assert.Equal(nodeID, e.Value)
		}
	}
}

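// TestClient_RPC verifies that a client configured with a server address can
// reach that server over RPC.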
func TestClient_RPC(t *testing.T) {
	t.Parallel()
	s1, addr := testServer(t, nil)
	defer s1.Shutdown()

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

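// TestClient_RPC_FireRetryWatchers verifies that a successful RPC fires the
// client's RPC retry watcher channel.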
func TestClient_RPC_FireRetryWatchers(t *testing.T) {
	t.Parallel()
	s1, addr := testServer(t, nil)
	defer s1.Shutdown()

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	watcher := c1.rpcRetryWatcher()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	select {
	case <-watcher:
	default:
		t.Fatal("watcher should be fired")
	}
}

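// TestClient_RPC_Passthrough verifies that RPCs succeed when the client is
// wired directly to a server via an in-process RPC handler instead of a
// network address.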
func TestClient_RPC_Passthrough(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// RPC should succeed
	testutil.WaitForResult(func() (bool, error) {
		var out struct{}
		err := c1.RPC("Status.Ping", struct{}{}, &out)
		return err == nil, err
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

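// TestClient_Fingerprint verifies that basic node attributes such as
// kernel.name and cpu.arch get fingerprinted.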
func TestClient_Fingerprint(t *testing.T) {
	t.Parallel()

	c := TestClient(t, nil)
	defer c.Shutdown()

	// Ensure we are fingerprinting
	testutil.WaitForResult(func() (bool, error) {
		node := c.Node()
		if _, ok := node.Attributes["kernel.name"]; !ok {
			return false, fmt.Errorf("Expected value for kernel.name")
		}
		if _, ok := node.Attributes["cpu.arch"]; !ok {
			return false, fmt.Errorf("Expected value for cpu.arch")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

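// TestClient_Fingerprint_Periodic verifies that periodic fingerprinting first
// reports the mock driver as detected and healthy, and then reports it as
// undetected and unhealthy once the driver shuts itself down.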
func TestClient_Fingerprint_Periodic(t *testing.T) {
	t.Parallel()

	c1 := TestClient(t, func(c *config.Config) {
		c.Options = map[string]string{
			driver.ShutdownPeriodicAfter:    "true",
			driver.ShutdownPeriodicDuration: "1",
		}
	})
	defer c1.Shutdown()

	node := c1.config.Node
	{
		// Ensure the mock driver is registered on the client
		testutil.WaitForResult(func() (bool, error) {
			c1.configLock.Lock()
			defer c1.configLock.Unlock()

			// assert that the driver is set on the node attributes
			mockDriverInfoAttr := node.Attributes["driver.mock_driver"]
			if mockDriverInfoAttr == "" {
				return false, fmt.Errorf("mock driver is empty when it should be set on the node attributes")
			}

			mockDriverInfo := node.Drivers["mock_driver"]

			// assert that the Driver information for the node is also set correctly
			if mockDriverInfo == nil {
				return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers")
			}
			if !mockDriverInfo.Detected {
				return false, fmt.Errorf("mock driver should be set as detected")
			}
			if !mockDriverInfo.Healthy {
				return false, fmt.Errorf("mock driver should be set as healthy")
			}
			if mockDriverInfo.HealthDescription == "" {
				return false, fmt.Errorf("mock driver description should not be empty")
			}
			return true, nil
		}, func(err error) {
			t.Fatalf("err: %v", err)
		})
	}

	{
		testutil.WaitForResult(func() (bool, error) {
			c1.configLock.Lock()
			defer c1.configLock.Unlock()
			mockDriverInfo := node.Drivers["mock_driver"]
			// assert that the Driver information for the node is also set correctly
			if mockDriverInfo == nil {
				return false, fmt.Errorf("mock driver is nil when it should be set on node Drivers")
			}
			if mockDriverInfo.Detected {
				return false, fmt.Errorf("mock driver should not be detected after shutdown")
			}
			if mockDriverInfo.Healthy {
				return false, fmt.Errorf("mock driver should not be healthy after shutdown")
			}
			if mockDriverInfo.HealthDescription == "" {
				return false, fmt.Errorf("mock driver description should not be empty")
			}
			return true, nil
		}, func(err error) {
			t.Fatalf("err: %v", err)
		})
	}
}

// TestClient_MixedTLS asserts that when a server is running with TLS enabled
// it will reject any RPC connections from clients that lack TLS. See #2525
func TestClient_MixedTLS(t *testing.T) {
	t.Parallel()
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)
	s1, addr := testServer(t, func(c *nomad.Config) {
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse
	testutil.AssertUntil(100*time.Millisecond,
		func() (bool, error) {
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
		func(err error) {
			t.Fatal(err)
		},
	)
}

// TestClient_BadTLS asserts that when a client and server are running with TLS
// enabled -- but their certificates are signed by different CAs -- they're
// unable to communicate.
func TestClient_BadTLS(t *testing.T) {
	t.Parallel()
	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
		badca   = "../helper/tlsutil/testdata/ca-bad.pem"
		badcert = "../helper/tlsutil/testdata/nomad-bad.pem"
		badkey  = "../helper/tlsutil/testdata/nomad-bad-key.pem"
	)
	s1, addr := testServer(t, func(c *nomad.Config) {
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               badca,
			CertFile:             badcert,
			KeyFile:              badkey,
		}
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse
	testutil.AssertUntil(100*time.Millisecond,
		func() (bool, error) {
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
		func(err error) {
			t.Fatal(err)
		},
	)
}

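// TestClient_Register verifies that a client registers itself with the server
// on startup.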
func TestClient_Register(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// Register should succeed
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.ID == req.NodeID, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

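// TestClient_Heartbeat verifies that a registered node transitions to the
// ready status via heartbeats.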
func TestClient_Heartbeat(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, func(c *nomad.Config) {
		c.MinHeartbeatTTL = 50 * time.Millisecond
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	req := structs.NodeSpecificRequest{
		NodeID:       c1.Node().ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var out structs.SingleNodeResponse

	// The node should heartbeat and transition to the ready status
	testutil.WaitForResult(func() (bool, error) {
		err := s1.RPC("Node.GetNode", &req, &out)
		if err != nil {
			return false, err
		}
		if out.Node == nil {
			return false, fmt.Errorf("missing reg")
		}
		return out.Node.Status == structs.NodeStatusReady, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

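// TestClient_UpdateAllocStatus verifies that the client pushes allocation
// client-status updates back to the server's state store.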
func TestClient_UpdateAllocStatus(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	job := mock.Job()
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job = job
	alloc.JobID = job.ID
	originalStatus := "foo"
	alloc.ClientStatus = originalStatus

	// Insert at zero so they are pulled
	state := s1.State()
	if err := state.UpsertJob(0, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(100, mock.JobSummary(alloc.JobID)); err != nil {
		t.Fatal(err)
	}
	state.UpsertAllocs(101, []*structs.Allocation{alloc})

	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("no such alloc")
		}
		if out.ClientStatus == originalStatus {
			return false, fmt.Errorf("Alloc client status not updated; got %v", out.ClientStatus)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

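// TestClient_WatchAllocs verifies that the client watches the server for
// allocation changes: new allocs are registered, deleted allocs are garbage
// collected, and updated allocs are applied to the alloc runners.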
func TestClient_WatchAllocs(t *testing.T) {
	t.Parallel()
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.JobID = job.ID
	alloc1.Job = job
	alloc1.NodeID = c1.Node().ID
	alloc2 := mock.Alloc()
	alloc2.NodeID = c1.Node().ID
	alloc2.JobID = job.ID
	alloc2.Job = job

	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	err := state.UpsertAllocs(102, []*structs.Allocation{alloc1, alloc2})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Both allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 2, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Delete one allocation
	if err := state.DeleteEval(103, nil, []string{alloc1.ID}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Update the other allocation. Have to make a copy because the allocs are
	// shared in memory in the test and the modify index would be updated in the
	// alloc runner.
	alloc2_2 := alloc2.Copy()
	alloc2_2.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(104, []*structs.Allocation{alloc2_2}); err != nil {
		t.Fatalf("err upserting stopped alloc: %v", err)
	}

	// One allocation should get GC'd and removed
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		num := len(c1.allocs)
		c1.allocLock.RUnlock()
		return num == 1, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// The other allocation should be updated
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc2.ID]
		c1.allocLock.RUnlock()
		return ar.Alloc().DesiredStatus == structs.AllocDesiredStatusStop, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

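// waitTilNodeReady blocks until the client's node reaches the ready status,
// failing the test on timeout.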
func waitTilNodeReady(client *Client, t *testing.T) {
	testutil.WaitForResult(func() (bool, error) {
		n := client.Node()
		if n.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("node not registered")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}

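// TestClient_SaveRestoreState verifies that a non-dev-mode client persists its
// allocation state on shutdown, and that a new client built from the same
// config restores that state and keeps the allocation alive.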
func TestClient_SaveRestoreState(t *testing.T) {
	t.Parallel()
	ctestutil.ExecCompatible(t)
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.DevMode = false
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait until the node is ready
	waitTilNodeReady(c1, t)

	// Create mock allocations
	job := mock.Job()
	alloc1 := mock.Alloc()
	alloc1.NodeID = c1.Node().ID
	alloc1.Job = job
	alloc1.JobID = job.ID
	alloc1.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	task := alloc1.Job.TaskGroups[0].Tasks[0]
	task.Config["run_for"] = "10s"

	state := s1.State()
	if err := state.UpsertJob(100, job); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertJobSummary(101, mock.JobSummary(alloc1.JobID)); err != nil {
		t.Fatal(err)
	}
	if err := state.UpsertAllocs(102, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Allocations should get registered
	testutil.WaitForResult(func() (bool, error) {
		c1.allocLock.RLock()
		ar := c1.allocs[alloc1.ID]
		c1.allocLock.RUnlock()
		if ar == nil {
			return false, fmt.Errorf("nil alloc runner")
		}
		if ar.Alloc().ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("client status: got %v; want %v", ar.Alloc().ClientStatus, structs.AllocClientStatusRunning)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Shut down the client; this persists its state
	if err := c1.Shutdown(); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create a new client
	logger := log.New(c1.config.LogOutput, "", log.LstdFlags)
	catalog := consul.NewMockCatalog(logger)
	mockService := newMockConsulServiceClient(t)
	mockService.logger = logger
	c2, err := NewClient(c1.config, catalog, mockService, logger)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	defer c2.Shutdown()

	// Ensure the allocation is running
	testutil.WaitForResult(func() (bool, error) {
		c2.allocLock.RLock()
		ar := c2.allocs[alloc1.ID]
		c2.allocLock.RUnlock()
		status := ar.Alloc().ClientStatus
		alive := status == structs.AllocClientStatusRunning || status == structs.AllocClientStatusPending
		if !alive {
			return false, fmt.Errorf("incorrect client status: %#v", ar.Alloc())
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	for _, ar := range c2.getAllocRunners() {
		ar.Destroy()
	}

	for _, ar := range c2.getAllocRunners() {
		<-ar.WaitCh()
	}
}

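// TestClient_Init verifies that client initialization creates the allocation
// directory.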
func TestClient_Init(t *testing.T) {
	t.Parallel()
	dir, err := ioutil.TempDir("", "nomad")
	if err != nil {
		t.Fatalf("err: %s", err)
	}
	defer os.RemoveAll(dir)
	allocDir := filepath.Join(dir, "alloc")

	client := &Client{
		config: &config.Config{
			AllocDir: allocDir,
		},
		logger: log.New(os.Stderr, "", log.LstdFlags),
	}
	if err := client.init(); err != nil {
		t.Fatalf("err: %s", err)
	}

	if _, err := os.Stat(allocDir); err != nil {
		t.Fatalf("err: %s", err)
	}
}

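// TestClient_BlockedAllocations verifies that an allocation chained to a
// previous allocation stays blocked until its predecessor is stopped.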
func TestClient_BlockedAllocations(t *testing.T) {
	t.Parallel()
	s1, _ := testServer(t, nil)
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	c1 := TestClient(t, func(c *config.Config) {
		c.RPCHandler = s1
	})
	defer c1.Shutdown()

	// Wait for the node to be ready
	state := s1.State()
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.NodeByID(ws, c1.Node().ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.Status != structs.NodeStatusReady {
			return false, fmt.Errorf("bad node: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add an allocation
	alloc := mock.Alloc()
	alloc.NodeID = c1.Node().ID
	alloc.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
	alloc.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
		"kill_after":  "1s",
		"run_for":     "100s",
		"exit_code":   0,
		"exit_signal": 0,
		"exit_err":    "",
	}

	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	state.UpsertAllocs(100, []*structs.Allocation{alloc})

	// Wait until the client downloads and starts the allocation
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		out, err := state.AllocByID(ws, alloc.ID)
		if err != nil {
			return false, err
		}
		if out == nil || out.ClientStatus != structs.AllocClientStatusRunning {
			return false, fmt.Errorf("bad alloc: %#v", out)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Add a new chained alloc
	alloc2 := alloc.Copy()
	alloc2.ID = uuid.Generate()
	alloc2.Job = alloc.Job
	alloc2.JobID = alloc.JobID
	alloc2.PreviousAllocation = alloc.ID
	if err := state.UpsertAllocs(200, []*structs.Allocation{alloc2}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that the chained allocation is being tracked as blocked
	testutil.WaitForResult(func() (bool, error) {
		ar := c1.getAllocRunners()[alloc2.ID]
		if ar == nil {
			return false, fmt.Errorf("alloc 2's alloc runner does not exist")
		}
		if !ar.IsWaiting() {
			return false, fmt.Errorf("alloc 2 is not blocked")
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Change the desired state of the parent alloc to stop
	alloc1 := alloc.Copy()
	alloc1.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(300, []*structs.Allocation{alloc1}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Ensure that there are no blocked allocations
	testutil.WaitForResult(func() (bool, error) {
		for id, ar := range c1.getAllocRunners() {
			if ar.IsWaiting() {
				return false, fmt.Errorf("%q still blocked", id)
			}
			if ar.IsMigrating() {
				return false, fmt.Errorf("%q still migrating", id)
			}
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Destroy all the allocations
	for _, ar := range c1.getAllocRunners() {
		ar.Destroy()
	}

	for _, ar := range c1.getAllocRunners() {
		<-ar.WaitCh()
	}
}

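// TestClient_ValidateMigrateToken_ValidToken verifies that a migrate token
// generated from the alloc ID and the node's secret ID validates.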
func TestClient_ValidateMigrateToken_ValidToken(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	c := TestClient(t, func(c *config.Config) {
		c.ACLEnabled = true
	})
	defer c.Shutdown()

	alloc := mock.Alloc()
	validToken, err := structs.GenerateMigrateToken(alloc.ID, c.secretNodeID())
	assert.Nil(err)

	assert.Equal(c.ValidateMigrateToken(alloc.ID, validToken), true)
}

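// TestClient_ValidateMigrateToken_InvalidToken verifies that empty or
// malformed migrate tokens are rejected when ACLs are enabled.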
func TestClient_ValidateMigrateToken_InvalidToken(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	c := TestClient(t, func(c *config.Config) {
		c.ACLEnabled = true
	})
	defer c.Shutdown()

	assert.Equal(c.ValidateMigrateToken("", ""), false)

	alloc := mock.Alloc()
	assert.Equal(c.ValidateMigrateToken(alloc.ID, alloc.ID), false)
	assert.Equal(c.ValidateMigrateToken(alloc.ID, ""), false)
}

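// TestClient_ValidateMigrateToken_ACLDisabled verifies that migrate token
// validation is skipped when ACLs are disabled.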
func TestClient_ValidateMigrateToken_ACLDisabled(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	c := TestClient(t, func(c *config.Config) {})
	defer c.Shutdown()

	assert.Equal(c.ValidateMigrateToken("", ""), true)
}

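// TestClient_ReloadTLS_UpgradePlaintextToTLS verifies that a client can
// upgrade from plaintext to TLS at runtime: plaintext RPCs succeed before the
// reload and fail afterwards.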
func TestClient_ReloadTLS_UpgradePlaintextToTLS(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1, addr := testServer(t, func(c *nomad.Config) {
		c.Region = "regionFoo"
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
	})
	defer c1.Shutdown()

	// Registering a node over plaintext should succeed
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}

		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err != nil {
				return false, fmt.Errorf("client RPC failed when it should have succeeded:\n%+v", err)
			}
			return true, nil
		},
			func(err error) {
				t.Fatal(err)
			},
		)
	}

	newConfig := &nconfig.TLSConfig{
		EnableHTTP:           true,
		EnableRPC:            true,
		VerifyServerHostname: true,
		CAFile:               cafile,
		CertFile:             foocert,
		KeyFile:              fookey,
	}

	err := c1.reloadTLSConnections(newConfig)
	assert.Nil(err)

	// Registering a node over plaintext should fail after the node has upgraded
	// to TLS
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}
		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		},
			func(err error) {
				t.Fatal(err)
			},
		)
	}
}

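// TestClient_ReloadTLS_DowngradeTLSToPlaintext verifies that a client can drop
// TLS at runtime: RPCs to a plaintext server fail while the client requires
// TLS and succeed after the downgrade.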
func TestClient_ReloadTLS_DowngradeTLSToPlaintext(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1, addr := testServer(t, func(c *nomad.Config) {
		c.Region = "regionFoo"
	})
	defer s1.Shutdown()
	testutil.WaitForLeader(t, s1.RPC)

	const (
		cafile  = "../helper/tlsutil/testdata/ca.pem"
		foocert = "../helper/tlsutil/testdata/nomad-foo.pem"
		fookey  = "../helper/tlsutil/testdata/nomad-foo-key.pem"
	)

	c1 := TestClient(t, func(c *config.Config) {
		c.Servers = []string{addr}
		c.TLSConfig = &nconfig.TLSConfig{
			EnableHTTP:           true,
			EnableRPC:            true,
			VerifyServerHostname: true,
			CAFile:               cafile,
			CertFile:             foocert,
			KeyFile:              fookey,
		}
	})
	defer c1.Shutdown()

	// assert that when one node is running in encrypted mode, an RPC request to
	// a node running in plaintext mode should fail
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}
		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err == nil {
				return false, fmt.Errorf("client RPC succeeded when it should have failed:\n%+v", out)
			}
			return true, nil
		}, func(err error) {
			t.Fatal(err)
		},
		)
	}

	newConfig := &nconfig.TLSConfig{}

	err := c1.reloadTLSConnections(newConfig)
	assert.Nil(err)

	// assert that when both nodes are in plaintext mode, an RPC request should
	// succeed
	{
		req := structs.NodeSpecificRequest{
			NodeID:       c1.Node().ID,
			QueryOptions: structs.QueryOptions{Region: "regionFoo"},
		}
		testutil.WaitForResult(func() (bool, error) {
			var out structs.SingleNodeResponse
			err := c1.RPC("Node.GetNode", &req, &out)
			if err != nil {
				return false, fmt.Errorf("client RPC failed when it should have succeeded:\n%+v", err)
			}
			return true, nil
		}, func(err error) {
			t.Fatal(err)
		},
		)
	}
}

// TestClient_ServerList tests client methods that interact with the internal
// nomad server list.
func TestClient_ServerList(t *testing.T) {
	t.Parallel()
	client := TestClient(t, func(c *config.Config) {})

	if s := client.GetServers(); len(s) != 0 {
		t.Fatalf("expected server list to be empty but found: %+q", s)
	}
	if err := client.SetServers(nil); err != noServersErr {
		t.Fatalf("expected setting an empty list to return a 'no servers' error but received %v", err)
	}
	if err := client.SetServers([]string{"123.456.13123.123.13:80"}); err == nil {
		t.Fatalf("expected setting a bad server to return an error")
	}
	if err := client.SetServers([]string{"123.456.13123.123.13:80", "127.0.0.1:1234", "127.0.0.1"}); err != nil {
		t.Fatalf("expected setting at least one good server to succeed but received: %v", err)
	}
	s := client.GetServers()
	if len(s) != 2 {
		t.Fatalf("expected 2 servers but received: %+q", s)
	}
}