github.com/bigcommerce/nomad@v0.9.3-bc/nomad/node_endpoint_test.go (about)

     1  package nomad
     2  
     3  import (
     4  	"fmt"
     5  	"net"
     6  	"reflect"
     7  	"strings"
     8  	"testing"
     9  	"time"
    10  
    11  	memdb "github.com/hashicorp/go-memdb"
    12  	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
    13  	"github.com/hashicorp/nomad/acl"
    14  	"github.com/hashicorp/nomad/helper/uuid"
    15  	"github.com/hashicorp/nomad/nomad/mock"
    16  	"github.com/hashicorp/nomad/nomad/state"
    17  	"github.com/hashicorp/nomad/nomad/structs"
    18  	"github.com/hashicorp/nomad/testutil"
    19  	vapi "github.com/hashicorp/vault/api"
    20  	"github.com/stretchr/testify/assert"
    21  	"github.com/stretchr/testify/require"
    22  )
    23  
    24  func TestClientEndpoint_Register(t *testing.T) {
    25  	t.Parallel()
    26  	require := require.New(t)
    27  	s1 := TestServer(t, nil)
    28  	defer s1.Shutdown()
    29  	codec := rpcClient(t, s1)
    30  	testutil.WaitForLeader(t, s1.RPC)
    31  
    32  	// Check that we have no client connections
    33  	require.Empty(s1.connectedNodes())
    34  
    35  	// Create the register request
    36  	node := mock.Node()
    37  	req := &structs.NodeRegisterRequest{
    38  		Node:         node,
    39  		WriteRequest: structs.WriteRequest{Region: "global"},
    40  	}
    41  
    42  	// Fetch the response
    43  	var resp structs.GenericResponse
    44  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
    45  		t.Fatalf("err: %v", err)
    46  	}
    47  	if resp.Index == 0 {
    48  		t.Fatalf("bad index: %d", resp.Index)
    49  	}
    50  
    51  	// Check that we have the client connections
    52  	nodes := s1.connectedNodes()
    53  	require.Len(nodes, 1)
    54  	require.Contains(nodes, node.ID)
    55  
    56  	// Check for the node in the FSM
    57  	state := s1.fsm.State()
    58  	ws := memdb.NewWatchSet()
    59  	out, err := state.NodeByID(ws, node.ID)
    60  	if err != nil {
    61  		t.Fatalf("err: %v", err)
    62  	}
    63  	if out == nil {
    64  		t.Fatalf("expected node")
    65  	}
    66  	if out.CreateIndex != resp.Index {
    67  		t.Fatalf("index mis-match")
    68  	}
    69  	if out.ComputedClass == "" {
    70  		t.Fatal("ComputedClass not set")
    71  	}
    72  
    73  	// Close the connection and check that we remove the client connections
    74  	require.Nil(codec.Close())
    75  	testutil.WaitForResult(func() (bool, error) {
    76  		nodes := s1.connectedNodes()
    77  		return len(nodes) == 0, nil
    78  	}, func(err error) {
    79  		t.Fatalf("should have no clients")
    80  	})
    81  }
    82  
// This test asserts that we only track node connections if they are not from
// forwarded RPCs. This is essential otherwise we will think a Yamux session to
// a Nomad server is actually the session to the node.
func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	// Two-server cluster so a registration sent to the non-leader is
	// forwarded to the leader over server-to-server RPC.
	s1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
	})

	defer s1.Shutdown()
	s2 := TestServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	// Determine the non-leader server
	var leader, nonLeader *Server
	if s1.IsLeader() {
		leader = s1
		nonLeader = s2
	} else {
		leader = s2
		nonLeader = s1
	}

	// Send the requests to the non-leader so the write must be forwarded.
	codec := rpcClient(t, nonLeader)

	// Check that we have no client connections
	require.Empty(nonLeader.connectedNodes())
	require.Empty(leader.connectedNodes())

	// Create the register request
	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check that we have the client connections on the non leader — the
	// server the node actually dialed.
	nodes := nonLeader.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check that we have no client connections on the leader: the forwarded
	// RPC session must NOT be mistaken for a node connection.
	nodes = leader.connectedNodes()
	require.Empty(nodes)

	// Check for the node in the FSM; retry because replication of the
	// applied write to the leader's state view may lag slightly.
	state := leader.State()
	testutil.WaitForResult(func() (bool, error) {
		out, err := state.NodeByID(nil, node.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("expected node")
		}
		if out.CreateIndex != resp.Index {
			return false, fmt.Errorf("index mis-match")
		}
		if out.ComputedClass == "" {
			return false, fmt.Errorf("ComputedClass not set")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := nonLeader.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}
   175  
   176  func TestClientEndpoint_Register_SecretMismatch(t *testing.T) {
   177  	t.Parallel()
   178  	s1 := TestServer(t, nil)
   179  	defer s1.Shutdown()
   180  	codec := rpcClient(t, s1)
   181  	testutil.WaitForLeader(t, s1.RPC)
   182  
   183  	// Create the register request
   184  	node := mock.Node()
   185  	req := &structs.NodeRegisterRequest{
   186  		Node:         node,
   187  		WriteRequest: structs.WriteRequest{Region: "global"},
   188  	}
   189  
   190  	// Fetch the response
   191  	var resp structs.GenericResponse
   192  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
   193  		t.Fatalf("err: %v", err)
   194  	}
   195  
   196  	// Update the nodes SecretID
   197  	node.SecretID = uuid.Generate()
   198  	err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp)
   199  	if err == nil || !strings.Contains(err.Error(), "Not registering") {
   200  		t.Fatalf("Expecting error regarding mismatching secret id: %v", err)
   201  	}
   202  }
   203  
   204  func TestClientEndpoint_Deregister(t *testing.T) {
   205  	t.Parallel()
   206  	s1 := TestServer(t, nil)
   207  	defer s1.Shutdown()
   208  	codec := rpcClient(t, s1)
   209  	testutil.WaitForLeader(t, s1.RPC)
   210  
   211  	// Create the register request
   212  	node := mock.Node()
   213  	reg := &structs.NodeRegisterRequest{
   214  		Node:         node,
   215  		WriteRequest: structs.WriteRequest{Region: "global"},
   216  	}
   217  
   218  	// Fetch the response
   219  	var resp structs.GenericResponse
   220  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   221  		t.Fatalf("err: %v", err)
   222  	}
   223  
   224  	// Deregister
   225  	dereg := &structs.NodeDeregisterRequest{
   226  		NodeID:       node.ID,
   227  		WriteRequest: structs.WriteRequest{Region: "global"},
   228  	}
   229  	var resp2 structs.GenericResponse
   230  	if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg, &resp2); err != nil {
   231  		t.Fatalf("err: %v", err)
   232  	}
   233  	if resp2.Index == 0 {
   234  		t.Fatalf("bad index: %d", resp2.Index)
   235  	}
   236  
   237  	// Check for the node in the FSM
   238  	state := s1.fsm.State()
   239  	ws := memdb.NewWatchSet()
   240  	out, err := state.NodeByID(ws, node.ID)
   241  	if err != nil {
   242  		t.Fatalf("err: %v", err)
   243  	}
   244  	if out != nil {
   245  		t.Fatalf("unexpected node")
   246  	}
   247  }
   248  
   249  func TestClientEndpoint_Deregister_ACL(t *testing.T) {
   250  	t.Parallel()
   251  	s1, root := TestACLServer(t, nil)
   252  	defer s1.Shutdown()
   253  	codec := rpcClient(t, s1)
   254  	testutil.WaitForLeader(t, s1.RPC)
   255  
   256  	// Create the node
   257  	node := mock.Node()
   258  	node1 := mock.Node()
   259  	state := s1.fsm.State()
   260  	if err := state.UpsertNode(1, node); err != nil {
   261  		t.Fatalf("err: %v", err)
   262  	}
   263  	if err := state.UpsertNode(2, node1); err != nil {
   264  		t.Fatalf("err: %v", err)
   265  	}
   266  
   267  	// Create the policy and tokens
   268  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
   269  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
   270  
   271  	// Deregister without any token and expect it to fail
   272  	dereg := &structs.NodeDeregisterRequest{
   273  		NodeID:       node.ID,
   274  		WriteRequest: structs.WriteRequest{Region: "global"},
   275  	}
   276  	var resp structs.GenericResponse
   277  	if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg, &resp); err == nil {
   278  		t.Fatalf("node de-register succeeded")
   279  	}
   280  
   281  	// Deregister with a valid token
   282  	dereg.AuthToken = validToken.SecretID
   283  	if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg, &resp); err != nil {
   284  		t.Fatalf("err: %v", err)
   285  	}
   286  
   287  	// Check for the node in the FSM
   288  	ws := memdb.NewWatchSet()
   289  	out, err := state.NodeByID(ws, node.ID)
   290  	if err != nil {
   291  		t.Fatalf("err: %v", err)
   292  	}
   293  	if out != nil {
   294  		t.Fatalf("unexpected node")
   295  	}
   296  
   297  	// Deregister with an invalid token.
   298  	dereg1 := &structs.NodeDeregisterRequest{
   299  		NodeID:       node1.ID,
   300  		WriteRequest: structs.WriteRequest{Region: "global"},
   301  	}
   302  	dereg1.AuthToken = invalidToken.SecretID
   303  	if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg1, &resp); err == nil {
   304  		t.Fatalf("rpc should not have succeeded")
   305  	}
   306  
   307  	// Try with a root token
   308  	dereg1.AuthToken = root.SecretID
   309  	if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg1, &resp); err != nil {
   310  		t.Fatalf("err: %v", err)
   311  	}
   312  }
   313  
   314  func TestClientEndpoint_Deregister_Vault(t *testing.T) {
   315  	t.Parallel()
   316  	s1 := TestServer(t, nil)
   317  	defer s1.Shutdown()
   318  	codec := rpcClient(t, s1)
   319  	testutil.WaitForLeader(t, s1.RPC)
   320  
   321  	// Create the register request
   322  	node := mock.Node()
   323  	reg := &structs.NodeRegisterRequest{
   324  		Node:         node,
   325  		WriteRequest: structs.WriteRequest{Region: "global"},
   326  	}
   327  
   328  	// Fetch the response
   329  	var resp structs.GenericResponse
   330  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   331  		t.Fatalf("err: %v", err)
   332  	}
   333  
   334  	// Swap the servers Vault Client
   335  	tvc := &TestVaultClient{}
   336  	s1.vault = tvc
   337  
   338  	// Put some Vault accessors in the state store for that node
   339  	state := s1.fsm.State()
   340  	va1 := mock.VaultAccessor()
   341  	va1.NodeID = node.ID
   342  	va2 := mock.VaultAccessor()
   343  	va2.NodeID = node.ID
   344  	state.UpsertVaultAccessor(100, []*structs.VaultAccessor{va1, va2})
   345  
   346  	// Deregister
   347  	dereg := &structs.NodeDeregisterRequest{
   348  		NodeID:       node.ID,
   349  		WriteRequest: structs.WriteRequest{Region: "global"},
   350  	}
   351  	var resp2 structs.GenericResponse
   352  	if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg, &resp2); err != nil {
   353  		t.Fatalf("err: %v", err)
   354  	}
   355  	if resp2.Index == 0 {
   356  		t.Fatalf("bad index: %d", resp2.Index)
   357  	}
   358  
   359  	// Check for the node in the FSM
   360  	ws := memdb.NewWatchSet()
   361  	out, err := state.NodeByID(ws, node.ID)
   362  	if err != nil {
   363  		t.Fatalf("err: %v", err)
   364  	}
   365  	if out != nil {
   366  		t.Fatalf("unexpected node")
   367  	}
   368  
   369  	// Check that the endpoint revoked the tokens
   370  	if l := len(tvc.RevokedTokens); l != 2 {
   371  		t.Fatalf("Deregister revoked %d tokens; want 2", l)
   372  	}
   373  }
   374  
// TestClientEndpoint_UpdateStatus registers a node, updates its status via
// Node.UpdateStatus, and verifies the heartbeat TTL window, the connection
// tracking, and the FSM's modify index.
func TestClientEndpoint_UpdateStatus(t *testing.T) {
	t.Parallel()
	require := require.New(t)
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Check that we have no client connections
	require.Empty(s1.connectedNodes())

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval: the TTL must fall within
	// [MinHeartbeatTTL, 2*MinHeartbeatTTL].
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Update the status
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusInit,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	// A real status change must be applied through raft, so the index is set.
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for heartbeat interval on the status-update response as well.
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check that we have the client connections
	nodes := s1.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.ModifyIndex != resp2.Index {
		t.Fatalf("index mis-match")
	}

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := s1.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}
   453  
   454  func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) {
   455  	t.Parallel()
   456  	s1 := TestServer(t, nil)
   457  	defer s1.Shutdown()
   458  	codec := rpcClient(t, s1)
   459  	testutil.WaitForLeader(t, s1.RPC)
   460  
   461  	// Create the register request
   462  	node := mock.Node()
   463  	reg := &structs.NodeRegisterRequest{
   464  		Node:         node,
   465  		WriteRequest: structs.WriteRequest{Region: "global"},
   466  	}
   467  
   468  	// Fetch the response
   469  	var resp structs.NodeUpdateResponse
   470  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   471  		t.Fatalf("err: %v", err)
   472  	}
   473  
   474  	// Check for heartbeat interval
   475  	ttl := resp.HeartbeatTTL
   476  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   477  		t.Fatalf("bad: %#v", ttl)
   478  	}
   479  
   480  	// Swap the servers Vault Client
   481  	tvc := &TestVaultClient{}
   482  	s1.vault = tvc
   483  
   484  	// Put some Vault accessors in the state store for that node
   485  	state := s1.fsm.State()
   486  	va1 := mock.VaultAccessor()
   487  	va1.NodeID = node.ID
   488  	va2 := mock.VaultAccessor()
   489  	va2.NodeID = node.ID
   490  	state.UpsertVaultAccessor(100, []*structs.VaultAccessor{va1, va2})
   491  
   492  	// Update the status to be down
   493  	dereg := &structs.NodeUpdateStatusRequest{
   494  		NodeID:       node.ID,
   495  		Status:       structs.NodeStatusDown,
   496  		WriteRequest: structs.WriteRequest{Region: "global"},
   497  	}
   498  	var resp2 structs.NodeUpdateResponse
   499  	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
   500  		t.Fatalf("err: %v", err)
   501  	}
   502  	if resp2.Index == 0 {
   503  		t.Fatalf("bad index: %d", resp2.Index)
   504  	}
   505  
   506  	// Check that the endpoint revoked the tokens
   507  	if l := len(tvc.RevokedTokens); l != 2 {
   508  		t.Fatalf("Deregister revoked %d tokens; want 2", l)
   509  	}
   510  }
   511  
   512  func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) {
   513  	t.Parallel()
   514  	require := require.New(t)
   515  	s1 := TestServer(t, nil)
   516  	defer s1.Shutdown()
   517  	codec := rpcClient(t, s1)
   518  	testutil.WaitForLeader(t, s1.RPC)
   519  
   520  	// Check that we have no client connections
   521  	require.Empty(s1.connectedNodes())
   522  
   523  	// Create the register request but make the node down
   524  	node := mock.Node()
   525  	node.Status = structs.NodeStatusDown
   526  	reg := &structs.NodeRegisterRequest{
   527  		Node:         node,
   528  		WriteRequest: structs.WriteRequest{Region: "global"},
   529  	}
   530  
   531  	// Fetch the response
   532  	var resp structs.NodeUpdateResponse
   533  	require.NoError(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
   534  
   535  	// Update the status
   536  	dereg := &structs.NodeUpdateStatusRequest{
   537  		NodeID:       node.ID,
   538  		Status:       structs.NodeStatusInit,
   539  		WriteRequest: structs.WriteRequest{Region: "global"},
   540  	}
   541  	var resp2 structs.NodeUpdateResponse
   542  	require.NoError(msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2))
   543  	require.NotZero(resp2.Index)
   544  
   545  	// Check for heartbeat interval
   546  	ttl := resp2.HeartbeatTTL
   547  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   548  		t.Fatalf("bad: %#v", ttl)
   549  	}
   550  
   551  	// Check for the node in the FSM
   552  	state := s1.fsm.State()
   553  	ws := memdb.NewWatchSet()
   554  	out, err := state.NodeByID(ws, node.ID)
   555  	require.NoError(err)
   556  	require.NotNil(out)
   557  	require.EqualValues(resp2.Index, out.ModifyIndex)
   558  	require.Len(out.Events, 2)
   559  	require.Equal(NodeHeartbeatEventReregistered, out.Events[1].Message)
   560  }
   561  
   562  func TestClientEndpoint_Register_GetEvals(t *testing.T) {
   563  	t.Parallel()
   564  	s1 := TestServer(t, nil)
   565  	defer s1.Shutdown()
   566  	codec := rpcClient(t, s1)
   567  	testutil.WaitForLeader(t, s1.RPC)
   568  
   569  	// Register a system job.
   570  	job := mock.SystemJob()
   571  	state := s1.fsm.State()
   572  	if err := state.UpsertJob(1, job); err != nil {
   573  		t.Fatalf("err: %v", err)
   574  	}
   575  
   576  	// Create the register request going directly to ready
   577  	node := mock.Node()
   578  	node.Status = structs.NodeStatusReady
   579  	reg := &structs.NodeRegisterRequest{
   580  		Node:         node,
   581  		WriteRequest: structs.WriteRequest{Region: "global"},
   582  	}
   583  
   584  	// Fetch the response
   585  	var resp structs.NodeUpdateResponse
   586  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   587  		t.Fatalf("err: %v", err)
   588  	}
   589  
   590  	// Check for heartbeat interval
   591  	ttl := resp.HeartbeatTTL
   592  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   593  		t.Fatalf("bad: %#v", ttl)
   594  	}
   595  
   596  	// Check for an eval caused by the system job.
   597  	if len(resp.EvalIDs) != 1 {
   598  		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
   599  	}
   600  
   601  	evalID := resp.EvalIDs[0]
   602  	ws := memdb.NewWatchSet()
   603  	eval, err := state.EvalByID(ws, evalID)
   604  	if err != nil {
   605  		t.Fatalf("could not get eval %v", evalID)
   606  	}
   607  
   608  	if eval.Type != "system" {
   609  		t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
   610  	}
   611  
   612  	// Check for the node in the FSM
   613  	out, err := state.NodeByID(ws, node.ID)
   614  	if err != nil {
   615  		t.Fatalf("err: %v", err)
   616  	}
   617  	if out == nil {
   618  		t.Fatalf("expected node")
   619  	}
   620  	if out.ModifyIndex != resp.Index {
   621  		t.Fatalf("index mis-match")
   622  	}
   623  
   624  	// Transition it to down and then ready
   625  	node.Status = structs.NodeStatusDown
   626  	reg = &structs.NodeRegisterRequest{
   627  		Node:         node,
   628  		WriteRequest: structs.WriteRequest{Region: "global"},
   629  	}
   630  
   631  	// Fetch the response
   632  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   633  		t.Fatalf("err: %v", err)
   634  	}
   635  
   636  	if len(resp.EvalIDs) != 1 {
   637  		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
   638  	}
   639  
   640  	node.Status = structs.NodeStatusReady
   641  	reg = &structs.NodeRegisterRequest{
   642  		Node:         node,
   643  		WriteRequest: structs.WriteRequest{Region: "global"},
   644  	}
   645  
   646  	// Fetch the response
   647  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   648  		t.Fatalf("err: %v", err)
   649  	}
   650  
   651  	if len(resp.EvalIDs) != 1 {
   652  		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
   653  	}
   654  }
   655  
   656  func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) {
   657  	t.Parallel()
   658  	s1 := TestServer(t, nil)
   659  	defer s1.Shutdown()
   660  	codec := rpcClient(t, s1)
   661  	testutil.WaitForLeader(t, s1.RPC)
   662  
   663  	// Register a system job.
   664  	job := mock.SystemJob()
   665  	state := s1.fsm.State()
   666  	if err := state.UpsertJob(1, job); err != nil {
   667  		t.Fatalf("err: %v", err)
   668  	}
   669  
   670  	// Create the register request
   671  	node := mock.Node()
   672  	node.Status = structs.NodeStatusInit
   673  	reg := &structs.NodeRegisterRequest{
   674  		Node:         node,
   675  		WriteRequest: structs.WriteRequest{Region: "global"},
   676  	}
   677  
   678  	// Fetch the response
   679  	var resp structs.NodeUpdateResponse
   680  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   681  		t.Fatalf("err: %v", err)
   682  	}
   683  
   684  	// Check for heartbeat interval
   685  	ttl := resp.HeartbeatTTL
   686  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   687  		t.Fatalf("bad: %#v", ttl)
   688  	}
   689  
   690  	// Update the status
   691  	update := &structs.NodeUpdateStatusRequest{
   692  		NodeID:       node.ID,
   693  		Status:       structs.NodeStatusReady,
   694  		WriteRequest: structs.WriteRequest{Region: "global"},
   695  	}
   696  	var resp2 structs.NodeUpdateResponse
   697  	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", update, &resp2); err != nil {
   698  		t.Fatalf("err: %v", err)
   699  	}
   700  	if resp2.Index == 0 {
   701  		t.Fatalf("bad index: %d", resp2.Index)
   702  	}
   703  
   704  	// Check for an eval caused by the system job.
   705  	if len(resp2.EvalIDs) != 1 {
   706  		t.Fatalf("expected one eval; got %#v", resp2.EvalIDs)
   707  	}
   708  
   709  	evalID := resp2.EvalIDs[0]
   710  	ws := memdb.NewWatchSet()
   711  	eval, err := state.EvalByID(ws, evalID)
   712  	if err != nil {
   713  		t.Fatalf("could not get eval %v", evalID)
   714  	}
   715  
   716  	if eval.Type != "system" {
   717  		t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
   718  	}
   719  
   720  	// Check for heartbeat interval
   721  	ttl = resp2.HeartbeatTTL
   722  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   723  		t.Fatalf("bad: %#v", ttl)
   724  	}
   725  
   726  	// Check for the node in the FSM
   727  	out, err := state.NodeByID(ws, node.ID)
   728  	if err != nil {
   729  		t.Fatalf("err: %v", err)
   730  	}
   731  	if out == nil {
   732  		t.Fatalf("expected node")
   733  	}
   734  	if out.ModifyIndex != resp2.Index {
   735  		t.Fatalf("index mis-match")
   736  	}
   737  }
   738  
// TestClientEndpoint_UpdateStatus_HeartbeatOnly verifies that a status update
// that does not change the node's status acts as a pure heartbeat: it renews
// the TTL without writing through raft, and the response advertises servers.
func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) {
	t.Parallel()
	s1 := TestServer(t, nil)
	defer s1.Shutdown()

	s2 := TestServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s2.Shutdown()

	s3 := TestServer(t, func(c *Config) {
		c.DevDisableBootstrap = true
	})
	defer s3.Shutdown()
	servers := []*Server{s1, s2, s3}
	TestJoin(t, s1, s2, s3)

	// Wait for all three servers to see the full peer set before issuing RPCs.
	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 3, nil
		}, func(err error) {
			t.Fatalf("should have 3 peers")
		})
	}

	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval: TTL must be within
	// [MinHeartbeatTTL, 2*MinHeartbeatTTL].
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for heartbeat servers: the response must list servers for the
	// client to heartbeat against.
	serverAddrs := resp.Servers
	if len(serverAddrs) == 0 {
		t.Fatalf("bad: %#v", serverAddrs)
	}

	// Update the status, static state: the status is unchanged, so this
	// should be treated as a heartbeat only.
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       node.Status,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	// A heartbeat-only update performs no raft write, so the index stays 0.
	if resp2.Index != 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for heartbeat interval: the TTL is still renewed.
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}
}
   813  
   814  func TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise(t *testing.T) {
   815  	t.Parallel()
   816  	require := require.New(t)
   817  
   818  	advAddr := "127.0.1.1:1234"
   819  	adv, err := net.ResolveTCPAddr("tcp", advAddr)
   820  	require.Nil(err)
   821  
   822  	s1 := TestServer(t, func(c *Config) {
   823  		c.ClientRPCAdvertise = adv
   824  	})
   825  	defer s1.Shutdown()
   826  	codec := rpcClient(t, s1)
   827  	testutil.WaitForLeader(t, s1.RPC)
   828  
   829  	// Create the register request
   830  	node := mock.Node()
   831  	reg := &structs.NodeRegisterRequest{
   832  		Node:         node,
   833  		WriteRequest: structs.WriteRequest{Region: "global"},
   834  	}
   835  
   836  	// Fetch the response
   837  	var resp structs.NodeUpdateResponse
   838  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   839  		t.Fatalf("err: %v", err)
   840  	}
   841  
   842  	// Check for heartbeat interval
   843  	ttl := resp.HeartbeatTTL
   844  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   845  		t.Fatalf("bad: %#v", ttl)
   846  	}
   847  
   848  	// Check for heartbeat servers
   849  	require.Len(resp.Servers, 1)
   850  	require.Equal(resp.Servers[0].RPCAdvertiseAddr, advAddr)
   851  }
   852  
   853  func TestClientEndpoint_UpdateDrain(t *testing.T) {
   854  	t.Parallel()
   855  	require := require.New(t)
   856  	s1 := TestServer(t, nil)
   857  	defer s1.Shutdown()
   858  	codec := rpcClient(t, s1)
   859  	testutil.WaitForLeader(t, s1.RPC)
   860  
   861  	// Disable drainer to prevent drain from completing during test
   862  	s1.nodeDrainer.SetEnabled(false, nil)
   863  
   864  	// Create the register request
   865  	node := mock.Node()
   866  	reg := &structs.NodeRegisterRequest{
   867  		Node:         node,
   868  		WriteRequest: structs.WriteRequest{Region: "global"},
   869  	}
   870  
   871  	// Fetch the response
   872  	var resp structs.NodeUpdateResponse
   873  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
   874  
   875  	beforeUpdate := time.Now()
   876  	strategy := &structs.DrainStrategy{
   877  		DrainSpec: structs.DrainSpec{
   878  			Deadline: 10 * time.Second,
   879  		},
   880  	}
   881  
   882  	// Update the status
   883  	dereg := &structs.NodeUpdateDrainRequest{
   884  		NodeID:        node.ID,
   885  		DrainStrategy: strategy,
   886  		WriteRequest:  structs.WriteRequest{Region: "global"},
   887  	}
   888  	var resp2 structs.NodeDrainUpdateResponse
   889  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
   890  	require.NotZero(resp2.Index)
   891  
   892  	// Check for the node in the FSM
   893  	state := s1.fsm.State()
   894  	ws := memdb.NewWatchSet()
   895  	out, err := state.NodeByID(ws, node.ID)
   896  	require.Nil(err)
   897  	require.True(out.Drain)
   898  	require.Equal(strategy.Deadline, out.DrainStrategy.Deadline)
   899  	require.Len(out.Events, 2)
   900  	require.Equal(NodeDrainEventDrainSet, out.Events[1].Message)
   901  
   902  	// before+deadline should be before the forced deadline
   903  	require.True(beforeUpdate.Add(strategy.Deadline).Before(out.DrainStrategy.ForceDeadline))
   904  
   905  	// now+deadline should be after the forced deadline
   906  	require.True(time.Now().Add(strategy.Deadline).After(out.DrainStrategy.ForceDeadline))
   907  
   908  	// Register a system job
   909  	job := mock.SystemJob()
   910  	require.Nil(s1.State().UpsertJob(10, job))
   911  
   912  	// Update the eligibility and expect evals
   913  	dereg.DrainStrategy = nil
   914  	dereg.MarkEligible = true
   915  	var resp3 structs.NodeDrainUpdateResponse
   916  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
   917  	require.NotZero(resp3.Index)
   918  	require.NotZero(resp3.EvalCreateIndex)
   919  	require.Len(resp3.EvalIDs, 1)
   920  
   921  	// Check for updated node in the FSM
   922  	ws = memdb.NewWatchSet()
   923  	out, err = state.NodeByID(ws, node.ID)
   924  	require.NoError(err)
   925  	require.Len(out.Events, 3)
   926  	require.Equal(NodeDrainEventDrainDisabled, out.Events[2].Message)
   927  
   928  	// Check that calling UpdateDrain with the same DrainStrategy does not emit
   929  	// a node event.
   930  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
   931  	ws = memdb.NewWatchSet()
   932  	out, err = state.NodeByID(ws, node.ID)
   933  	require.NoError(err)
   934  	require.Len(out.Events, 3)
   935  }
   936  
   937  func TestClientEndpoint_UpdateDrain_ACL(t *testing.T) {
   938  	t.Parallel()
   939  	s1, root := TestACLServer(t, nil)
   940  	defer s1.Shutdown()
   941  	codec := rpcClient(t, s1)
   942  	testutil.WaitForLeader(t, s1.RPC)
   943  	require := require.New(t)
   944  
   945  	// Create the node
   946  	node := mock.Node()
   947  	state := s1.fsm.State()
   948  
   949  	require.Nil(state.UpsertNode(1, node), "UpsertNode")
   950  
   951  	// Create the policy and tokens
   952  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
   953  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
   954  
   955  	// Update the status without a token and expect failure
   956  	dereg := &structs.NodeUpdateDrainRequest{
   957  		NodeID: node.ID,
   958  		DrainStrategy: &structs.DrainStrategy{
   959  			DrainSpec: structs.DrainSpec{
   960  				Deadline: 10 * time.Second,
   961  			},
   962  		},
   963  		WriteRequest: structs.WriteRequest{Region: "global"},
   964  	}
   965  	{
   966  		var resp structs.NodeDrainUpdateResponse
   967  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
   968  		require.NotNil(err, "RPC")
   969  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
   970  	}
   971  
   972  	// Try with a valid token
   973  	dereg.AuthToken = validToken.SecretID
   974  	{
   975  		var resp structs.NodeDrainUpdateResponse
   976  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
   977  	}
   978  
   979  	// Try with a invalid token
   980  	dereg.AuthToken = invalidToken.SecretID
   981  	{
   982  		var resp structs.NodeDrainUpdateResponse
   983  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
   984  		require.NotNil(err, "RPC")
   985  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
   986  	}
   987  
   988  	// Try with a root token
   989  	dereg.AuthToken = root.SecretID
   990  	{
   991  		var resp structs.NodeDrainUpdateResponse
   992  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
   993  	}
   994  }
   995  
// This test ensures that Nomad marks client state of allocations which are in
// pending/running state to lost when a node is marked as down.
func TestClientEndpoint_Drain_Down(t *testing.T) {
	t.Parallel()
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	require := require.New(t)

	// Register a node
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Register a service job with a single instance.
	var jobResp structs.JobRegisterResponse
	job := mock.Job()
	job.TaskGroups[0].Count = 1
	jobReq := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp))

	// Register a system job so both scheduler types are covered.
	var jobResp1 structs.JobRegisterResponse
	job1 := mock.SystemJob()
	job1.TaskGroups[0].Count = 1
	jobReq1 := &structs.JobRegisterRequest{
		Job: job1,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job1.Namespace,
		},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq1, &jobResp1))

	// Wait for the scheduler to create an allocation for each job on the
	// node before draining it.
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		allocs, err := s1.fsm.state.AllocsByJob(ws, job.Namespace, job.ID, true)
		if err != nil {
			return false, err
		}
		allocs1, err := s1.fsm.state.AllocsByJob(ws, job1.Namespace, job1.ID, true)
		if err != nil {
			return false, err
		}
		return len(allocs) > 0 && len(allocs1) > 0, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Drain the node. The negative deadline means the deadline has already
	// passed, forcing the drain to take effect immediately.
	dereg := &structs.NodeUpdateDrainRequest{
		NodeID: node.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: -1 * time.Second,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))

	// Mark the node as down by re-registering it with a down status.
	node.Status = structs.NodeStatusDown
	reg = &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Ensure that the allocation has transitioned to lost
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		summary, err := s1.fsm.state.JobSummaryByID(ws, job.Namespace, job.ID)
		if err != nil {
			return false, err
		}
		expectedSummary := &structs.JobSummary{
			JobID:     job.ID,
			Namespace: job.Namespace,
			Summary: map[string]structs.TaskGroupSummary{
				"web": {
					// The service alloc is lost and its replacement cannot be
					// placed (the only node is down), so one is queued.
					Queued: 1,
					Lost:   1,
				},
			},
			Children:    new(structs.JobChildrenSummary),
			CreateIndex: jobResp.JobModifyIndex,
			ModifyIndex: summary.ModifyIndex,
		}
		if !reflect.DeepEqual(summary, expectedSummary) {
			return false, fmt.Errorf("Service: expected: %#v, actual: %#v", expectedSummary, summary)
		}

		summary1, err := s1.fsm.state.JobSummaryByID(ws, job1.Namespace, job1.ID)
		if err != nil {
			return false, err
		}
		expectedSummary1 := &structs.JobSummary{
			JobID:     job1.ID,
			Namespace: job1.Namespace,
			Summary: map[string]structs.TaskGroupSummary{
				"web": {
					// The system alloc on the down node is only marked lost;
					// no replacement is queued for it.
					Lost: 1,
				},
			},
			Children:    new(structs.JobChildrenSummary),
			CreateIndex: jobResp1.JobModifyIndex,
			ModifyIndex: summary1.ModifyIndex,
		}
		if !reflect.DeepEqual(summary1, expectedSummary1) {
			return false, fmt.Errorf("System: expected: %#v, actual: %#v", expectedSummary1, summary1)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
  1127  
  1128  func TestClientEndpoint_UpdateEligibility(t *testing.T) {
  1129  	t.Parallel()
  1130  	require := require.New(t)
  1131  	s1 := TestServer(t, nil)
  1132  	defer s1.Shutdown()
  1133  	codec := rpcClient(t, s1)
  1134  	testutil.WaitForLeader(t, s1.RPC)
  1135  
  1136  	// Create the register request
  1137  	node := mock.Node()
  1138  	reg := &structs.NodeRegisterRequest{
  1139  		Node:         node,
  1140  		WriteRequest: structs.WriteRequest{Region: "global"},
  1141  	}
  1142  
  1143  	// Fetch the response
  1144  	var resp structs.NodeUpdateResponse
  1145  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
  1146  
  1147  	// Update the eligibility
  1148  	elig := &structs.NodeUpdateEligibilityRequest{
  1149  		NodeID:       node.ID,
  1150  		Eligibility:  structs.NodeSchedulingIneligible,
  1151  		WriteRequest: structs.WriteRequest{Region: "global"},
  1152  	}
  1153  	var resp2 structs.NodeEligibilityUpdateResponse
  1154  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp2))
  1155  	require.NotZero(resp2.Index)
  1156  	require.Zero(resp2.EvalCreateIndex)
  1157  	require.Empty(resp2.EvalIDs)
  1158  
  1159  	// Check for the node in the FSM
  1160  	state := s1.fsm.State()
  1161  	out, err := state.NodeByID(nil, node.ID)
  1162  	require.Nil(err)
  1163  	require.Equal(out.SchedulingEligibility, structs.NodeSchedulingIneligible)
  1164  	require.Len(out.Events, 2)
  1165  	require.Equal(NodeEligibilityEventIneligible, out.Events[1].Message)
  1166  
  1167  	// Register a system job
  1168  	job := mock.SystemJob()
  1169  	require.Nil(s1.State().UpsertJob(10, job))
  1170  
  1171  	// Update the eligibility and expect evals
  1172  	elig.Eligibility = structs.NodeSchedulingEligible
  1173  	var resp3 structs.NodeEligibilityUpdateResponse
  1174  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp3))
  1175  	require.NotZero(resp3.Index)
  1176  	require.NotZero(resp3.EvalCreateIndex)
  1177  	require.Len(resp3.EvalIDs, 1)
  1178  
  1179  	out, err = state.NodeByID(nil, node.ID)
  1180  	require.Nil(err)
  1181  	require.Len(out.Events, 3)
  1182  	require.Equal(NodeEligibilityEventEligible, out.Events[2].Message)
  1183  }
  1184  
  1185  func TestClientEndpoint_UpdateEligibility_ACL(t *testing.T) {
  1186  	t.Parallel()
  1187  	s1, root := TestACLServer(t, nil)
  1188  	defer s1.Shutdown()
  1189  	codec := rpcClient(t, s1)
  1190  	testutil.WaitForLeader(t, s1.RPC)
  1191  	require := require.New(t)
  1192  
  1193  	// Create the node
  1194  	node := mock.Node()
  1195  	state := s1.fsm.State()
  1196  
  1197  	require.Nil(state.UpsertNode(1, node), "UpsertNode")
  1198  
  1199  	// Create the policy and tokens
  1200  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
  1201  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
  1202  
  1203  	// Update the status without a token and expect failure
  1204  	dereg := &structs.NodeUpdateEligibilityRequest{
  1205  		NodeID:       node.ID,
  1206  		Eligibility:  structs.NodeSchedulingIneligible,
  1207  		WriteRequest: structs.WriteRequest{Region: "global"},
  1208  	}
  1209  	{
  1210  		var resp structs.NodeEligibilityUpdateResponse
  1211  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
  1212  		require.NotNil(err, "RPC")
  1213  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1214  	}
  1215  
  1216  	// Try with a valid token
  1217  	dereg.AuthToken = validToken.SecretID
  1218  	{
  1219  		var resp structs.NodeEligibilityUpdateResponse
  1220  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
  1221  	}
  1222  
  1223  	// Try with a invalid token
  1224  	dereg.AuthToken = invalidToken.SecretID
  1225  	{
  1226  		var resp structs.NodeEligibilityUpdateResponse
  1227  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
  1228  		require.NotNil(err, "RPC")
  1229  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1230  	}
  1231  
  1232  	// Try with a root token
  1233  	dereg.AuthToken = root.SecretID
  1234  	{
  1235  		var resp structs.NodeEligibilityUpdateResponse
  1236  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
  1237  	}
  1238  }
  1239  
// TestClientEndpoint_GetNode exercises the Node.GetNode RPC: it registers a
// node, fetches it back, verifies the returned fields and the registration
// node event, and checks the lookup of a non-existent node ID.
func TestClientEndpoint_GetNode(t *testing.T) {
	t.Parallel()
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Mirror the indexes set by registration on our local copy so the deep
	// comparison below can succeed.
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Lookup the node
	get := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.SingleNodeResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}

	// The computed node class is populated server-side on registration.
	if resp2.Node.ComputedClass == "" {
		t.Fatalf("bad ComputedClass: %#v", resp2.Node)
	}

	// Update the status updated at value
	// The response carries server-generated StatusUpdatedAt/Events and does
	// not include the SecretID; copy/clear those before deep-comparing.
	node.StatusUpdatedAt = resp2.Node.StatusUpdatedAt
	node.SecretID = ""
	node.Events = resp2.Node.Events
	if !reflect.DeepEqual(node, resp2.Node) {
		t.Fatalf("bad: %#v \n %#v", node, resp2.Node)
	}

	// assert that the node register event was set correctly
	if len(resp2.Node.Events) != 1 {
		t.Fatalf("Did not set node events: %#v", resp2.Node)
	}
	if resp2.Node.Events[0].Message != state.NodeRegisterEventRegistered {
		t.Fatalf("Did not set node register event correctly: %#v", resp2.Node)
	}

	// Lookup non-existing node: the RPC succeeds with a nil Node and the
	// current index rather than returning an error.
	get.NodeID = "12345678-abcd-efab-cdef-123456789abc"
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}
	if resp2.Node != nil {
		t.Fatalf("unexpected node")
	}
}
  1307  
  1308  func TestClientEndpoint_GetNode_ACL(t *testing.T) {
  1309  	t.Parallel()
  1310  	s1, root := TestACLServer(t, nil)
  1311  	defer s1.Shutdown()
  1312  	codec := rpcClient(t, s1)
  1313  	testutil.WaitForLeader(t, s1.RPC)
  1314  	assert := assert.New(t)
  1315  
  1316  	// Create the node
  1317  	node := mock.Node()
  1318  	state := s1.fsm.State()
  1319  	assert.Nil(state.UpsertNode(1, node), "UpsertNode")
  1320  
  1321  	// Create the policy and tokens
  1322  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
  1323  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))
  1324  
  1325  	// Lookup the node without a token and expect failure
  1326  	req := &structs.NodeSpecificRequest{
  1327  		NodeID:       node.ID,
  1328  		QueryOptions: structs.QueryOptions{Region: "global"},
  1329  	}
  1330  	{
  1331  		var resp structs.SingleNodeResponse
  1332  		err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
  1333  		assert.NotNil(err, "RPC")
  1334  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1335  	}
  1336  
  1337  	// Try with a valid token
  1338  	req.AuthToken = validToken.SecretID
  1339  	{
  1340  		var resp structs.SingleNodeResponse
  1341  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
  1342  		assert.Equal(node.ID, resp.Node.ID)
  1343  	}
  1344  
  1345  	// Try with a Node.SecretID
  1346  	req.AuthToken = node.SecretID
  1347  	{
  1348  		var resp structs.SingleNodeResponse
  1349  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
  1350  		assert.Equal(node.ID, resp.Node.ID)
  1351  	}
  1352  
  1353  	// Try with a invalid token
  1354  	req.AuthToken = invalidToken.SecretID
  1355  	{
  1356  		var resp structs.SingleNodeResponse
  1357  		err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
  1358  		assert.NotNil(err, "RPC")
  1359  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1360  	}
  1361  
  1362  	// Try with a root token
  1363  	req.AuthToken = root.SecretID
  1364  	{
  1365  		var resp structs.SingleNodeResponse
  1366  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
  1367  		assert.Equal(node.ID, resp.Node.ID)
  1368  	}
  1369  }
  1370  
  1371  func TestClientEndpoint_GetNode_Blocking(t *testing.T) {
  1372  	t.Parallel()
  1373  	s1 := TestServer(t, nil)
  1374  	defer s1.Shutdown()
  1375  	state := s1.fsm.State()
  1376  	codec := rpcClient(t, s1)
  1377  	testutil.WaitForLeader(t, s1.RPC)
  1378  
  1379  	// Create the node
  1380  	node1 := mock.Node()
  1381  	node2 := mock.Node()
  1382  
  1383  	// First create an unrelated node.
  1384  	time.AfterFunc(100*time.Millisecond, func() {
  1385  		if err := state.UpsertNode(100, node1); err != nil {
  1386  			t.Fatalf("err: %v", err)
  1387  		}
  1388  	})
  1389  
  1390  	// Upsert the node we are watching later
  1391  	time.AfterFunc(200*time.Millisecond, func() {
  1392  		if err := state.UpsertNode(200, node2); err != nil {
  1393  			t.Fatalf("err: %v", err)
  1394  		}
  1395  	})
  1396  
  1397  	// Lookup the node
  1398  	req := &structs.NodeSpecificRequest{
  1399  		NodeID: node2.ID,
  1400  		QueryOptions: structs.QueryOptions{
  1401  			Region:        "global",
  1402  			MinQueryIndex: 150,
  1403  		},
  1404  	}
  1405  	var resp structs.SingleNodeResponse
  1406  	start := time.Now()
  1407  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp); err != nil {
  1408  		t.Fatalf("err: %v", err)
  1409  	}
  1410  
  1411  	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
  1412  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  1413  	}
  1414  	if resp.Index != 200 {
  1415  		t.Fatalf("Bad index: %d %d", resp.Index, 200)
  1416  	}
  1417  	if resp.Node == nil || resp.Node.ID != node2.ID {
  1418  		t.Fatalf("bad: %#v", resp.Node)
  1419  	}
  1420  
  1421  	// Node update triggers watches
  1422  	time.AfterFunc(100*time.Millisecond, func() {
  1423  		nodeUpdate := mock.Node()
  1424  		nodeUpdate.ID = node2.ID
  1425  		nodeUpdate.Status = structs.NodeStatusDown
  1426  		if err := state.UpsertNode(300, nodeUpdate); err != nil {
  1427  			t.Fatalf("err: %v", err)
  1428  		}
  1429  	})
  1430  
  1431  	req.QueryOptions.MinQueryIndex = 250
  1432  	var resp2 structs.SingleNodeResponse
  1433  	start = time.Now()
  1434  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp2); err != nil {
  1435  		t.Fatalf("err: %v", err)
  1436  	}
  1437  
  1438  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  1439  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  1440  	}
  1441  	if resp2.Index != 300 {
  1442  		t.Fatalf("Bad index: %d %d", resp2.Index, 300)
  1443  	}
  1444  	if resp2.Node == nil || resp2.Node.Status != structs.NodeStatusDown {
  1445  		t.Fatalf("bad: %#v", resp2.Node)
  1446  	}
  1447  
  1448  	// Node delete triggers watches
  1449  	time.AfterFunc(100*time.Millisecond, func() {
  1450  		if err := state.DeleteNode(400, node2.ID); err != nil {
  1451  			t.Fatalf("err: %v", err)
  1452  		}
  1453  	})
  1454  
  1455  	req.QueryOptions.MinQueryIndex = 350
  1456  	var resp3 structs.SingleNodeResponse
  1457  	start = time.Now()
  1458  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp3); err != nil {
  1459  		t.Fatalf("err: %v", err)
  1460  	}
  1461  
  1462  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  1463  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  1464  	}
  1465  	if resp3.Index != 400 {
  1466  		t.Fatalf("Bad index: %d %d", resp2.Index, 400)
  1467  	}
  1468  	if resp3.Node != nil {
  1469  		t.Fatalf("bad: %#v", resp3.Node)
  1470  	}
  1471  }
  1472  
// TestClientEndpoint_GetAllocs exercises the Node.GetAllocs RPC: it returns
// the full allocations for a registered node, and an empty result (at the
// current index) for an unknown node ID.
func TestClientEndpoint_GetAllocs(t *testing.T) {
	t.Parallel()
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Inject a fake allocation (with its job summary) for the node.
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	state := s1.fsm.State()
	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Lookup the allocs
	get := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.NodeAllocsResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != 100 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
	}

	if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
		t.Fatalf("bad: %#v", resp2.Allocs)
	}

	// Lookup non-existing node: expect no allocs at the same index rather
	// than an error.
	get.NodeID = "foobarbaz"
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != 100 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
	}
	if len(resp2.Allocs) != 0 {
		t.Fatalf("unexpected node")
	}
}
  1534  
  1535  func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) {
  1536  	t.Parallel()
  1537  	s1, root := TestACLServer(t, nil)
  1538  	defer s1.Shutdown()
  1539  	codec := rpcClient(t, s1)
  1540  	testutil.WaitForLeader(t, s1.RPC)
  1541  	assert := assert.New(t)
  1542  
  1543  	// Create the node
  1544  	allocDefaultNS := mock.Alloc()
  1545  	node := mock.Node()
  1546  	allocDefaultNS.NodeID = node.ID
  1547  	state := s1.fsm.State()
  1548  	assert.Nil(state.UpsertNode(1, node), "UpsertNode")
  1549  	assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary")
  1550  	allocs := []*structs.Allocation{allocDefaultNS}
  1551  	assert.Nil(state.UpsertAllocs(5, allocs), "UpsertAllocs")
  1552  
  1553  	// Create the namespace policy and tokens
  1554  	validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+
  1555  		mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
  1556  	invalidToken := mock.CreatePolicyAndToken(t, state, 1004, "test-invalid",
  1557  		mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
  1558  
  1559  	req := &structs.NodeSpecificRequest{
  1560  		NodeID: node.ID,
  1561  		QueryOptions: structs.QueryOptions{
  1562  			Region: "global",
  1563  		},
  1564  	}
  1565  
  1566  	// Lookup the node without a token and expect failure
  1567  	{
  1568  		var resp structs.NodeAllocsResponse
  1569  		err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
  1570  		assert.NotNil(err, "RPC")
  1571  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1572  	}
  1573  
  1574  	// Try with a valid token for the default namespace
  1575  	req.AuthToken = validDefaultToken.SecretID
  1576  	{
  1577  		var resp structs.NodeAllocsResponse
  1578  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
  1579  		assert.Len(resp.Allocs, 1)
  1580  		assert.Equal(allocDefaultNS.ID, resp.Allocs[0].ID)
  1581  	}
  1582  
  1583  	// Try with a invalid token
  1584  	req.AuthToken = invalidToken.SecretID
  1585  	{
  1586  		var resp structs.NodeAllocsResponse
  1587  		err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
  1588  		assert.NotNil(err, "RPC")
  1589  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1590  	}
  1591  
  1592  	// Try with a root token
  1593  	req.AuthToken = root.SecretID
  1594  	{
  1595  		var resp structs.NodeAllocsResponse
  1596  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
  1597  		assert.Len(resp.Allocs, 1)
  1598  		for _, alloc := range resp.Allocs {
  1599  			switch alloc.ID {
  1600  			case allocDefaultNS.ID:
  1601  				// expected
  1602  			default:
  1603  				t.Errorf("unexpected alloc %q for namespace %q", alloc.ID, alloc.Namespace)
  1604  			}
  1605  		}
  1606  	}
  1607  }
  1608  
  1609  func TestClientEndpoint_GetClientAllocs(t *testing.T) {
  1610  	t.Parallel()
  1611  	require := require.New(t)
  1612  	s1 := TestServer(t, nil)
  1613  	defer s1.Shutdown()
  1614  	codec := rpcClient(t, s1)
  1615  	testutil.WaitForLeader(t, s1.RPC)
  1616  
  1617  	// Check that we have no client connections
  1618  	require.Empty(s1.connectedNodes())
  1619  
  1620  	// Create the register request
  1621  	node := mock.Node()
  1622  	state := s1.fsm.State()
  1623  	require.Nil(state.UpsertNode(98, node))
  1624  
  1625  	// Inject fake evaluations
  1626  	alloc := mock.Alloc()
  1627  	alloc.NodeID = node.ID
  1628  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1629  	err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
  1630  	if err != nil {
  1631  		t.Fatalf("err: %v", err)
  1632  	}
  1633  
  1634  	// Lookup the allocs
  1635  	get := &structs.NodeSpecificRequest{
  1636  		NodeID:       node.ID,
  1637  		SecretID:     node.SecretID,
  1638  		QueryOptions: structs.QueryOptions{Region: "global"},
  1639  	}
  1640  	var resp2 structs.NodeClientAllocsResponse
  1641  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2); err != nil {
  1642  		t.Fatalf("err: %v", err)
  1643  	}
  1644  	if resp2.Index != 100 {
  1645  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1646  	}
  1647  
  1648  	if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
  1649  		t.Fatalf("bad: %#v", resp2.Allocs)
  1650  	}
  1651  
  1652  	// Check that we have the client connections
  1653  	nodes := s1.connectedNodes()
  1654  	require.Len(nodes, 1)
  1655  	require.Contains(nodes, node.ID)
  1656  
  1657  	// Lookup node with bad SecretID
  1658  	get.SecretID = "foobarbaz"
  1659  	var resp3 structs.NodeClientAllocsResponse
  1660  	err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp3)
  1661  	if err == nil || !strings.Contains(err.Error(), "does not match") {
  1662  		t.Fatalf("err: %v", err)
  1663  	}
  1664  
  1665  	// Lookup non-existing node
  1666  	get.NodeID = uuid.Generate()
  1667  	var resp4 structs.NodeClientAllocsResponse
  1668  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp4); err != nil {
  1669  		t.Fatalf("err: %v", err)
  1670  	}
  1671  	if resp4.Index != 100 {
  1672  		t.Fatalf("Bad index: %d %d", resp3.Index, 100)
  1673  	}
  1674  	if len(resp4.Allocs) != 0 {
  1675  		t.Fatalf("unexpected node %#v", resp3.Allocs)
  1676  	}
  1677  
  1678  	// Close the connection and check that we remove the client connections
  1679  	require.Nil(codec.Close())
  1680  	testutil.WaitForResult(func() (bool, error) {
  1681  		nodes := s1.connectedNodes()
  1682  		return len(nodes) == 0, nil
  1683  	}, func(err error) {
  1684  		t.Fatalf("should have no clients")
  1685  	})
  1686  }
  1687  
  1688  func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) {
  1689  	t.Parallel()
  1690  	s1 := TestServer(t, nil)
  1691  	defer s1.Shutdown()
  1692  	codec := rpcClient(t, s1)
  1693  	testutil.WaitForLeader(t, s1.RPC)
  1694  
  1695  	// Create the register request
  1696  	node := mock.Node()
  1697  	reg := &structs.NodeRegisterRequest{
  1698  		Node:         node,
  1699  		WriteRequest: structs.WriteRequest{Region: "global"},
  1700  	}
  1701  
  1702  	// Fetch the response
  1703  	var resp structs.GenericResponse
  1704  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  1705  		t.Fatalf("err: %v", err)
  1706  	}
  1707  	node.CreateIndex = resp.Index
  1708  	node.ModifyIndex = resp.Index
  1709  
  1710  	// Inject fake evaluations async
  1711  	now := time.Now().UTC().UnixNano()
  1712  	alloc := mock.Alloc()
  1713  	alloc.NodeID = node.ID
  1714  	alloc.ModifyTime = now
  1715  	state := s1.fsm.State()
  1716  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1717  	start := time.Now()
  1718  	time.AfterFunc(100*time.Millisecond, func() {
  1719  		err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
  1720  		if err != nil {
  1721  			t.Fatalf("err: %v", err)
  1722  		}
  1723  	})
  1724  
  1725  	// Lookup the allocs in a blocking query
  1726  	req := &structs.NodeSpecificRequest{
  1727  		NodeID:   node.ID,
  1728  		SecretID: node.SecretID,
  1729  		QueryOptions: structs.QueryOptions{
  1730  			Region:        "global",
  1731  			MinQueryIndex: 50,
  1732  			MaxQueryTime:  time.Second,
  1733  		},
  1734  	}
  1735  	var resp2 structs.NodeClientAllocsResponse
  1736  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2); err != nil {
  1737  		t.Fatalf("err: %v", err)
  1738  	}
  1739  
  1740  	// Should block at least 100ms
  1741  	if time.Since(start) < 100*time.Millisecond {
  1742  		t.Fatalf("too fast")
  1743  	}
  1744  
  1745  	if resp2.Index != 100 {
  1746  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1747  	}
  1748  
  1749  	if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
  1750  		t.Fatalf("bad: %#v", resp2.Allocs)
  1751  	}
  1752  
  1753  	iter, err := state.AllocsByIDPrefix(nil, structs.DefaultNamespace, alloc.ID)
  1754  	if err != nil {
  1755  		t.Fatalf("err: %v", err)
  1756  	}
  1757  
  1758  	getAllocs := func(iter memdb.ResultIterator) []*structs.Allocation {
  1759  		var allocs []*structs.Allocation
  1760  		for {
  1761  			raw := iter.Next()
  1762  			if raw == nil {
  1763  				break
  1764  			}
  1765  			allocs = append(allocs, raw.(*structs.Allocation))
  1766  		}
  1767  		return allocs
  1768  	}
  1769  	out := getAllocs(iter)
  1770  
  1771  	if len(out) != 1 {
  1772  		t.Fatalf("Expected to get one allocation but got:%v", out)
  1773  	}
  1774  
  1775  	if out[0].ModifyTime != now {
  1776  		t.Fatalf("Invalid modify time %v", out[0].ModifyTime)
  1777  	}
  1778  
  1779  	// Alloc updates fire watches
  1780  	time.AfterFunc(100*time.Millisecond, func() {
  1781  		allocUpdate := mock.Alloc()
  1782  		allocUpdate.NodeID = alloc.NodeID
  1783  		allocUpdate.ID = alloc.ID
  1784  		allocUpdate.ClientStatus = structs.AllocClientStatusRunning
  1785  		state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
  1786  		err := state.UpsertAllocs(200, []*structs.Allocation{allocUpdate})
  1787  		if err != nil {
  1788  			t.Fatalf("err: %v", err)
  1789  		}
  1790  	})
  1791  
  1792  	req.QueryOptions.MinQueryIndex = 150
  1793  	var resp3 structs.NodeClientAllocsResponse
  1794  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3); err != nil {
  1795  		t.Fatalf("err: %v", err)
  1796  	}
  1797  
  1798  	if time.Since(start) < 100*time.Millisecond {
  1799  		t.Fatalf("too fast")
  1800  	}
  1801  	if resp3.Index != 200 {
  1802  		t.Fatalf("Bad index: %d %d", resp3.Index, 200)
  1803  	}
  1804  	if len(resp3.Allocs) != 1 || resp3.Allocs[alloc.ID] != 200 {
  1805  		t.Fatalf("bad: %#v", resp3.Allocs)
  1806  	}
  1807  }
  1808  
// TestClientEndpoint_GetClientAllocs_Blocking_GC verifies that
// Node.GetClientAllocs blocking queries are woken both by alloc upserts and
// by alloc deletions (the GC path), and that a deleted alloc drops out of
// the returned alloc->index map.
func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)
	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Inject two fake allocations asynchronously so the blocking query
	// below has a write to wake up on. The timer goroutine reports via
	// assert (non-fatal), which is safe off the test goroutine.
	alloc1 := mock.Alloc()
	alloc1.NodeID = node.ID
	alloc2 := mock.Alloc()
	alloc2.NodeID = node.ID
	state := s1.fsm.State()
	state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
	start := time.Now()
	time.AfterFunc(100*time.Millisecond, func() {
		assert.Nil(state.UpsertAllocs(100, []*structs.Allocation{alloc1, alloc2}))
	})

	// Lookup the allocs in a blocking query; MinQueryIndex (50) is ahead
	// of the current state, so the call must wait for the upsert at 100.
	req := &structs.NodeSpecificRequest{
		NodeID:   node.ID,
		SecretID: node.SecretID,
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			MinQueryIndex: 50,
			MaxQueryTime:  time.Second,
		},
	}
	var resp2 structs.NodeClientAllocsResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2))

	// Should block at least 100ms, since the allocs are only written at
	// the 100ms mark.
	if time.Since(start) < 100*time.Millisecond {
		t.Fatalf("too fast")
	}

	// Both allocs were committed at index 100.
	assert.EqualValues(100, resp2.Index)
	if assert.Len(resp2.Allocs, 2) {
		assert.EqualValues(100, resp2.Allocs[alloc1.ID])
	}

	// Delete alloc2 (no evals passed, only the alloc ID) to simulate a
	// garbage collection; the deletion must also fire the watch.
	time.AfterFunc(100*time.Millisecond, func() {
		assert.Nil(state.DeleteEval(200, nil, []string{alloc2.ID}))
	})

	req.QueryOptions.MinQueryIndex = 150
	var resp3 structs.NodeClientAllocsResponse
	assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3))

	if time.Since(start) < 100*time.Millisecond {
		t.Fatalf("too fast")
	}
	// Only alloc1 remains, still at its original create index.
	assert.EqualValues(200, resp3.Index)
	if assert.Len(resp3.Allocs, 1) {
		assert.EqualValues(100, resp3.Allocs[alloc1.ID])
	}
}
  1882  
// A MigrateToken should not be created if an allocation shares the same node
// with its previous allocation.
func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1 := TestServer(t, nil)
	defer s1.Shutdown()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Inject two allocations on the SAME node, where alloc is the
	// replacement for prevAlloc — the co-located case this test covers.
	// NOTE(review): AllocClientStatusComplete ("complete") is a client
	// status constant being assigned to DesiredStatus here — confirm this
	// is intentional rather than a mixed-up constant.
	prevAlloc := mock.Alloc()
	prevAlloc.NodeID = node.ID
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	alloc.PreviousAllocation = prevAlloc.ID
	alloc.DesiredStatus = structs.AllocClientStatusComplete
	state := s1.fsm.State()
	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	err := state.UpsertAllocs(100, []*structs.Allocation{prevAlloc, alloc})
	assert.Nil(err)

	// Lookup the allocs
	get := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		SecretID:     node.SecretID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.NodeClientAllocsResponse

	err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2)
	assert.Nil(err)

	// Both allocs come back, but no migrate tokens may be issued since the
	// previous and replacement allocs share a node.
	assert.Equal(uint64(100), resp2.Index)
	assert.Equal(2, len(resp2.Allocs))
	assert.Equal(uint64(100), resp2.Allocs[alloc.ID])
	assert.Equal(0, len(resp2.MigrateTokens))
}
  1937  
  1938  func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) {
  1939  	t.Parallel()
  1940  	s1 := TestServer(t, nil)
  1941  	defer s1.Shutdown()
  1942  	codec := rpcClient(t, s1)
  1943  	testutil.WaitForLeader(t, s1.RPC)
  1944  
  1945  	// Create the register request
  1946  	node := mock.Node()
  1947  	reg := &structs.NodeRegisterRequest{
  1948  		Node:         node,
  1949  		WriteRequest: structs.WriteRequest{Region: "global"},
  1950  	}
  1951  
  1952  	// Fetch the response
  1953  	var resp structs.GenericResponse
  1954  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  1955  		t.Fatalf("err: %v", err)
  1956  	}
  1957  	node.CreateIndex = resp.Index
  1958  	node.ModifyIndex = resp.Index
  1959  
  1960  	// Inject fake evaluations async
  1961  	alloc := mock.Alloc()
  1962  	alloc.NodeID = node.ID
  1963  	state := s1.fsm.State()
  1964  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1965  	start := time.Now()
  1966  	time.AfterFunc(100*time.Millisecond, func() {
  1967  		err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
  1968  		if err != nil {
  1969  			t.Fatalf("err: %v", err)
  1970  		}
  1971  	})
  1972  
  1973  	// Lookup the allocs in a blocking query
  1974  	req := &structs.NodeSpecificRequest{
  1975  		NodeID: node.ID,
  1976  		QueryOptions: structs.QueryOptions{
  1977  			Region:        "global",
  1978  			MinQueryIndex: 50,
  1979  			MaxQueryTime:  time.Second,
  1980  		},
  1981  	}
  1982  	var resp2 structs.NodeAllocsResponse
  1983  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp2); err != nil {
  1984  		t.Fatalf("err: %v", err)
  1985  	}
  1986  
  1987  	// Should block at least 100ms
  1988  	if time.Since(start) < 100*time.Millisecond {
  1989  		t.Fatalf("too fast")
  1990  	}
  1991  
  1992  	if resp2.Index != 100 {
  1993  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1994  	}
  1995  
  1996  	if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
  1997  		t.Fatalf("bad: %#v", resp2.Allocs)
  1998  	}
  1999  
  2000  	// Alloc updates fire watches
  2001  	time.AfterFunc(100*time.Millisecond, func() {
  2002  		allocUpdate := mock.Alloc()
  2003  		allocUpdate.NodeID = alloc.NodeID
  2004  		allocUpdate.ID = alloc.ID
  2005  		allocUpdate.ClientStatus = structs.AllocClientStatusRunning
  2006  		state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
  2007  		err := state.UpdateAllocsFromClient(200, []*structs.Allocation{allocUpdate})
  2008  		if err != nil {
  2009  			t.Fatalf("err: %v", err)
  2010  		}
  2011  	})
  2012  
  2013  	req.QueryOptions.MinQueryIndex = 150
  2014  	var resp3 structs.NodeAllocsResponse
  2015  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp3); err != nil {
  2016  		t.Fatalf("err: %v", err)
  2017  	}
  2018  
  2019  	if time.Since(start) < 100*time.Millisecond {
  2020  		t.Fatalf("too fast")
  2021  	}
  2022  	if resp3.Index != 200 {
  2023  		t.Fatalf("Bad index: %d %d", resp3.Index, 200)
  2024  	}
  2025  	if len(resp3.Allocs) != 1 || resp3.Allocs[0].ClientStatus != structs.AllocClientStatusRunning {
  2026  		t.Fatalf("bad: %#v", resp3.Allocs[0])
  2027  	}
  2028  }
  2029  
  2030  func TestClientEndpoint_UpdateAlloc(t *testing.T) {
  2031  	t.Parallel()
  2032  	s1 := TestServer(t, func(c *Config) {
  2033  		// Disabling scheduling in this test so that we can
  2034  		// ensure that the state store doesn't accumulate more evals
  2035  		// than what we expect the unit test to add
  2036  		c.NumSchedulers = 0
  2037  	})
  2038  
  2039  	defer s1.Shutdown()
  2040  	codec := rpcClient(t, s1)
  2041  	testutil.WaitForLeader(t, s1.RPC)
  2042  	require := require.New(t)
  2043  
  2044  	// Create the register request
  2045  	node := mock.Node()
  2046  	reg := &structs.NodeRegisterRequest{
  2047  		Node:         node,
  2048  		WriteRequest: structs.WriteRequest{Region: "global"},
  2049  	}
  2050  
  2051  	// Fetch the response
  2052  	var resp structs.GenericResponse
  2053  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2054  		t.Fatalf("err: %v", err)
  2055  	}
  2056  
  2057  	state := s1.fsm.State()
  2058  	// Inject mock job
  2059  	job := mock.Job()
  2060  	job.ID = "mytestjob"
  2061  	err := state.UpsertJob(101, job)
  2062  	require.Nil(err)
  2063  
  2064  	// Inject fake allocations
  2065  	alloc := mock.Alloc()
  2066  	alloc.JobID = job.ID
  2067  	alloc.NodeID = node.ID
  2068  	err = state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2069  	require.Nil(err)
  2070  	alloc.TaskGroup = job.TaskGroups[0].Name
  2071  
  2072  	alloc2 := mock.Alloc()
  2073  	alloc2.JobID = job.ID
  2074  	alloc2.NodeID = node.ID
  2075  	err = state.UpsertJobSummary(99, mock.JobSummary(alloc2.JobID))
  2076  	require.Nil(err)
  2077  	alloc2.TaskGroup = job.TaskGroups[0].Name
  2078  
  2079  	err = state.UpsertAllocs(100, []*structs.Allocation{alloc, alloc2})
  2080  	require.Nil(err)
  2081  
  2082  	// Attempt updates of more than one alloc for the same job
  2083  	clientAlloc1 := new(structs.Allocation)
  2084  	*clientAlloc1 = *alloc
  2085  	clientAlloc1.ClientStatus = structs.AllocClientStatusFailed
  2086  
  2087  	clientAlloc2 := new(structs.Allocation)
  2088  	*clientAlloc2 = *alloc2
  2089  	clientAlloc2.ClientStatus = structs.AllocClientStatusFailed
  2090  
  2091  	// Update the alloc
  2092  	update := &structs.AllocUpdateRequest{
  2093  		Alloc:        []*structs.Allocation{clientAlloc1, clientAlloc2},
  2094  		WriteRequest: structs.WriteRequest{Region: "global"},
  2095  	}
  2096  	var resp2 structs.NodeAllocsResponse
  2097  	start := time.Now()
  2098  	err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2)
  2099  	require.Nil(err)
  2100  	require.NotEqual(0, resp2.Index)
  2101  
  2102  	if diff := time.Since(start); diff < batchUpdateInterval {
  2103  		t.Fatalf("too fast: %v", diff)
  2104  	}
  2105  
  2106  	// Lookup the alloc
  2107  	ws := memdb.NewWatchSet()
  2108  	out, err := state.AllocByID(ws, alloc.ID)
  2109  	require.Nil(err)
  2110  	require.Equal(structs.AllocClientStatusFailed, out.ClientStatus)
  2111  	require.True(out.ModifyTime > 0)
  2112  
  2113  	// Assert that exactly one eval with TriggeredBy EvalTriggerRetryFailedAlloc exists
  2114  	evaluations, err := state.EvalsByJob(ws, job.Namespace, job.ID)
  2115  	require.Nil(err)
  2116  	require.True(len(evaluations) != 0)
  2117  	foundCount := 0
  2118  	for _, resultEval := range evaluations {
  2119  		if resultEval.TriggeredBy == structs.EvalTriggerRetryFailedAlloc && resultEval.WaitUntil.IsZero() {
  2120  			foundCount++
  2121  		}
  2122  	}
  2123  	require.Equal(1, foundCount, "Should create exactly one eval for failed allocs")
  2124  
  2125  }
  2126  
  2127  func TestClientEndpoint_BatchUpdate(t *testing.T) {
  2128  	t.Parallel()
  2129  	s1 := TestServer(t, nil)
  2130  	defer s1.Shutdown()
  2131  	codec := rpcClient(t, s1)
  2132  	testutil.WaitForLeader(t, s1.RPC)
  2133  
  2134  	// Create the register request
  2135  	node := mock.Node()
  2136  	reg := &structs.NodeRegisterRequest{
  2137  		Node:         node,
  2138  		WriteRequest: structs.WriteRequest{Region: "global"},
  2139  	}
  2140  
  2141  	// Fetch the response
  2142  	var resp structs.GenericResponse
  2143  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2144  		t.Fatalf("err: %v", err)
  2145  	}
  2146  
  2147  	// Inject fake evaluations
  2148  	alloc := mock.Alloc()
  2149  	alloc.NodeID = node.ID
  2150  	state := s1.fsm.State()
  2151  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2152  	err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
  2153  	if err != nil {
  2154  		t.Fatalf("err: %v", err)
  2155  	}
  2156  
  2157  	// Attempt update
  2158  	clientAlloc := new(structs.Allocation)
  2159  	*clientAlloc = *alloc
  2160  	clientAlloc.ClientStatus = structs.AllocClientStatusFailed
  2161  
  2162  	// Call to do the batch update
  2163  	bf := structs.NewBatchFuture()
  2164  	endpoint := s1.staticEndpoints.Node
  2165  	endpoint.batchUpdate(bf, []*structs.Allocation{clientAlloc}, nil)
  2166  	if err := bf.Wait(); err != nil {
  2167  		t.Fatalf("err: %v", err)
  2168  	}
  2169  	if bf.Index() == 0 {
  2170  		t.Fatalf("Bad index: %d", bf.Index())
  2171  	}
  2172  
  2173  	// Lookup the alloc
  2174  	ws := memdb.NewWatchSet()
  2175  	out, err := state.AllocByID(ws, alloc.ID)
  2176  	if err != nil {
  2177  		t.Fatalf("err: %v", err)
  2178  	}
  2179  	if out.ClientStatus != structs.AllocClientStatusFailed {
  2180  		t.Fatalf("Bad: %#v", out)
  2181  	}
  2182  }
  2183  
  2184  func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) {
  2185  	t.Parallel()
  2186  	s1 := TestServer(t, nil)
  2187  	defer s1.Shutdown()
  2188  	codec := rpcClient(t, s1)
  2189  	testutil.WaitForLeader(t, s1.RPC)
  2190  
  2191  	// Create the register request
  2192  	node := mock.Node()
  2193  	reg := &structs.NodeRegisterRequest{
  2194  		Node:         node,
  2195  		WriteRequest: structs.WriteRequest{Region: "global"},
  2196  	}
  2197  
  2198  	// Fetch the response
  2199  	var resp structs.GenericResponse
  2200  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2201  		t.Fatalf("err: %v", err)
  2202  	}
  2203  
  2204  	// Swap the servers Vault Client
  2205  	tvc := &TestVaultClient{}
  2206  	s1.vault = tvc
  2207  
  2208  	// Inject fake allocation and vault accessor
  2209  	alloc := mock.Alloc()
  2210  	alloc.NodeID = node.ID
  2211  	state := s1.fsm.State()
  2212  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2213  	if err := state.UpsertAllocs(100, []*structs.Allocation{alloc}); err != nil {
  2214  		t.Fatalf("err: %v", err)
  2215  	}
  2216  
  2217  	va := mock.VaultAccessor()
  2218  	va.NodeID = node.ID
  2219  	va.AllocID = alloc.ID
  2220  	if err := state.UpsertVaultAccessor(101, []*structs.VaultAccessor{va}); err != nil {
  2221  		t.Fatalf("err: %v", err)
  2222  	}
  2223  
  2224  	// Inject mock job
  2225  	job := mock.Job()
  2226  	job.ID = alloc.JobID
  2227  	err := state.UpsertJob(101, job)
  2228  	if err != nil {
  2229  		t.Fatalf("err: %v", err)
  2230  	}
  2231  
  2232  	// Attempt update
  2233  	clientAlloc := new(structs.Allocation)
  2234  	*clientAlloc = *alloc
  2235  	clientAlloc.ClientStatus = structs.AllocClientStatusFailed
  2236  
  2237  	// Update the alloc
  2238  	update := &structs.AllocUpdateRequest{
  2239  		Alloc:        []*structs.Allocation{clientAlloc},
  2240  		WriteRequest: structs.WriteRequest{Region: "global"},
  2241  	}
  2242  	var resp2 structs.NodeAllocsResponse
  2243  	start := time.Now()
  2244  	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2); err != nil {
  2245  		t.Fatalf("err: %v", err)
  2246  	}
  2247  	if resp2.Index == 0 {
  2248  		t.Fatalf("Bad index: %d", resp2.Index)
  2249  	}
  2250  	if diff := time.Since(start); diff < batchUpdateInterval {
  2251  		t.Fatalf("too fast: %v", diff)
  2252  	}
  2253  
  2254  	// Lookup the alloc
  2255  	ws := memdb.NewWatchSet()
  2256  	out, err := state.AllocByID(ws, alloc.ID)
  2257  	if err != nil {
  2258  		t.Fatalf("err: %v", err)
  2259  	}
  2260  	if out.ClientStatus != structs.AllocClientStatusFailed {
  2261  		t.Fatalf("Bad: %#v", out)
  2262  	}
  2263  
  2264  	if l := len(tvc.RevokedTokens); l != 1 {
  2265  		t.Fatalf("Deregister revoked %d tokens; want 1", l)
  2266  	}
  2267  }
  2268  
  2269  func TestClientEndpoint_CreateNodeEvals(t *testing.T) {
  2270  	t.Parallel()
  2271  	s1 := TestServer(t, nil)
  2272  	defer s1.Shutdown()
  2273  	testutil.WaitForLeader(t, s1.RPC)
  2274  
  2275  	// Inject fake evaluations
  2276  	alloc := mock.Alloc()
  2277  	state := s1.fsm.State()
  2278  	state.UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
  2279  	if err := state.UpsertAllocs(2, []*structs.Allocation{alloc}); err != nil {
  2280  		t.Fatalf("err: %v", err)
  2281  	}
  2282  
  2283  	// Inject a fake system job.
  2284  	job := mock.SystemJob()
  2285  	if err := state.UpsertJob(3, job); err != nil {
  2286  		t.Fatalf("err: %v", err)
  2287  	}
  2288  
  2289  	// Create some evaluations
  2290  	ids, index, err := s1.staticEndpoints.Node.createNodeEvals(alloc.NodeID, 1)
  2291  	if err != nil {
  2292  		t.Fatalf("err: %v", err)
  2293  	}
  2294  	if index == 0 {
  2295  		t.Fatalf("bad: %d", index)
  2296  	}
  2297  	if len(ids) != 2 {
  2298  		t.Fatalf("bad: %s", ids)
  2299  	}
  2300  
  2301  	// Lookup the evaluations
  2302  	ws := memdb.NewWatchSet()
  2303  	evalByType := make(map[string]*structs.Evaluation, 2)
  2304  	for _, id := range ids {
  2305  		eval, err := state.EvalByID(ws, id)
  2306  		if err != nil {
  2307  			t.Fatalf("err: %v", err)
  2308  		}
  2309  		if eval == nil {
  2310  			t.Fatalf("expected eval")
  2311  		}
  2312  
  2313  		if old, ok := evalByType[eval.Type]; ok {
  2314  			t.Fatalf("multiple evals of the same type: %v and %v", old, eval)
  2315  		}
  2316  
  2317  		evalByType[eval.Type] = eval
  2318  	}
  2319  
  2320  	if len(evalByType) != 2 {
  2321  		t.Fatalf("Expected a service and system job; got %#v", evalByType)
  2322  	}
  2323  
  2324  	// Ensure the evals are correct.
  2325  	for schedType, eval := range evalByType {
  2326  		expPriority := alloc.Job.Priority
  2327  		expJobID := alloc.JobID
  2328  		if schedType == "system" {
  2329  			expPriority = job.Priority
  2330  			expJobID = job.ID
  2331  		}
  2332  
  2333  		if eval.CreateIndex != index {
  2334  			t.Fatalf("CreateIndex mis-match on type %v: %#v", schedType, eval)
  2335  		}
  2336  		if eval.TriggeredBy != structs.EvalTriggerNodeUpdate {
  2337  			t.Fatalf("TriggeredBy incorrect on type %v: %#v", schedType, eval)
  2338  		}
  2339  		if eval.NodeID != alloc.NodeID {
  2340  			t.Fatalf("NodeID incorrect on type %v: %#v", schedType, eval)
  2341  		}
  2342  		if eval.NodeModifyIndex != 1 {
  2343  			t.Fatalf("NodeModifyIndex incorrect on type %v: %#v", schedType, eval)
  2344  		}
  2345  		if eval.Status != structs.EvalStatusPending {
  2346  			t.Fatalf("Status incorrect on type %v: %#v", schedType, eval)
  2347  		}
  2348  		if eval.Priority != expPriority {
  2349  			t.Fatalf("Priority incorrect on type %v: %#v", schedType, eval)
  2350  		}
  2351  		if eval.JobID != expJobID {
  2352  			t.Fatalf("JobID incorrect on type %v: %#v", schedType, eval)
  2353  		}
  2354  	}
  2355  }
  2356  
  2357  func TestClientEndpoint_Evaluate(t *testing.T) {
  2358  	t.Parallel()
  2359  	s1 := TestServer(t, func(c *Config) {
  2360  		c.NumSchedulers = 0 // Prevent automatic dequeue
  2361  	})
  2362  	defer s1.Shutdown()
  2363  	codec := rpcClient(t, s1)
  2364  	testutil.WaitForLeader(t, s1.RPC)
  2365  
  2366  	// Inject fake evaluations
  2367  	alloc := mock.Alloc()
  2368  	node := mock.Node()
  2369  	node.ID = alloc.NodeID
  2370  	state := s1.fsm.State()
  2371  	err := state.UpsertNode(1, node)
  2372  	if err != nil {
  2373  		t.Fatalf("err: %v", err)
  2374  	}
  2375  	state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID))
  2376  	err = state.UpsertAllocs(3, []*structs.Allocation{alloc})
  2377  	if err != nil {
  2378  		t.Fatalf("err: %v", err)
  2379  	}
  2380  
  2381  	// Re-evaluate
  2382  	req := &structs.NodeEvaluateRequest{
  2383  		NodeID:       alloc.NodeID,
  2384  		WriteRequest: structs.WriteRequest{Region: "global"},
  2385  	}
  2386  
  2387  	// Fetch the response
  2388  	var resp structs.NodeUpdateResponse
  2389  	if err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp); err != nil {
  2390  		t.Fatalf("err: %v", err)
  2391  	}
  2392  	if resp.Index == 0 {
  2393  		t.Fatalf("bad index: %d", resp.Index)
  2394  	}
  2395  
  2396  	// Create some evaluations
  2397  	ids := resp.EvalIDs
  2398  	if len(ids) != 1 {
  2399  		t.Fatalf("bad: %s", ids)
  2400  	}
  2401  
  2402  	// Lookup the evaluation
  2403  	ws := memdb.NewWatchSet()
  2404  	eval, err := state.EvalByID(ws, ids[0])
  2405  	if err != nil {
  2406  		t.Fatalf("err: %v", err)
  2407  	}
  2408  	if eval == nil {
  2409  		t.Fatalf("expected eval")
  2410  	}
  2411  	if eval.CreateIndex != resp.Index {
  2412  		t.Fatalf("index mis-match")
  2413  	}
  2414  
  2415  	if eval.Priority != alloc.Job.Priority {
  2416  		t.Fatalf("bad: %#v", eval)
  2417  	}
  2418  	if eval.Type != alloc.Job.Type {
  2419  		t.Fatalf("bad: %#v", eval)
  2420  	}
  2421  	if eval.TriggeredBy != structs.EvalTriggerNodeUpdate {
  2422  		t.Fatalf("bad: %#v", eval)
  2423  	}
  2424  	if eval.JobID != alloc.JobID {
  2425  		t.Fatalf("bad: %#v", eval)
  2426  	}
  2427  	if eval.NodeID != alloc.NodeID {
  2428  		t.Fatalf("bad: %#v", eval)
  2429  	}
  2430  	if eval.NodeModifyIndex != 1 {
  2431  		t.Fatalf("bad: %#v", eval)
  2432  	}
  2433  	if eval.Status != structs.EvalStatusPending {
  2434  		t.Fatalf("bad: %#v", eval)
  2435  	}
  2436  }
  2437  
  2438  func TestClientEndpoint_Evaluate_ACL(t *testing.T) {
  2439  	t.Parallel()
  2440  	s1, root := TestACLServer(t, nil)
  2441  	defer s1.Shutdown()
  2442  	codec := rpcClient(t, s1)
  2443  	testutil.WaitForLeader(t, s1.RPC)
  2444  	assert := assert.New(t)
  2445  
  2446  	// Create the node with an alloc
  2447  	alloc := mock.Alloc()
  2448  	node := mock.Node()
  2449  	node.ID = alloc.NodeID
  2450  	state := s1.fsm.State()
  2451  
  2452  	assert.Nil(state.UpsertNode(1, node), "UpsertNode")
  2453  	assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID)), "UpsertJobSummary")
  2454  	assert.Nil(state.UpsertAllocs(3, []*structs.Allocation{alloc}), "UpsertAllocs")
  2455  
  2456  	// Create the policy and tokens
  2457  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
  2458  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
  2459  
  2460  	// Re-evaluate without a token and expect failure
  2461  	req := &structs.NodeEvaluateRequest{
  2462  		NodeID:       alloc.NodeID,
  2463  		WriteRequest: structs.WriteRequest{Region: "global"},
  2464  	}
  2465  	{
  2466  		var resp structs.NodeUpdateResponse
  2467  		err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
  2468  		assert.NotNil(err, "RPC")
  2469  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2470  	}
  2471  
  2472  	// Try with a valid token
  2473  	req.AuthToken = validToken.SecretID
  2474  	{
  2475  		var resp structs.NodeUpdateResponse
  2476  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
  2477  	}
  2478  
  2479  	// Try with a invalid token
  2480  	req.AuthToken = invalidToken.SecretID
  2481  	{
  2482  		var resp structs.NodeUpdateResponse
  2483  		err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
  2484  		assert.NotNil(err, "RPC")
  2485  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2486  	}
  2487  
  2488  	// Try with a root token
  2489  	req.AuthToken = root.SecretID
  2490  	{
  2491  		var resp structs.NodeUpdateResponse
  2492  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
  2493  	}
  2494  }
  2495  
  2496  func TestClientEndpoint_ListNodes(t *testing.T) {
  2497  	t.Parallel()
  2498  	s1 := TestServer(t, nil)
  2499  	defer s1.Shutdown()
  2500  	codec := rpcClient(t, s1)
  2501  	testutil.WaitForLeader(t, s1.RPC)
  2502  
  2503  	// Create the register request
  2504  	node := mock.Node()
  2505  	reg := &structs.NodeRegisterRequest{
  2506  		Node:         node,
  2507  		WriteRequest: structs.WriteRequest{Region: "global"},
  2508  	}
  2509  
  2510  	// Fetch the response
  2511  	var resp structs.GenericResponse
  2512  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2513  		t.Fatalf("err: %v", err)
  2514  	}
  2515  	node.CreateIndex = resp.Index
  2516  	node.ModifyIndex = resp.Index
  2517  
  2518  	// Lookup the node
  2519  	get := &structs.NodeListRequest{
  2520  		QueryOptions: structs.QueryOptions{Region: "global"},
  2521  	}
  2522  	var resp2 structs.NodeListResponse
  2523  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp2); err != nil {
  2524  		t.Fatalf("err: %v", err)
  2525  	}
  2526  	if resp2.Index != resp.Index {
  2527  		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
  2528  	}
  2529  
  2530  	if len(resp2.Nodes) != 1 {
  2531  		t.Fatalf("bad: %#v", resp2.Nodes)
  2532  	}
  2533  	if resp2.Nodes[0].ID != node.ID {
  2534  		t.Fatalf("bad: %#v", resp2.Nodes[0])
  2535  	}
  2536  
  2537  	// Lookup the node with prefix
  2538  	get = &structs.NodeListRequest{
  2539  		QueryOptions: structs.QueryOptions{Region: "global", Prefix: node.ID[:4]},
  2540  	}
  2541  	var resp3 structs.NodeListResponse
  2542  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp3); err != nil {
  2543  		t.Fatalf("err: %v", err)
  2544  	}
  2545  	if resp3.Index != resp.Index {
  2546  		t.Fatalf("Bad index: %d %d", resp3.Index, resp2.Index)
  2547  	}
  2548  
  2549  	if len(resp3.Nodes) != 1 {
  2550  		t.Fatalf("bad: %#v", resp3.Nodes)
  2551  	}
  2552  	if resp3.Nodes[0].ID != node.ID {
  2553  		t.Fatalf("bad: %#v", resp3.Nodes[0])
  2554  	}
  2555  }
  2556  
  2557  func TestClientEndpoint_ListNodes_ACL(t *testing.T) {
  2558  	t.Parallel()
  2559  	s1, root := TestACLServer(t, nil)
  2560  	defer s1.Shutdown()
  2561  	codec := rpcClient(t, s1)
  2562  	testutil.WaitForLeader(t, s1.RPC)
  2563  	assert := assert.New(t)
  2564  
  2565  	// Create the node
  2566  	node := mock.Node()
  2567  	state := s1.fsm.State()
  2568  	assert.Nil(state.UpsertNode(1, node), "UpsertNode")
  2569  
  2570  	// Create the namespace policy and tokens
  2571  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
  2572  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))
  2573  
  2574  	// Lookup the node without a token and expect failure
  2575  	req := &structs.NodeListRequest{
  2576  		QueryOptions: structs.QueryOptions{Region: "global"},
  2577  	}
  2578  	{
  2579  		var resp structs.NodeListResponse
  2580  		err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
  2581  		assert.NotNil(err, "RPC")
  2582  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2583  	}
  2584  
  2585  	// Try with a valid token
  2586  	req.AuthToken = validToken.SecretID
  2587  	{
  2588  		var resp structs.NodeListResponse
  2589  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
  2590  		assert.Equal(node.ID, resp.Nodes[0].ID)
  2591  	}
  2592  
  2593  	// Try with a invalid token
  2594  	req.AuthToken = invalidToken.SecretID
  2595  	{
  2596  		var resp structs.NodeListResponse
  2597  		err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
  2598  		assert.NotNil(err, "RPC")
  2599  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2600  	}
  2601  
  2602  	// Try with a root token
  2603  	req.AuthToken = root.SecretID
  2604  	{
  2605  		var resp structs.NodeListResponse
  2606  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
  2607  		assert.Equal(node.ID, resp.Nodes[0].ID)
  2608  	}
  2609  }
  2610  
  2611  func TestClientEndpoint_ListNodes_Blocking(t *testing.T) {
  2612  	t.Parallel()
  2613  	s1 := TestServer(t, nil)
  2614  	defer s1.Shutdown()
  2615  	state := s1.fsm.State()
  2616  	codec := rpcClient(t, s1)
  2617  	testutil.WaitForLeader(t, s1.RPC)
  2618  
  2619  	// Disable drainer to prevent drain from completing during test
  2620  	s1.nodeDrainer.SetEnabled(false, nil)
  2621  
  2622  	// Create the node
  2623  	node := mock.Node()
  2624  
  2625  	// Node upsert triggers watches
  2626  	errCh := make(chan error, 1)
  2627  	timer := time.AfterFunc(100*time.Millisecond, func() {
  2628  		errCh <- state.UpsertNode(2, node)
  2629  	})
  2630  	defer timer.Stop()
  2631  
  2632  	req := &structs.NodeListRequest{
  2633  		QueryOptions: structs.QueryOptions{
  2634  			Region:        "global",
  2635  			MinQueryIndex: 1,
  2636  		},
  2637  	}
  2638  	start := time.Now()
  2639  	var resp structs.NodeListResponse
  2640  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp); err != nil {
  2641  		t.Fatalf("err: %v", err)
  2642  	}
  2643  
  2644  	if err := <-errCh; err != nil {
  2645  		t.Fatalf("error from timer: %v", err)
  2646  	}
  2647  
  2648  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  2649  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  2650  	}
  2651  	if resp.Index != 2 {
  2652  		t.Fatalf("Bad index: %d %d", resp.Index, 2)
  2653  	}
  2654  	if len(resp.Nodes) != 1 || resp.Nodes[0].ID != node.ID {
  2655  		t.Fatalf("bad: %#v", resp.Nodes)
  2656  	}
  2657  
  2658  	// Node drain updates trigger watches.
  2659  	time.AfterFunc(100*time.Millisecond, func() {
  2660  		s := &structs.DrainStrategy{
  2661  			DrainSpec: structs.DrainSpec{
  2662  				Deadline: 10 * time.Second,
  2663  			},
  2664  		}
  2665  		errCh <- state.UpdateNodeDrain(3, node.ID, s, false, 0, nil)
  2666  	})
  2667  
  2668  	req.MinQueryIndex = 2
  2669  	var resp2 structs.NodeListResponse
  2670  	start = time.Now()
  2671  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp2); err != nil {
  2672  		t.Fatalf("err: %v", err)
  2673  	}
  2674  
  2675  	if err := <-errCh; err != nil {
  2676  		t.Fatalf("error from timer: %v", err)
  2677  	}
  2678  
  2679  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  2680  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
  2681  	}
  2682  	if resp2.Index != 3 {
  2683  		t.Fatalf("Bad index: %d %d", resp2.Index, 3)
  2684  	}
  2685  	if len(resp2.Nodes) != 1 || !resp2.Nodes[0].Drain {
  2686  		t.Fatalf("bad: %#v", resp2.Nodes)
  2687  	}
  2688  
  2689  	// Node status update triggers watches
  2690  	time.AfterFunc(100*time.Millisecond, func() {
  2691  		errCh <- state.UpdateNodeStatus(40, node.ID, structs.NodeStatusDown, 0, nil)
  2692  	})
  2693  
  2694  	req.MinQueryIndex = 38
  2695  	var resp3 structs.NodeListResponse
  2696  	start = time.Now()
  2697  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp3); err != nil {
  2698  		t.Fatalf("err: %v", err)
  2699  	}
  2700  
  2701  	if err := <-errCh; err != nil {
  2702  		t.Fatalf("error from timer: %v", err)
  2703  	}
  2704  
  2705  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  2706  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp3)
  2707  	}
  2708  	if resp3.Index != 40 {
  2709  		t.Fatalf("Bad index: %d %d", resp3.Index, 40)
  2710  	}
  2711  	if len(resp3.Nodes) != 1 || resp3.Nodes[0].Status != structs.NodeStatusDown {
  2712  		t.Fatalf("bad: %#v", resp3.Nodes)
  2713  	}
  2714  
  2715  	// Node delete triggers watches.
  2716  	time.AfterFunc(100*time.Millisecond, func() {
  2717  		errCh <- state.DeleteNode(50, node.ID)
  2718  	})
  2719  
  2720  	req.MinQueryIndex = 45
  2721  	var resp4 structs.NodeListResponse
  2722  	start = time.Now()
  2723  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp4); err != nil {
  2724  		t.Fatalf("err: %v", err)
  2725  	}
  2726  
  2727  	if err := <-errCh; err != nil {
  2728  		t.Fatalf("error from timer: %v", err)
  2729  	}
  2730  
  2731  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  2732  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp4)
  2733  	}
  2734  	if resp4.Index != 50 {
  2735  		t.Fatalf("Bad index: %d %d", resp4.Index, 50)
  2736  	}
  2737  	if len(resp4.Nodes) != 0 {
  2738  		t.Fatalf("bad: %#v", resp4.Nodes)
  2739  	}
  2740  }
  2741  
  2742  func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) {
  2743  	t.Parallel()
  2744  	s1 := TestServer(t, nil)
  2745  	defer s1.Shutdown()
  2746  	state := s1.fsm.State()
  2747  	codec := rpcClient(t, s1)
  2748  	testutil.WaitForLeader(t, s1.RPC)
  2749  
  2750  	// Create the node
  2751  	node := mock.Node()
  2752  	if err := state.UpsertNode(2, node); err != nil {
  2753  		t.Fatalf("err: %v", err)
  2754  	}
  2755  
  2756  	// Create an alloc
  2757  	alloc := mock.Alloc()
  2758  	task := alloc.Job.TaskGroups[0].Tasks[0]
  2759  	tasks := []string{task.Name}
  2760  	if err := state.UpsertAllocs(3, []*structs.Allocation{alloc}); err != nil {
  2761  		t.Fatalf("err: %v", err)
  2762  	}
  2763  
  2764  	req := &structs.DeriveVaultTokenRequest{
  2765  		NodeID:   node.ID,
  2766  		SecretID: uuid.Generate(),
  2767  		AllocID:  alloc.ID,
  2768  		Tasks:    tasks,
  2769  		QueryOptions: structs.QueryOptions{
  2770  			Region: "global",
  2771  		},
  2772  	}
  2773  
  2774  	var resp structs.DeriveVaultTokenResponse
  2775  	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
  2776  		t.Fatalf("bad: %v", err)
  2777  	}
  2778  
  2779  	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "SecretID mismatch") {
  2780  		t.Fatalf("Expected SecretID mismatch: %v", resp.Error)
  2781  	}
  2782  
  2783  	// Put the correct SecretID
  2784  	req.SecretID = node.SecretID
  2785  
  2786  	// Now we should get an error about the allocation not running on the node
  2787  	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
  2788  		t.Fatalf("bad: %v", err)
  2789  	}
  2790  	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "not running on Node") {
  2791  		t.Fatalf("Expected not running on node error: %v", resp.Error)
  2792  	}
  2793  
  2794  	// Update to be running on the node
  2795  	alloc.NodeID = node.ID
  2796  	if err := state.UpsertAllocs(4, []*structs.Allocation{alloc}); err != nil {
  2797  		t.Fatalf("err: %v", err)
  2798  	}
  2799  
  2800  	// Now we should get an error about the job not needing any Vault secrets
  2801  	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
  2802  		t.Fatalf("bad: %v", err)
  2803  	}
  2804  	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "does not require") {
  2805  		t.Fatalf("Expected no policies error: %v", resp.Error)
  2806  	}
  2807  
  2808  	// Update to be terminal
  2809  	alloc.DesiredStatus = structs.AllocDesiredStatusStop
  2810  	if err := state.UpsertAllocs(5, []*structs.Allocation{alloc}); err != nil {
  2811  		t.Fatalf("err: %v", err)
  2812  	}
  2813  
  2814  	// Now we should get an error about the job not needing any Vault secrets
  2815  	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
  2816  		t.Fatalf("bad: %v", err)
  2817  	}
  2818  	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "terminal") {
  2819  		t.Fatalf("Expected terminal allocation error: %v", resp.Error)
  2820  	}
  2821  }
  2822  
  2823  func TestClientEndpoint_DeriveVaultToken(t *testing.T) {
  2824  	t.Parallel()
  2825  	s1 := TestServer(t, nil)
  2826  	defer s1.Shutdown()
  2827  	state := s1.fsm.State()
  2828  	codec := rpcClient(t, s1)
  2829  	testutil.WaitForLeader(t, s1.RPC)
  2830  
  2831  	// Enable vault and allow authenticated
  2832  	tr := true
  2833  	s1.config.VaultConfig.Enabled = &tr
  2834  	s1.config.VaultConfig.AllowUnauthenticated = &tr
  2835  
  2836  	// Replace the Vault Client on the server
  2837  	tvc := &TestVaultClient{}
  2838  	s1.vault = tvc
  2839  
  2840  	// Create the node
  2841  	node := mock.Node()
  2842  	if err := state.UpsertNode(2, node); err != nil {
  2843  		t.Fatalf("err: %v", err)
  2844  	}
  2845  
  2846  	// Create an alloc an allocation that has vault policies required
  2847  	alloc := mock.Alloc()
  2848  	alloc.NodeID = node.ID
  2849  	task := alloc.Job.TaskGroups[0].Tasks[0]
  2850  	tasks := []string{task.Name}
  2851  	task.Vault = &structs.Vault{Policies: []string{"a", "b"}}
  2852  	if err := state.UpsertAllocs(3, []*structs.Allocation{alloc}); err != nil {
  2853  		t.Fatalf("err: %v", err)
  2854  	}
  2855  
  2856  	// Return a secret for the task
  2857  	token := uuid.Generate()
  2858  	accessor := uuid.Generate()
  2859  	ttl := 10
  2860  	secret := &vapi.Secret{
  2861  		WrapInfo: &vapi.SecretWrapInfo{
  2862  			Token:           token,
  2863  			WrappedAccessor: accessor,
  2864  			TTL:             ttl,
  2865  		},
  2866  	}
  2867  	tvc.SetCreateTokenSecret(alloc.ID, task.Name, secret)
  2868  
  2869  	req := &structs.DeriveVaultTokenRequest{
  2870  		NodeID:   node.ID,
  2871  		SecretID: node.SecretID,
  2872  		AllocID:  alloc.ID,
  2873  		Tasks:    tasks,
  2874  		QueryOptions: structs.QueryOptions{
  2875  			Region: "global",
  2876  		},
  2877  	}
  2878  
  2879  	var resp structs.DeriveVaultTokenResponse
  2880  	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
  2881  		t.Fatalf("bad: %v", err)
  2882  	}
  2883  	if resp.Error != nil {
  2884  		t.Fatalf("bad: %v", resp.Error)
  2885  	}
  2886  
  2887  	// Check the state store and ensure that we created a VaultAccessor
  2888  	ws := memdb.NewWatchSet()
  2889  	va, err := state.VaultAccessor(ws, accessor)
  2890  	if err != nil {
  2891  		t.Fatalf("bad: %v", err)
  2892  	}
  2893  	if va == nil {
  2894  		t.Fatalf("bad: %v", va)
  2895  	}
  2896  
  2897  	if va.CreateIndex == 0 {
  2898  		t.Fatalf("bad: %v", va)
  2899  	}
  2900  
  2901  	va.CreateIndex = 0
  2902  	expected := &structs.VaultAccessor{
  2903  		AllocID:     alloc.ID,
  2904  		Task:        task.Name,
  2905  		NodeID:      alloc.NodeID,
  2906  		Accessor:    accessor,
  2907  		CreationTTL: ttl,
  2908  	}
  2909  
  2910  	if !reflect.DeepEqual(expected, va) {
  2911  		t.Fatalf("Got %#v; want %#v", va, expected)
  2912  	}
  2913  }
  2914  
  2915  func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) {
  2916  	t.Parallel()
  2917  	s1 := TestServer(t, nil)
  2918  	defer s1.Shutdown()
  2919  	state := s1.fsm.State()
  2920  	codec := rpcClient(t, s1)
  2921  	testutil.WaitForLeader(t, s1.RPC)
  2922  
  2923  	// Enable vault and allow authenticated
  2924  	tr := true
  2925  	s1.config.VaultConfig.Enabled = &tr
  2926  	s1.config.VaultConfig.AllowUnauthenticated = &tr
  2927  
  2928  	// Replace the Vault Client on the server
  2929  	tvc := &TestVaultClient{}
  2930  	s1.vault = tvc
  2931  
  2932  	// Create the node
  2933  	node := mock.Node()
  2934  	if err := state.UpsertNode(2, node); err != nil {
  2935  		t.Fatalf("err: %v", err)
  2936  	}
  2937  
  2938  	// Create an alloc an allocation that has vault policies required
  2939  	alloc := mock.Alloc()
  2940  	alloc.NodeID = node.ID
  2941  	task := alloc.Job.TaskGroups[0].Tasks[0]
  2942  	tasks := []string{task.Name}
  2943  	task.Vault = &structs.Vault{Policies: []string{"a", "b"}}
  2944  	if err := state.UpsertAllocs(3, []*structs.Allocation{alloc}); err != nil {
  2945  		t.Fatalf("err: %v", err)
  2946  	}
  2947  
  2948  	// Return an error when creating the token
  2949  	tvc.SetCreateTokenError(alloc.ID, task.Name,
  2950  		structs.NewRecoverableError(fmt.Errorf("recover"), true))
  2951  
  2952  	req := &structs.DeriveVaultTokenRequest{
  2953  		NodeID:   node.ID,
  2954  		SecretID: node.SecretID,
  2955  		AllocID:  alloc.ID,
  2956  		Tasks:    tasks,
  2957  		QueryOptions: structs.QueryOptions{
  2958  			Region: "global",
  2959  		},
  2960  	}
  2961  
  2962  	var resp structs.DeriveVaultTokenResponse
  2963  	err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp)
  2964  	if err != nil {
  2965  		t.Fatalf("bad: %v", err)
  2966  	}
  2967  	if resp.Error == nil || !resp.Error.IsRecoverable() {
  2968  		t.Fatalf("bad: %+v", resp.Error)
  2969  	}
  2970  }
  2971  
  2972  func TestClientEndpoint_EmitEvents(t *testing.T) {
  2973  	t.Parallel()
  2974  	require := require.New(t)
  2975  
  2976  	s1 := TestServer(t, nil)
  2977  	state := s1.fsm.State()
  2978  	defer s1.Shutdown()
  2979  	codec := rpcClient(t, s1)
  2980  	testutil.WaitForLeader(t, s1.RPC)
  2981  
  2982  	// create a node that we can register our event to
  2983  	node := mock.Node()
  2984  	err := state.UpsertNode(2, node)
  2985  	require.Nil(err)
  2986  
  2987  	nodeEvent := &structs.NodeEvent{
  2988  		Message:   "Registration failed",
  2989  		Subsystem: "Server",
  2990  		Timestamp: time.Now(),
  2991  	}
  2992  
  2993  	nodeEvents := map[string][]*structs.NodeEvent{node.ID: {nodeEvent}}
  2994  	req := structs.EmitNodeEventsRequest{
  2995  		NodeEvents:   nodeEvents,
  2996  		WriteRequest: structs.WriteRequest{Region: "global"},
  2997  	}
  2998  
  2999  	var resp structs.GenericResponse
  3000  	err = msgpackrpc.CallWithCodec(codec, "Node.EmitEvents", &req, &resp)
  3001  	require.Nil(err)
  3002  	require.NotEqual(0, resp.Index)
  3003  
  3004  	// Check for the node in the FSM
  3005  	ws := memdb.NewWatchSet()
  3006  	out, err := state.NodeByID(ws, node.ID)
  3007  	require.Nil(err)
  3008  	require.False(len(out.Events) < 2)
  3009  }