gopkg.in/hashicorp/nomad.v0@v0.11.8/nomad/node_endpoint_test.go (about)

     1  package nomad
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"net"
     7  	"reflect"
     8  	"strings"
     9  	"testing"
    10  	"time"
    11  
    12  	memdb "github.com/hashicorp/go-memdb"
    13  	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
    14  	"github.com/hashicorp/nomad/acl"
    15  	"github.com/hashicorp/nomad/command/agent/consul"
    16  	"github.com/hashicorp/nomad/helper"
    17  	"github.com/hashicorp/nomad/helper/uuid"
    18  	"github.com/hashicorp/nomad/nomad/mock"
    19  	"github.com/hashicorp/nomad/nomad/state"
    20  	"github.com/hashicorp/nomad/nomad/structs"
    21  	"github.com/hashicorp/nomad/testutil"
    22  	vapi "github.com/hashicorp/vault/api"
    23  	"github.com/kr/pretty"
    24  	"github.com/stretchr/testify/assert"
    25  	"github.com/stretchr/testify/require"
    26  )
    27  
// TestClientEndpoint_Register exercises the basic Node.Register RPC path: a
// node registers successfully, is tracked as a connected client, is persisted
// to the FSM with a computed class, and its connection tracking entry is
// removed once the codec is closed.
func TestClientEndpoint_Register(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Check that we have no client connections
	require.Empty(s1.connectedNodes())

	// Create the register request
	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// A zero index would mean the write never went through Raft.
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check that we have the client connections
	nodes := s1.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.CreateIndex != resp.Index {
		t.Fatalf("index mis-match")
	}
	// Registration is expected to derive a computed node class server-side.
	if out.ComputedClass == "" {
		t.Fatal("ComputedClass not set")
	}

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := s1.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}
    87  
// This test asserts that we only track node connections if they are not from
// forwarded RPCs. This is essential otherwise we will think a Yamux session to
// a Nomad server is actually the session to the node.
func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Two-server cluster so a request sent to the non-leader is forwarded
	// to the leader.
	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
	})

	defer cleanupS1()
	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
	})
	defer cleanupS2()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	// Determine the non-leader server
	var leader, nonLeader *Server
	if s1.IsLeader() {
		leader = s1
		nonLeader = s2
	} else {
		leader = s2
		nonLeader = s1
	}

	// Send the requests to the non-leader
	codec := rpcClient(t, nonLeader)

	// Check that we have no client connections
	require.Empty(nonLeader.connectedNodes())
	require.Empty(leader.connectedNodes())

	// Create the register request
	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check that we have the client connections on the non leader
	nodes := nonLeader.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check that we have no client connections on the leader; the forwarded
	// RPC must not be tracked as a node connection there.
	nodes = leader.connectedNodes()
	require.Empty(nodes)

	// Check for the node in the FSM; wait since replication of the applied
	// write to the leader's state is asynchronous from this client's view.
	state := leader.State()
	testutil.WaitForResult(func() (bool, error) {
		out, err := state.NodeByID(nil, node.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("expected node")
		}
		if out.CreateIndex != resp.Index {
			return false, fmt.Errorf("index mis-match")
		}
		if out.ComputedClass == "" {
			return false, fmt.Errorf("ComputedClass not set")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := nonLeader.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}
   181  
   182  func TestClientEndpoint_Register_SecretMismatch(t *testing.T) {
   183  	t.Parallel()
   184  
   185  	s1, cleanupS1 := TestServer(t, nil)
   186  	defer cleanupS1()
   187  	codec := rpcClient(t, s1)
   188  	testutil.WaitForLeader(t, s1.RPC)
   189  
   190  	// Create the register request
   191  	node := mock.Node()
   192  	req := &structs.NodeRegisterRequest{
   193  		Node:         node,
   194  		WriteRequest: structs.WriteRequest{Region: "global"},
   195  	}
   196  
   197  	// Fetch the response
   198  	var resp structs.GenericResponse
   199  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
   200  		t.Fatalf("err: %v", err)
   201  	}
   202  
   203  	// Update the nodes SecretID
   204  	node.SecretID = uuid.Generate()
   205  	err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp)
   206  	if err == nil || !strings.Contains(err.Error(), "Not registering") {
   207  		t.Fatalf("Expecting error regarding mismatching secret id: %v", err)
   208  	}
   209  }
   210  
// Test the deprecated single node deregistration path
func TestClientEndpoint_DeregisterOne(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Deregister via the deprecated single-node RPC.
	dereg := &structs.NodeDeregisterRequest{
		NodeID:       node.ID,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	// A zero index would mean the delete never went through Raft.
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for the node in the FSM; it must be gone after deregistration.
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected node")
	}
}
   257  
// TestClientEndpoint_Deregister_ACL covers Node.BatchDeregister under ACLs:
// no token fails, a node-write token succeeds, a node-read token fails, and
// the root token succeeds.
func TestClientEndpoint_Deregister_ACL(t *testing.T) {
	t.Parallel()

	s1, root, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the node
	node := mock.Node()
	node1 := mock.Node()
	state := s1.fsm.State()
	if err := state.UpsertNode(1, node); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := state.UpsertNode(2, node1); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create the policy and tokens: write grants deregistration, read does not.
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))

	// Deregister without any token and expect it to fail
	dereg := &structs.NodeBatchDeregisterRequest{
		NodeIDs:      []string{node.ID},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp); err == nil {
		t.Fatalf("node de-register succeeded")
	}

	// Deregister with a valid token
	dereg.AuthToken = validToken.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for the node in the FSM; it must have been removed.
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected node")
	}

	// Deregister with an invalid token.
	dereg1 := &structs.NodeBatchDeregisterRequest{
		NodeIDs:      []string{node1.ID},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	dereg1.AuthToken = invalidToken.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg1, &resp); err == nil {
		t.Fatalf("rpc should not have succeeded")
	}

	// Try with a root token
	dereg1.AuthToken = root.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg1, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
}
   323  
   324  func TestClientEndpoint_Deregister_Vault(t *testing.T) {
   325  	t.Parallel()
   326  
   327  	s1, cleanupS1 := TestServer(t, nil)
   328  	defer cleanupS1()
   329  	codec := rpcClient(t, s1)
   330  	testutil.WaitForLeader(t, s1.RPC)
   331  
   332  	// Create the register request
   333  	node := mock.Node()
   334  	reg := &structs.NodeRegisterRequest{
   335  		Node:         node,
   336  		WriteRequest: structs.WriteRequest{Region: "global"},
   337  	}
   338  
   339  	// Fetch the response
   340  	var resp structs.GenericResponse
   341  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   342  		t.Fatalf("err: %v", err)
   343  	}
   344  
   345  	// Swap the servers Vault Client
   346  	tvc := &TestVaultClient{}
   347  	s1.vault = tvc
   348  
   349  	// Put some Vault accessors in the state store for that node
   350  	state := s1.fsm.State()
   351  	va1 := mock.VaultAccessor()
   352  	va1.NodeID = node.ID
   353  	va2 := mock.VaultAccessor()
   354  	va2.NodeID = node.ID
   355  	state.UpsertVaultAccessor(100, []*structs.VaultAccessor{va1, va2})
   356  
   357  	// Deregister
   358  	dereg := &structs.NodeBatchDeregisterRequest{
   359  		NodeIDs:      []string{node.ID},
   360  		WriteRequest: structs.WriteRequest{Region: "global"},
   361  	}
   362  	var resp2 structs.GenericResponse
   363  	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp2); err != nil {
   364  		t.Fatalf("err: %v", err)
   365  	}
   366  	if resp2.Index == 0 {
   367  		t.Fatalf("bad index: %d", resp2.Index)
   368  	}
   369  
   370  	// Check for the node in the FSM
   371  	ws := memdb.NewWatchSet()
   372  	out, err := state.NodeByID(ws, node.ID)
   373  	if err != nil {
   374  		t.Fatalf("err: %v", err)
   375  	}
   376  	if out != nil {
   377  		t.Fatalf("unexpected node")
   378  	}
   379  
   380  	// Check that the endpoint revoked the tokens
   381  	if l := len(tvc.RevokedTokens); l != 2 {
   382  		t.Fatalf("Deregister revoked %d tokens; want 2", l)
   383  	}
   384  }
   385  
// TestClientEndpoint_UpdateStatus checks that Node.UpdateStatus returns a
// heartbeat TTL, bumps the node's ModifyIndex in the FSM, tracks the client
// connection, and drops the connection entry when the codec is closed.
func TestClientEndpoint_UpdateStatus(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Check that we have no client connections
	require.Empty(s1.connectedNodes())

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval; TTLs are jittered within
	// [MinHeartbeatTTL, 2*MinHeartbeatTTL].
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Update the status
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusInit,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for heartbeat interval
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check that we have the client connections
	nodes := s1.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.ModifyIndex != resp2.Index {
		t.Fatalf("index mis-match")
	}

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := s1.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}
   465  
   466  func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) {
   467  	t.Parallel()
   468  
   469  	s1, cleanupS1 := TestServer(t, nil)
   470  	defer cleanupS1()
   471  	codec := rpcClient(t, s1)
   472  	testutil.WaitForLeader(t, s1.RPC)
   473  
   474  	// Create the register request
   475  	node := mock.Node()
   476  	reg := &structs.NodeRegisterRequest{
   477  		Node:         node,
   478  		WriteRequest: structs.WriteRequest{Region: "global"},
   479  	}
   480  
   481  	// Fetch the response
   482  	var resp structs.NodeUpdateResponse
   483  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   484  		t.Fatalf("err: %v", err)
   485  	}
   486  
   487  	// Check for heartbeat interval
   488  	ttl := resp.HeartbeatTTL
   489  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   490  		t.Fatalf("bad: %#v", ttl)
   491  	}
   492  
   493  	// Swap the servers Vault Client
   494  	tvc := &TestVaultClient{}
   495  	s1.vault = tvc
   496  
   497  	// Put some Vault accessors in the state store for that node
   498  	state := s1.fsm.State()
   499  	va1 := mock.VaultAccessor()
   500  	va1.NodeID = node.ID
   501  	va2 := mock.VaultAccessor()
   502  	va2.NodeID = node.ID
   503  	state.UpsertVaultAccessor(100, []*structs.VaultAccessor{va1, va2})
   504  
   505  	// Update the status to be down
   506  	dereg := &structs.NodeUpdateStatusRequest{
   507  		NodeID:       node.ID,
   508  		Status:       structs.NodeStatusDown,
   509  		WriteRequest: structs.WriteRequest{Region: "global"},
   510  	}
   511  	var resp2 structs.NodeUpdateResponse
   512  	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
   513  		t.Fatalf("err: %v", err)
   514  	}
   515  	if resp2.Index == 0 {
   516  		t.Fatalf("bad index: %d", resp2.Index)
   517  	}
   518  
   519  	// Check that the endpoint revoked the tokens
   520  	if l := len(tvc.RevokedTokens); l != 2 {
   521  		t.Fatalf("Deregister revoked %d tokens; want 2", l)
   522  	}
   523  }
   524  
// TestClientEndpoint_UpdateStatus_HeartbeatRecovery asserts that a node that
// registered as down and then heartbeats back records a re-registration node
// event and gets a fresh heartbeat TTL.
func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Check that we have no client connections
	require.Empty(s1.connectedNodes())

	// Create the register request but make the node down
	node := mock.Node()
	node.Status = structs.NodeStatusDown
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.NoError(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Update the status; moving out of down simulates heartbeat recovery.
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusInit,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	require.NoError(msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2))
	require.NotZero(resp2.Index)

	// Check for heartbeat interval
	ttl := resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for the node in the FSM; the second node event must record the
	// heartbeat re-registration.
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.NotNil(out)
	require.EqualValues(resp2.Index, out.ModifyIndex)
	require.Len(out.Events, 2)
	require.Equal(NodeHeartbeatEventReregistered, out.Events[1].Message)
}
   575  
   576  func TestClientEndpoint_Register_GetEvals(t *testing.T) {
   577  	t.Parallel()
   578  
   579  	s1, cleanupS1 := TestServer(t, nil)
   580  	defer cleanupS1()
   581  	codec := rpcClient(t, s1)
   582  	testutil.WaitForLeader(t, s1.RPC)
   583  
   584  	// Register a system job.
   585  	job := mock.SystemJob()
   586  	state := s1.fsm.State()
   587  	if err := state.UpsertJob(1, job); err != nil {
   588  		t.Fatalf("err: %v", err)
   589  	}
   590  
   591  	// Create the register request going directly to ready
   592  	node := mock.Node()
   593  	node.Status = structs.NodeStatusReady
   594  	reg := &structs.NodeRegisterRequest{
   595  		Node:         node,
   596  		WriteRequest: structs.WriteRequest{Region: "global"},
   597  	}
   598  
   599  	// Fetch the response
   600  	var resp structs.NodeUpdateResponse
   601  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   602  		t.Fatalf("err: %v", err)
   603  	}
   604  
   605  	// Check for heartbeat interval
   606  	ttl := resp.HeartbeatTTL
   607  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   608  		t.Fatalf("bad: %#v", ttl)
   609  	}
   610  
   611  	// Check for an eval caused by the system job.
   612  	if len(resp.EvalIDs) != 1 {
   613  		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
   614  	}
   615  
   616  	evalID := resp.EvalIDs[0]
   617  	ws := memdb.NewWatchSet()
   618  	eval, err := state.EvalByID(ws, evalID)
   619  	if err != nil {
   620  		t.Fatalf("could not get eval %v", evalID)
   621  	}
   622  
   623  	if eval.Type != "system" {
   624  		t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
   625  	}
   626  
   627  	// Check for the node in the FSM
   628  	out, err := state.NodeByID(ws, node.ID)
   629  	if err != nil {
   630  		t.Fatalf("err: %v", err)
   631  	}
   632  	if out == nil {
   633  		t.Fatalf("expected node")
   634  	}
   635  	if out.ModifyIndex != resp.Index {
   636  		t.Fatalf("index mis-match")
   637  	}
   638  
   639  	// Transition it to down and then ready
   640  	node.Status = structs.NodeStatusDown
   641  	reg = &structs.NodeRegisterRequest{
   642  		Node:         node,
   643  		WriteRequest: structs.WriteRequest{Region: "global"},
   644  	}
   645  
   646  	// Fetch the response
   647  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   648  		t.Fatalf("err: %v", err)
   649  	}
   650  
   651  	if len(resp.EvalIDs) != 1 {
   652  		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
   653  	}
   654  
   655  	node.Status = structs.NodeStatusReady
   656  	reg = &structs.NodeRegisterRequest{
   657  		Node:         node,
   658  		WriteRequest: structs.WriteRequest{Region: "global"},
   659  	}
   660  
   661  	// Fetch the response
   662  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   663  		t.Fatalf("err: %v", err)
   664  	}
   665  
   666  	if len(resp.EvalIDs) != 1 {
   667  		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
   668  	}
   669  }
   670  
   671  func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) {
   672  	t.Parallel()
   673  
   674  	s1, cleanupS1 := TestServer(t, nil)
   675  	defer cleanupS1()
   676  	codec := rpcClient(t, s1)
   677  	testutil.WaitForLeader(t, s1.RPC)
   678  
   679  	// Register a system job.
   680  	job := mock.SystemJob()
   681  	state := s1.fsm.State()
   682  	if err := state.UpsertJob(1, job); err != nil {
   683  		t.Fatalf("err: %v", err)
   684  	}
   685  
   686  	// Create the register request
   687  	node := mock.Node()
   688  	node.Status = structs.NodeStatusInit
   689  	reg := &structs.NodeRegisterRequest{
   690  		Node:         node,
   691  		WriteRequest: structs.WriteRequest{Region: "global"},
   692  	}
   693  
   694  	// Fetch the response
   695  	var resp structs.NodeUpdateResponse
   696  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   697  		t.Fatalf("err: %v", err)
   698  	}
   699  
   700  	// Check for heartbeat interval
   701  	ttl := resp.HeartbeatTTL
   702  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   703  		t.Fatalf("bad: %#v", ttl)
   704  	}
   705  
   706  	// Update the status
   707  	update := &structs.NodeUpdateStatusRequest{
   708  		NodeID:       node.ID,
   709  		Status:       structs.NodeStatusReady,
   710  		WriteRequest: structs.WriteRequest{Region: "global"},
   711  	}
   712  	var resp2 structs.NodeUpdateResponse
   713  	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", update, &resp2); err != nil {
   714  		t.Fatalf("err: %v", err)
   715  	}
   716  	if resp2.Index == 0 {
   717  		t.Fatalf("bad index: %d", resp2.Index)
   718  	}
   719  
   720  	// Check for an eval caused by the system job.
   721  	if len(resp2.EvalIDs) != 1 {
   722  		t.Fatalf("expected one eval; got %#v", resp2.EvalIDs)
   723  	}
   724  
   725  	evalID := resp2.EvalIDs[0]
   726  	ws := memdb.NewWatchSet()
   727  	eval, err := state.EvalByID(ws, evalID)
   728  	if err != nil {
   729  		t.Fatalf("could not get eval %v", evalID)
   730  	}
   731  
   732  	if eval.Type != "system" {
   733  		t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
   734  	}
   735  
   736  	// Check for heartbeat interval
   737  	ttl = resp2.HeartbeatTTL
   738  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   739  		t.Fatalf("bad: %#v", ttl)
   740  	}
   741  
   742  	// Check for the node in the FSM
   743  	out, err := state.NodeByID(ws, node.ID)
   744  	if err != nil {
   745  		t.Fatalf("err: %v", err)
   746  	}
   747  	if out == nil {
   748  		t.Fatalf("expected node")
   749  	}
   750  	if out.ModifyIndex != resp2.Index {
   751  		t.Fatalf("index mis-match")
   752  	}
   753  }
   754  
// TestClientEndpoint_UpdateStatus_HeartbeatOnly asserts that an UpdateStatus
// call that does not change the node's status acts as a pure heartbeat: it
// returns a fresh TTL and server list but performs no Raft write (Index 0).
func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS1()

	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS2()

	s3, cleanupS3 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS3()
	servers := []*Server{s1, s2, s3}
	TestJoin(t, s1, s2, s3)

	// Wait until every server sees all three peers before issuing RPCs.
	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 3, nil
		}, func(err error) {
			t.Fatalf("should have 3 peers")
		})
	}

	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for heartbeat servers
	serverAddrs := resp.Servers
	if len(serverAddrs) == 0 {
		t.Fatalf("bad: %#v", serverAddrs)
	}

	// Update the status, static state: status is unchanged so this should be
	// treated as a heartbeat only.
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       node.Status,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	// A non-zero index would mean an unnecessary Raft write happened.
	if resp2.Index != 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for heartbeat interval
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}
}
   832  
   833  func TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise(t *testing.T) {
   834  	t.Parallel()
   835  	require := require.New(t)
   836  
   837  	advAddr := "127.0.1.1:1234"
   838  	adv, err := net.ResolveTCPAddr("tcp", advAddr)
   839  	require.Nil(err)
   840  
   841  	s1, cleanupS1 := TestServer(t, func(c *Config) {
   842  		c.ClientRPCAdvertise = adv
   843  	})
   844  	defer cleanupS1()
   845  	codec := rpcClient(t, s1)
   846  	testutil.WaitForLeader(t, s1.RPC)
   847  
   848  	// Create the register request
   849  	node := mock.Node()
   850  	reg := &structs.NodeRegisterRequest{
   851  		Node:         node,
   852  		WriteRequest: structs.WriteRequest{Region: "global"},
   853  	}
   854  
   855  	// Fetch the response
   856  	var resp structs.NodeUpdateResponse
   857  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   858  		t.Fatalf("err: %v", err)
   859  	}
   860  
   861  	// Check for heartbeat interval
   862  	ttl := resp.HeartbeatTTL
   863  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   864  		t.Fatalf("bad: %#v", ttl)
   865  	}
   866  
   867  	// Check for heartbeat servers
   868  	require.Len(resp.Servers, 1)
   869  	require.Equal(resp.Servers[0].RPCAdvertiseAddr, advAddr)
   870  }
   871  
// TestClientEndpoint_UpdateDrain exercises Node.UpdateDrain: setting a drain
// strategy persists it with a forced deadline and StartedAt timestamp and
// emits a node event; a repeated identical request leaves StartedAt (and the
// event list) unchanged; clearing the drain with MarkEligible creates evals
// and emits a drain-disabled event.
func TestClientEndpoint_UpdateDrain(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Disable drainer to prevent drain from completing during test
	s1.nodeDrainer.SetEnabled(false, nil)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	beforeUpdate := time.Now()
	strategy := &structs.DrainStrategy{
		DrainSpec: structs.DrainSpec{
			Deadline: 10 * time.Second,
		},
	}

	// Update the status
	dereg := &structs.NodeUpdateDrainRequest{
		NodeID:        node.ID,
		DrainStrategy: strategy,
		WriteRequest:  structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
	require.NotZero(resp2.Index)

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	require.Nil(err)
	require.True(out.Drain)
	require.Equal(strategy.Deadline, out.DrainStrategy.Deadline)
	require.Len(out.Events, 2)
	require.Equal(NodeDrainEventDrainSet, out.Events[1].Message)

	// before+deadline should be before the forced deadline
	require.True(beforeUpdate.Add(strategy.Deadline).Before(out.DrainStrategy.ForceDeadline))

	// now+deadline should be after the forced deadline
	require.True(time.Now().Add(strategy.Deadline).After(out.DrainStrategy.ForceDeadline))

	drainStartedAt := out.DrainStrategy.StartedAt
	// StartedAt should be close to the time the drain started
	require.WithinDuration(beforeUpdate, drainStartedAt, 1*time.Second)

	// StartedAt shouldn't change if a new request comes while still draining
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
	ws = memdb.NewWatchSet()
	out, err = state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.True(out.DrainStrategy.StartedAt.Equal(drainStartedAt))

	// Register a system job
	job := mock.SystemJob()
	require.Nil(s1.State().UpsertJob(10, job))

	// Update the eligibility and expect evals
	dereg.DrainStrategy = nil
	dereg.MarkEligible = true
	var resp3 structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
	require.NotZero(resp3.Index)
	require.NotZero(resp3.EvalCreateIndex)
	require.Len(resp3.EvalIDs, 1)

	// Check for updated node in the FSM
	ws = memdb.NewWatchSet()
	out, err = state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.Len(out.Events, 4)
	require.Equal(NodeDrainEventDrainDisabled, out.Events[3].Message)

	// Check that calling UpdateDrain with the same DrainStrategy does not emit
	// a node event.
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
	ws = memdb.NewWatchSet()
	out, err = state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.Len(out.Events, 4)
}
   967  
   968  func TestClientEndpoint_UpdateDrain_ACL(t *testing.T) {
   969  	t.Parallel()
   970  
   971  	s1, root, cleanupS1 := TestACLServer(t, nil)
   972  	defer cleanupS1()
   973  	codec := rpcClient(t, s1)
   974  	testutil.WaitForLeader(t, s1.RPC)
   975  	require := require.New(t)
   976  
   977  	// Create the node
   978  	node := mock.Node()
   979  	state := s1.fsm.State()
   980  
   981  	require.Nil(state.UpsertNode(1, node), "UpsertNode")
   982  
   983  	// Create the policy and tokens
   984  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
   985  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
   986  
   987  	// Update the status without a token and expect failure
   988  	dereg := &structs.NodeUpdateDrainRequest{
   989  		NodeID: node.ID,
   990  		DrainStrategy: &structs.DrainStrategy{
   991  			DrainSpec: structs.DrainSpec{
   992  				Deadline: 10 * time.Second,
   993  			},
   994  		},
   995  		WriteRequest: structs.WriteRequest{Region: "global"},
   996  	}
   997  	{
   998  		var resp structs.NodeDrainUpdateResponse
   999  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
  1000  		require.NotNil(err, "RPC")
  1001  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1002  	}
  1003  
  1004  	// Try with a valid token
  1005  	dereg.AuthToken = validToken.SecretID
  1006  	{
  1007  		var resp structs.NodeDrainUpdateResponse
  1008  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
  1009  	}
  1010  
  1011  	// Try with a invalid token
  1012  	dereg.AuthToken = invalidToken.SecretID
  1013  	{
  1014  		var resp structs.NodeDrainUpdateResponse
  1015  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
  1016  		require.NotNil(err, "RPC")
  1017  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1018  	}
  1019  
  1020  	// Try with a root token
  1021  	dereg.AuthToken = root.SecretID
  1022  	{
  1023  		var resp structs.NodeDrainUpdateResponse
  1024  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
  1025  	}
  1026  }
  1027  
// This test ensures that Nomad marks client state of allocations which are in
// pending/running state to lost when a node is marked as down.
func TestClientEndpoint_Drain_Down(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	require := require.New(t)

	// Register a node
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Register a service job with a single instance
	var jobResp structs.JobRegisterResponse
	job := mock.Job()
	job.TaskGroups[0].Count = 1
	jobReq := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp))

	// Register a system job with a single instance
	var jobResp1 structs.JobRegisterResponse
	job1 := mock.SystemJob()
	job1.TaskGroups[0].Count = 1
	jobReq1 := &structs.JobRegisterRequest{
		Job: job1,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job1.Namespace,
		},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq1, &jobResp1))

	// Wait for the scheduler to create an allocation for each job on the node
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		allocs, err := s1.fsm.state.AllocsByJob(ws, job.Namespace, job.ID, true)
		if err != nil {
			return false, err
		}
		allocs1, err := s1.fsm.state.AllocsByJob(ws, job1.Namespace, job1.ID, true)
		if err != nil {
			return false, err
		}
		return len(allocs) > 0 && len(allocs1) > 0, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Drain the node with an already-expired (negative) deadline
	dereg := &structs.NodeUpdateDrainRequest{
		NodeID: node.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: -1 * time.Second,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))

	// Mark the node as down by re-registering it with a down status
	node.Status = structs.NodeStatusDown
	reg = &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Ensure that the allocations have transitioned to lost: the service job
	// is expected to be re-queued (Queued: 1) while the system job only
	// records the lost alloc.
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		summary, err := s1.fsm.state.JobSummaryByID(ws, job.Namespace, job.ID)
		if err != nil {
			return false, err
		}
		expectedSummary := &structs.JobSummary{
			JobID:     job.ID,
			Namespace: job.Namespace,
			Summary: map[string]structs.TaskGroupSummary{
				"web": {
					Queued: 1,
					Lost:   1,
				},
			},
			Children:    new(structs.JobChildrenSummary),
			CreateIndex: jobResp.JobModifyIndex,
			ModifyIndex: summary.ModifyIndex,
		}
		if !reflect.DeepEqual(summary, expectedSummary) {
			return false, fmt.Errorf("Service: expected: %#v, actual: %#v", expectedSummary, summary)
		}

		summary1, err := s1.fsm.state.JobSummaryByID(ws, job1.Namespace, job1.ID)
		if err != nil {
			return false, err
		}
		expectedSummary1 := &structs.JobSummary{
			JobID:     job1.ID,
			Namespace: job1.Namespace,
			Summary: map[string]structs.TaskGroupSummary{
				"web": {
					Lost: 1,
				},
			},
			Children:    new(structs.JobChildrenSummary),
			CreateIndex: jobResp1.JobModifyIndex,
			ModifyIndex: summary1.ModifyIndex,
		}
		if !reflect.DeepEqual(summary1, expectedSummary1) {
			return false, fmt.Errorf("System: expected: %#v, actual: %#v", expectedSummary1, summary1)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
  1160  
  1161  func TestClientEndpoint_UpdateEligibility(t *testing.T) {
  1162  	t.Parallel()
  1163  	require := require.New(t)
  1164  
  1165  	s1, cleanupS1 := TestServer(t, nil)
  1166  	defer cleanupS1()
  1167  	codec := rpcClient(t, s1)
  1168  	testutil.WaitForLeader(t, s1.RPC)
  1169  
  1170  	// Create the register request
  1171  	node := mock.Node()
  1172  	reg := &structs.NodeRegisterRequest{
  1173  		Node:         node,
  1174  		WriteRequest: structs.WriteRequest{Region: "global"},
  1175  	}
  1176  
  1177  	// Fetch the response
  1178  	var resp structs.NodeUpdateResponse
  1179  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
  1180  
  1181  	// Update the eligibility
  1182  	elig := &structs.NodeUpdateEligibilityRequest{
  1183  		NodeID:       node.ID,
  1184  		Eligibility:  structs.NodeSchedulingIneligible,
  1185  		WriteRequest: structs.WriteRequest{Region: "global"},
  1186  	}
  1187  	var resp2 structs.NodeEligibilityUpdateResponse
  1188  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp2))
  1189  	require.NotZero(resp2.Index)
  1190  	require.Zero(resp2.EvalCreateIndex)
  1191  	require.Empty(resp2.EvalIDs)
  1192  
  1193  	// Check for the node in the FSM
  1194  	state := s1.fsm.State()
  1195  	out, err := state.NodeByID(nil, node.ID)
  1196  	require.Nil(err)
  1197  	require.Equal(out.SchedulingEligibility, structs.NodeSchedulingIneligible)
  1198  	require.Len(out.Events, 2)
  1199  	require.Equal(NodeEligibilityEventIneligible, out.Events[1].Message)
  1200  
  1201  	// Register a system job
  1202  	job := mock.SystemJob()
  1203  	require.Nil(s1.State().UpsertJob(10, job))
  1204  
  1205  	// Update the eligibility and expect evals
  1206  	elig.Eligibility = structs.NodeSchedulingEligible
  1207  	var resp3 structs.NodeEligibilityUpdateResponse
  1208  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp3))
  1209  	require.NotZero(resp3.Index)
  1210  	require.NotZero(resp3.EvalCreateIndex)
  1211  	require.Len(resp3.EvalIDs, 1)
  1212  
  1213  	out, err = state.NodeByID(nil, node.ID)
  1214  	require.Nil(err)
  1215  	require.Len(out.Events, 3)
  1216  	require.Equal(NodeEligibilityEventEligible, out.Events[2].Message)
  1217  }
  1218  
  1219  func TestClientEndpoint_UpdateEligibility_ACL(t *testing.T) {
  1220  	t.Parallel()
  1221  
  1222  	s1, root, cleanupS1 := TestACLServer(t, nil)
  1223  	defer cleanupS1()
  1224  	codec := rpcClient(t, s1)
  1225  	testutil.WaitForLeader(t, s1.RPC)
  1226  	require := require.New(t)
  1227  
  1228  	// Create the node
  1229  	node := mock.Node()
  1230  	state := s1.fsm.State()
  1231  
  1232  	require.Nil(state.UpsertNode(1, node), "UpsertNode")
  1233  
  1234  	// Create the policy and tokens
  1235  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
  1236  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
  1237  
  1238  	// Update the status without a token and expect failure
  1239  	dereg := &structs.NodeUpdateEligibilityRequest{
  1240  		NodeID:       node.ID,
  1241  		Eligibility:  structs.NodeSchedulingIneligible,
  1242  		WriteRequest: structs.WriteRequest{Region: "global"},
  1243  	}
  1244  	{
  1245  		var resp structs.NodeEligibilityUpdateResponse
  1246  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
  1247  		require.NotNil(err, "RPC")
  1248  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1249  	}
  1250  
  1251  	// Try with a valid token
  1252  	dereg.AuthToken = validToken.SecretID
  1253  	{
  1254  		var resp structs.NodeEligibilityUpdateResponse
  1255  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
  1256  	}
  1257  
  1258  	// Try with a invalid token
  1259  	dereg.AuthToken = invalidToken.SecretID
  1260  	{
  1261  		var resp structs.NodeEligibilityUpdateResponse
  1262  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
  1263  		require.NotNil(err, "RPC")
  1264  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1265  	}
  1266  
  1267  	// Try with a root token
  1268  	dereg.AuthToken = root.SecretID
  1269  	{
  1270  		var resp structs.NodeEligibilityUpdateResponse
  1271  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
  1272  	}
  1273  }
  1274  
// TestClientEndpoint_GetNode verifies Node.GetNode returns a registered node
// (with computed class and register event), and returns a nil node rather
// than an error for an unknown node ID.
func TestClientEndpoint_GetNode(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	// Registration assigns the indexes; mirror them on the local copy so the
	// DeepEqual comparison below can succeed.
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Lookup the node
	get := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.SingleNodeResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}

	// The server is expected to compute the node class during registration.
	if resp2.Node.ComputedClass == "" {
		t.Fatalf("bad ComputedClass: %#v", resp2.Node)
	}

	// Update the status updated at value
	node.StatusUpdatedAt = resp2.Node.StatusUpdatedAt
	// The test expects SecretID to be stripped from the response, and Events
	// are server-generated; copy/clear them so only the remaining fields are
	// compared by DeepEqual.
	node.SecretID = ""
	node.Events = resp2.Node.Events
	if !reflect.DeepEqual(node, resp2.Node) {
		t.Fatalf("bad: %#v \n %#v", node, resp2.Node)
	}

	// assert that the node register event was set correctly
	if len(resp2.Node.Events) != 1 {
		t.Fatalf("Did not set node events: %#v", resp2.Node)
	}
	if resp2.Node.Events[0].Message != state.NodeRegisterEventRegistered {
		t.Fatalf("Did not set node register event correctly: %#v", resp2.Node)
	}

	// Lookup non-existing node: not an error — Node is nil and the current
	// index is still returned.
	get.NodeID = "12345678-abcd-efab-cdef-123456789abc"
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}
	if resp2.Node != nil {
		t.Fatalf("unexpected node")
	}
}
  1343  
  1344  func TestClientEndpoint_GetNode_ACL(t *testing.T) {
  1345  	t.Parallel()
  1346  
  1347  	s1, root, cleanupS1 := TestACLServer(t, nil)
  1348  	defer cleanupS1()
  1349  	codec := rpcClient(t, s1)
  1350  	testutil.WaitForLeader(t, s1.RPC)
  1351  	assert := assert.New(t)
  1352  
  1353  	// Create the node
  1354  	node := mock.Node()
  1355  	state := s1.fsm.State()
  1356  	assert.Nil(state.UpsertNode(1, node), "UpsertNode")
  1357  
  1358  	// Create the policy and tokens
  1359  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
  1360  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))
  1361  
  1362  	// Lookup the node without a token and expect failure
  1363  	req := &structs.NodeSpecificRequest{
  1364  		NodeID:       node.ID,
  1365  		QueryOptions: structs.QueryOptions{Region: "global"},
  1366  	}
  1367  	{
  1368  		var resp structs.SingleNodeResponse
  1369  		err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
  1370  		assert.NotNil(err, "RPC")
  1371  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1372  	}
  1373  
  1374  	// Try with a valid token
  1375  	req.AuthToken = validToken.SecretID
  1376  	{
  1377  		var resp structs.SingleNodeResponse
  1378  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
  1379  		assert.Equal(node.ID, resp.Node.ID)
  1380  	}
  1381  
  1382  	// Try with a Node.SecretID
  1383  	req.AuthToken = node.SecretID
  1384  	{
  1385  		var resp structs.SingleNodeResponse
  1386  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
  1387  		assert.Equal(node.ID, resp.Node.ID)
  1388  	}
  1389  
  1390  	// Try with a invalid token
  1391  	req.AuthToken = invalidToken.SecretID
  1392  	{
  1393  		var resp structs.SingleNodeResponse
  1394  		err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
  1395  		assert.NotNil(err, "RPC")
  1396  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1397  	}
  1398  
  1399  	// Try with a root token
  1400  	req.AuthToken = root.SecretID
  1401  	{
  1402  		var resp structs.SingleNodeResponse
  1403  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
  1404  		assert.Equal(node.ID, resp.Node.ID)
  1405  	}
  1406  }
  1407  
  1408  func TestClientEndpoint_GetNode_Blocking(t *testing.T) {
  1409  	t.Parallel()
  1410  
  1411  	s1, cleanupS1 := TestServer(t, nil)
  1412  	defer cleanupS1()
  1413  	state := s1.fsm.State()
  1414  	codec := rpcClient(t, s1)
  1415  	testutil.WaitForLeader(t, s1.RPC)
  1416  
  1417  	// Create the node
  1418  	node1 := mock.Node()
  1419  	node2 := mock.Node()
  1420  
  1421  	// First create an unrelated node.
  1422  	time.AfterFunc(100*time.Millisecond, func() {
  1423  		if err := state.UpsertNode(100, node1); err != nil {
  1424  			t.Fatalf("err: %v", err)
  1425  		}
  1426  	})
  1427  
  1428  	// Upsert the node we are watching later
  1429  	time.AfterFunc(200*time.Millisecond, func() {
  1430  		if err := state.UpsertNode(200, node2); err != nil {
  1431  			t.Fatalf("err: %v", err)
  1432  		}
  1433  	})
  1434  
  1435  	// Lookup the node
  1436  	req := &structs.NodeSpecificRequest{
  1437  		NodeID: node2.ID,
  1438  		QueryOptions: structs.QueryOptions{
  1439  			Region:        "global",
  1440  			MinQueryIndex: 150,
  1441  		},
  1442  	}
  1443  	var resp structs.SingleNodeResponse
  1444  	start := time.Now()
  1445  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp); err != nil {
  1446  		t.Fatalf("err: %v", err)
  1447  	}
  1448  
  1449  	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
  1450  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  1451  	}
  1452  	if resp.Index != 200 {
  1453  		t.Fatalf("Bad index: %d %d", resp.Index, 200)
  1454  	}
  1455  	if resp.Node == nil || resp.Node.ID != node2.ID {
  1456  		t.Fatalf("bad: %#v", resp.Node)
  1457  	}
  1458  
  1459  	// Node update triggers watches
  1460  	time.AfterFunc(100*time.Millisecond, func() {
  1461  		nodeUpdate := mock.Node()
  1462  		nodeUpdate.ID = node2.ID
  1463  		nodeUpdate.Status = structs.NodeStatusDown
  1464  		if err := state.UpsertNode(300, nodeUpdate); err != nil {
  1465  			t.Fatalf("err: %v", err)
  1466  		}
  1467  	})
  1468  
  1469  	req.QueryOptions.MinQueryIndex = 250
  1470  	var resp2 structs.SingleNodeResponse
  1471  	start = time.Now()
  1472  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp2); err != nil {
  1473  		t.Fatalf("err: %v", err)
  1474  	}
  1475  
  1476  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  1477  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  1478  	}
  1479  	if resp2.Index != 300 {
  1480  		t.Fatalf("Bad index: %d %d", resp2.Index, 300)
  1481  	}
  1482  	if resp2.Node == nil || resp2.Node.Status != structs.NodeStatusDown {
  1483  		t.Fatalf("bad: %#v", resp2.Node)
  1484  	}
  1485  
  1486  	// Node delete triggers watches
  1487  	time.AfterFunc(100*time.Millisecond, func() {
  1488  		if err := state.DeleteNode(400, []string{node2.ID}); err != nil {
  1489  			t.Fatalf("err: %v", err)
  1490  		}
  1491  	})
  1492  
  1493  	req.QueryOptions.MinQueryIndex = 350
  1494  	var resp3 structs.SingleNodeResponse
  1495  	start = time.Now()
  1496  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp3); err != nil {
  1497  		t.Fatalf("err: %v", err)
  1498  	}
  1499  
  1500  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  1501  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  1502  	}
  1503  	if resp3.Index != 400 {
  1504  		t.Fatalf("Bad index: %d %d", resp2.Index, 400)
  1505  	}
  1506  	if resp3.Node != nil {
  1507  		t.Fatalf("bad: %#v", resp3.Node)
  1508  	}
  1509  }
  1510  
  1511  func TestClientEndpoint_GetAllocs(t *testing.T) {
  1512  	t.Parallel()
  1513  
  1514  	s1, cleanupS1 := TestServer(t, nil)
  1515  	defer cleanupS1()
  1516  	codec := rpcClient(t, s1)
  1517  	testutil.WaitForLeader(t, s1.RPC)
  1518  
  1519  	// Create the register request
  1520  	node := mock.Node()
  1521  	reg := &structs.NodeRegisterRequest{
  1522  		Node:         node,
  1523  		WriteRequest: structs.WriteRequest{Region: "global"},
  1524  	}
  1525  
  1526  	// Fetch the response
  1527  	var resp structs.GenericResponse
  1528  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  1529  		t.Fatalf("err: %v", err)
  1530  	}
  1531  	node.CreateIndex = resp.Index
  1532  	node.ModifyIndex = resp.Index
  1533  
  1534  	// Inject fake evaluations
  1535  	alloc := mock.Alloc()
  1536  	alloc.NodeID = node.ID
  1537  	state := s1.fsm.State()
  1538  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1539  	err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
  1540  	if err != nil {
  1541  		t.Fatalf("err: %v", err)
  1542  	}
  1543  
  1544  	// Lookup the allocs
  1545  	get := &structs.NodeSpecificRequest{
  1546  		NodeID:       node.ID,
  1547  		QueryOptions: structs.QueryOptions{Region: "global"},
  1548  	}
  1549  	var resp2 structs.NodeAllocsResponse
  1550  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
  1551  		t.Fatalf("err: %v", err)
  1552  	}
  1553  	if resp2.Index != 100 {
  1554  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1555  	}
  1556  
  1557  	if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
  1558  		t.Fatalf("bad: %#v", resp2.Allocs)
  1559  	}
  1560  
  1561  	// Lookup non-existing node
  1562  	get.NodeID = "foobarbaz"
  1563  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
  1564  		t.Fatalf("err: %v", err)
  1565  	}
  1566  	if resp2.Index != 100 {
  1567  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1568  	}
  1569  	if len(resp2.Allocs) != 0 {
  1570  		t.Fatalf("unexpected node")
  1571  	}
  1572  }
  1573  
  1574  func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) {
  1575  	t.Parallel()
  1576  
  1577  	s1, root, cleanupS1 := TestACLServer(t, nil)
  1578  	defer cleanupS1()
  1579  	codec := rpcClient(t, s1)
  1580  	testutil.WaitForLeader(t, s1.RPC)
  1581  	assert := assert.New(t)
  1582  
  1583  	// Create the node
  1584  	allocDefaultNS := mock.Alloc()
  1585  	node := mock.Node()
  1586  	allocDefaultNS.NodeID = node.ID
  1587  	state := s1.fsm.State()
  1588  	assert.Nil(state.UpsertNode(1, node), "UpsertNode")
  1589  	assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary")
  1590  	allocs := []*structs.Allocation{allocDefaultNS}
  1591  	assert.Nil(state.UpsertAllocs(5, allocs), "UpsertAllocs")
  1592  
  1593  	// Create the namespace policy and tokens
  1594  	validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+
  1595  		mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
  1596  	invalidToken := mock.CreatePolicyAndToken(t, state, 1004, "test-invalid",
  1597  		mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
  1598  
  1599  	req := &structs.NodeSpecificRequest{
  1600  		NodeID: node.ID,
  1601  		QueryOptions: structs.QueryOptions{
  1602  			Region: "global",
  1603  		},
  1604  	}
  1605  
  1606  	// Lookup the node without a token and expect failure
  1607  	{
  1608  		var resp structs.NodeAllocsResponse
  1609  		err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
  1610  		assert.NotNil(err, "RPC")
  1611  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1612  	}
  1613  
  1614  	// Try with a valid token for the default namespace
  1615  	req.AuthToken = validDefaultToken.SecretID
  1616  	{
  1617  		var resp structs.NodeAllocsResponse
  1618  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
  1619  		assert.Len(resp.Allocs, 1)
  1620  		assert.Equal(allocDefaultNS.ID, resp.Allocs[0].ID)
  1621  	}
  1622  
  1623  	// Try with a invalid token
  1624  	req.AuthToken = invalidToken.SecretID
  1625  	{
  1626  		var resp structs.NodeAllocsResponse
  1627  		err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
  1628  		assert.NotNil(err, "RPC")
  1629  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1630  	}
  1631  
  1632  	// Try with a root token
  1633  	req.AuthToken = root.SecretID
  1634  	{
  1635  		var resp structs.NodeAllocsResponse
  1636  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
  1637  		assert.Len(resp.Allocs, 1)
  1638  		for _, alloc := range resp.Allocs {
  1639  			switch alloc.ID {
  1640  			case allocDefaultNS.ID:
  1641  				// expected
  1642  			default:
  1643  				t.Errorf("unexpected alloc %q for namespace %q", alloc.ID, alloc.Namespace)
  1644  			}
  1645  		}
  1646  	}
  1647  }
  1648  
  1649  func TestClientEndpoint_GetClientAllocs(t *testing.T) {
  1650  	t.Parallel()
  1651  	require := require.New(t)
  1652  
  1653  	s1, cleanupS1 := TestServer(t, nil)
  1654  	defer cleanupS1()
  1655  	codec := rpcClient(t, s1)
  1656  	testutil.WaitForLeader(t, s1.RPC)
  1657  
  1658  	// Check that we have no client connections
  1659  	require.Empty(s1.connectedNodes())
  1660  
  1661  	// Create the register request
  1662  	node := mock.Node()
  1663  	state := s1.fsm.State()
  1664  	require.Nil(state.UpsertNode(98, node))
  1665  
  1666  	// Inject fake evaluations
  1667  	alloc := mock.Alloc()
  1668  	alloc.NodeID = node.ID
  1669  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1670  	err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
  1671  	if err != nil {
  1672  		t.Fatalf("err: %v", err)
  1673  	}
  1674  
  1675  	// Lookup the allocs
  1676  	get := &structs.NodeSpecificRequest{
  1677  		NodeID:       node.ID,
  1678  		SecretID:     node.SecretID,
  1679  		QueryOptions: structs.QueryOptions{Region: "global"},
  1680  	}
  1681  	var resp2 structs.NodeClientAllocsResponse
  1682  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2); err != nil {
  1683  		t.Fatalf("err: %v", err)
  1684  	}
  1685  	if resp2.Index != 100 {
  1686  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1687  	}
  1688  
  1689  	if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
  1690  		t.Fatalf("bad: %#v", resp2.Allocs)
  1691  	}
  1692  
  1693  	// Check that we have the client connections
  1694  	nodes := s1.connectedNodes()
  1695  	require.Len(nodes, 1)
  1696  	require.Contains(nodes, node.ID)
  1697  
  1698  	// Lookup node with bad SecretID
  1699  	get.SecretID = "foobarbaz"
  1700  	var resp3 structs.NodeClientAllocsResponse
  1701  	err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp3)
  1702  	if err == nil || !strings.Contains(err.Error(), "does not match") {
  1703  		t.Fatalf("err: %v", err)
  1704  	}
  1705  
  1706  	// Lookup non-existing node
  1707  	get.NodeID = uuid.Generate()
  1708  	var resp4 structs.NodeClientAllocsResponse
  1709  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp4); err != nil {
  1710  		t.Fatalf("err: %v", err)
  1711  	}
  1712  	if resp4.Index != 100 {
  1713  		t.Fatalf("Bad index: %d %d", resp3.Index, 100)
  1714  	}
  1715  	if len(resp4.Allocs) != 0 {
  1716  		t.Fatalf("unexpected node %#v", resp3.Allocs)
  1717  	}
  1718  
  1719  	// Close the connection and check that we remove the client connections
  1720  	require.Nil(codec.Close())
  1721  	testutil.WaitForResult(func() (bool, error) {
  1722  		nodes := s1.connectedNodes()
  1723  		return len(nodes) == 0, nil
  1724  	}, func(err error) {
  1725  		t.Fatalf("should have no clients")
  1726  	})
  1727  }
  1728  
  1729  func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) {
  1730  	t.Parallel()
  1731  
  1732  	s1, cleanupS1 := TestServer(t, nil)
  1733  	defer cleanupS1()
  1734  	codec := rpcClient(t, s1)
  1735  	testutil.WaitForLeader(t, s1.RPC)
  1736  
  1737  	// Create the register request
  1738  	node := mock.Node()
  1739  	reg := &structs.NodeRegisterRequest{
  1740  		Node:         node,
  1741  		WriteRequest: structs.WriteRequest{Region: "global"},
  1742  	}
  1743  
  1744  	// Fetch the response
  1745  	var resp structs.GenericResponse
  1746  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  1747  		t.Fatalf("err: %v", err)
  1748  	}
  1749  	node.CreateIndex = resp.Index
  1750  	node.ModifyIndex = resp.Index
  1751  
  1752  	// Inject fake evaluations async
  1753  	now := time.Now().UTC().UnixNano()
  1754  	alloc := mock.Alloc()
  1755  	alloc.NodeID = node.ID
  1756  	alloc.ModifyTime = now
  1757  	state := s1.fsm.State()
  1758  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1759  	start := time.Now()
  1760  	time.AfterFunc(100*time.Millisecond, func() {
  1761  		err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
  1762  		if err != nil {
  1763  			t.Fatalf("err: %v", err)
  1764  		}
  1765  	})
  1766  
  1767  	// Lookup the allocs in a blocking query
  1768  	req := &structs.NodeSpecificRequest{
  1769  		NodeID:   node.ID,
  1770  		SecretID: node.SecretID,
  1771  		QueryOptions: structs.QueryOptions{
  1772  			Region:        "global",
  1773  			MinQueryIndex: 50,
  1774  			MaxQueryTime:  time.Second,
  1775  		},
  1776  	}
  1777  	var resp2 structs.NodeClientAllocsResponse
  1778  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2); err != nil {
  1779  		t.Fatalf("err: %v", err)
  1780  	}
  1781  
  1782  	// Should block at least 100ms
  1783  	if time.Since(start) < 100*time.Millisecond {
  1784  		t.Fatalf("too fast")
  1785  	}
  1786  
  1787  	if resp2.Index != 100 {
  1788  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1789  	}
  1790  
  1791  	if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
  1792  		t.Fatalf("bad: %#v", resp2.Allocs)
  1793  	}
  1794  
  1795  	iter, err := state.AllocsByIDPrefix(nil, structs.DefaultNamespace, alloc.ID)
  1796  	if err != nil {
  1797  		t.Fatalf("err: %v", err)
  1798  	}
  1799  
  1800  	getAllocs := func(iter memdb.ResultIterator) []*structs.Allocation {
  1801  		var allocs []*structs.Allocation
  1802  		for {
  1803  			raw := iter.Next()
  1804  			if raw == nil {
  1805  				break
  1806  			}
  1807  			allocs = append(allocs, raw.(*structs.Allocation))
  1808  		}
  1809  		return allocs
  1810  	}
  1811  	out := getAllocs(iter)
  1812  
  1813  	if len(out) != 1 {
  1814  		t.Fatalf("Expected to get one allocation but got:%v", out)
  1815  	}
  1816  
  1817  	if out[0].ModifyTime != now {
  1818  		t.Fatalf("Invalid modify time %v", out[0].ModifyTime)
  1819  	}
  1820  
  1821  	// Alloc updates fire watches
  1822  	time.AfterFunc(100*time.Millisecond, func() {
  1823  		allocUpdate := mock.Alloc()
  1824  		allocUpdate.NodeID = alloc.NodeID
  1825  		allocUpdate.ID = alloc.ID
  1826  		allocUpdate.ClientStatus = structs.AllocClientStatusRunning
  1827  		state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
  1828  		err := state.UpsertAllocs(200, []*structs.Allocation{allocUpdate})
  1829  		if err != nil {
  1830  			t.Fatalf("err: %v", err)
  1831  		}
  1832  	})
  1833  
  1834  	req.QueryOptions.MinQueryIndex = 150
  1835  	var resp3 structs.NodeClientAllocsResponse
  1836  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3); err != nil {
  1837  		t.Fatalf("err: %v", err)
  1838  	}
  1839  
  1840  	if time.Since(start) < 100*time.Millisecond {
  1841  		t.Fatalf("too fast")
  1842  	}
  1843  	if resp3.Index != 200 {
  1844  		t.Fatalf("Bad index: %d %d", resp3.Index, 200)
  1845  	}
  1846  	if len(resp3.Allocs) != 1 || resp3.Allocs[alloc.ID] != 200 {
  1847  		t.Fatalf("bad: %#v", resp3.Allocs)
  1848  	}
  1849  }
  1850  
  1851  func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) {
  1852  	t.Parallel()
  1853  	assert := assert.New(t)
  1854  
  1855  	s1, cleanupS1 := TestServer(t, nil)
  1856  	defer cleanupS1()
  1857  	codec := rpcClient(t, s1)
  1858  	testutil.WaitForLeader(t, s1.RPC)
  1859  
  1860  	// Create the register request
  1861  	node := mock.Node()
  1862  	reg := &structs.NodeRegisterRequest{
  1863  		Node:         node,
  1864  		WriteRequest: structs.WriteRequest{Region: "global"},
  1865  	}
  1866  
  1867  	// Fetch the response
  1868  	var resp structs.GenericResponse
  1869  	assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
  1870  	node.CreateIndex = resp.Index
  1871  	node.ModifyIndex = resp.Index
  1872  
  1873  	// Inject fake allocations async
  1874  	alloc1 := mock.Alloc()
  1875  	alloc1.NodeID = node.ID
  1876  	alloc2 := mock.Alloc()
  1877  	alloc2.NodeID = node.ID
  1878  	state := s1.fsm.State()
  1879  	state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
  1880  	start := time.Now()
  1881  	time.AfterFunc(100*time.Millisecond, func() {
  1882  		assert.Nil(state.UpsertAllocs(100, []*structs.Allocation{alloc1, alloc2}))
  1883  	})
  1884  
  1885  	// Lookup the allocs in a blocking query
  1886  	req := &structs.NodeSpecificRequest{
  1887  		NodeID:   node.ID,
  1888  		SecretID: node.SecretID,
  1889  		QueryOptions: structs.QueryOptions{
  1890  			Region:        "global",
  1891  			MinQueryIndex: 50,
  1892  			MaxQueryTime:  time.Second,
  1893  		},
  1894  	}
  1895  	var resp2 structs.NodeClientAllocsResponse
  1896  	assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2))
  1897  
  1898  	// Should block at least 100ms
  1899  	if time.Since(start) < 100*time.Millisecond {
  1900  		t.Fatalf("too fast")
  1901  	}
  1902  
  1903  	assert.EqualValues(100, resp2.Index)
  1904  	if assert.Len(resp2.Allocs, 2) {
  1905  		assert.EqualValues(100, resp2.Allocs[alloc1.ID])
  1906  	}
  1907  
  1908  	// Delete an allocation
  1909  	time.AfterFunc(100*time.Millisecond, func() {
  1910  		assert.Nil(state.DeleteEval(200, nil, []string{alloc2.ID}))
  1911  	})
  1912  
  1913  	req.QueryOptions.MinQueryIndex = 150
  1914  	var resp3 structs.NodeClientAllocsResponse
  1915  	assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3))
  1916  
  1917  	if time.Since(start) < 100*time.Millisecond {
  1918  		t.Fatalf("too fast")
  1919  	}
  1920  	assert.EqualValues(200, resp3.Index)
  1921  	if assert.Len(resp3.Allocs, 1) {
  1922  		assert.EqualValues(100, resp3.Allocs[alloc1.ID])
  1923  	}
  1924  }
  1925  
  1926  // A MigrateToken should not be created if an allocation shares the same node
  1927  // with its previous allocation
  1928  func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) {
  1929  	t.Parallel()
  1930  	assert := assert.New(t)
  1931  
  1932  	s1, cleanupS1 := TestServer(t, nil)
  1933  	defer cleanupS1()
  1934  	codec := rpcClient(t, s1)
  1935  	testutil.WaitForLeader(t, s1.RPC)
  1936  
  1937  	// Create the register request
  1938  	node := mock.Node()
  1939  	reg := &structs.NodeRegisterRequest{
  1940  		Node:         node,
  1941  		WriteRequest: structs.WriteRequest{Region: "global"},
  1942  	}
  1943  
  1944  	// Fetch the response
  1945  	var resp structs.GenericResponse
  1946  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  1947  		t.Fatalf("err: %v", err)
  1948  	}
  1949  	node.CreateIndex = resp.Index
  1950  	node.ModifyIndex = resp.Index
  1951  
  1952  	// Inject fake evaluations
  1953  	prevAlloc := mock.Alloc()
  1954  	prevAlloc.NodeID = node.ID
  1955  	alloc := mock.Alloc()
  1956  	alloc.NodeID = node.ID
  1957  	alloc.PreviousAllocation = prevAlloc.ID
  1958  	alloc.DesiredStatus = structs.AllocClientStatusComplete
  1959  	state := s1.fsm.State()
  1960  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1961  	err := state.UpsertAllocs(100, []*structs.Allocation{prevAlloc, alloc})
  1962  	assert.Nil(err)
  1963  
  1964  	// Lookup the allocs
  1965  	get := &structs.NodeSpecificRequest{
  1966  		NodeID:       node.ID,
  1967  		SecretID:     node.SecretID,
  1968  		QueryOptions: structs.QueryOptions{Region: "global"},
  1969  	}
  1970  	var resp2 structs.NodeClientAllocsResponse
  1971  
  1972  	err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2)
  1973  	assert.Nil(err)
  1974  
  1975  	assert.Equal(uint64(100), resp2.Index)
  1976  	assert.Equal(2, len(resp2.Allocs))
  1977  	assert.Equal(uint64(100), resp2.Allocs[alloc.ID])
  1978  	assert.Equal(0, len(resp2.MigrateTokens))
  1979  }
  1980  
  1981  func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) {
  1982  	t.Parallel()
  1983  
  1984  	s1, cleanupS1 := TestServer(t, nil)
  1985  	defer cleanupS1()
  1986  	codec := rpcClient(t, s1)
  1987  	testutil.WaitForLeader(t, s1.RPC)
  1988  
  1989  	// Create the register request
  1990  	node := mock.Node()
  1991  	reg := &structs.NodeRegisterRequest{
  1992  		Node:         node,
  1993  		WriteRequest: structs.WriteRequest{Region: "global"},
  1994  	}
  1995  
  1996  	// Fetch the response
  1997  	var resp structs.GenericResponse
  1998  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  1999  		t.Fatalf("err: %v", err)
  2000  	}
  2001  	node.CreateIndex = resp.Index
  2002  	node.ModifyIndex = resp.Index
  2003  
  2004  	// Inject fake evaluations async
  2005  	alloc := mock.Alloc()
  2006  	alloc.NodeID = node.ID
  2007  	state := s1.fsm.State()
  2008  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2009  	start := time.Now()
  2010  	time.AfterFunc(100*time.Millisecond, func() {
  2011  		err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
  2012  		if err != nil {
  2013  			t.Fatalf("err: %v", err)
  2014  		}
  2015  	})
  2016  
  2017  	// Lookup the allocs in a blocking query
  2018  	req := &structs.NodeSpecificRequest{
  2019  		NodeID: node.ID,
  2020  		QueryOptions: structs.QueryOptions{
  2021  			Region:        "global",
  2022  			MinQueryIndex: 50,
  2023  			MaxQueryTime:  time.Second,
  2024  		},
  2025  	}
  2026  	var resp2 structs.NodeAllocsResponse
  2027  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp2); err != nil {
  2028  		t.Fatalf("err: %v", err)
  2029  	}
  2030  
  2031  	// Should block at least 100ms
  2032  	if time.Since(start) < 100*time.Millisecond {
  2033  		t.Fatalf("too fast")
  2034  	}
  2035  
  2036  	if resp2.Index != 100 {
  2037  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  2038  	}
  2039  
  2040  	if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
  2041  		t.Fatalf("bad: %#v", resp2.Allocs)
  2042  	}
  2043  
  2044  	// Alloc updates fire watches
  2045  	time.AfterFunc(100*time.Millisecond, func() {
  2046  		allocUpdate := mock.Alloc()
  2047  		allocUpdate.NodeID = alloc.NodeID
  2048  		allocUpdate.ID = alloc.ID
  2049  		allocUpdate.ClientStatus = structs.AllocClientStatusRunning
  2050  		state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
  2051  		err := state.UpdateAllocsFromClient(200, []*structs.Allocation{allocUpdate})
  2052  		if err != nil {
  2053  			t.Fatalf("err: %v", err)
  2054  		}
  2055  	})
  2056  
  2057  	req.QueryOptions.MinQueryIndex = 150
  2058  	var resp3 structs.NodeAllocsResponse
  2059  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp3); err != nil {
  2060  		t.Fatalf("err: %v", err)
  2061  	}
  2062  
  2063  	if time.Since(start) < 100*time.Millisecond {
  2064  		t.Fatalf("too fast")
  2065  	}
  2066  	if resp3.Index != 200 {
  2067  		t.Fatalf("Bad index: %d %d", resp3.Index, 200)
  2068  	}
  2069  	if len(resp3.Allocs) != 1 || resp3.Allocs[0].ClientStatus != structs.AllocClientStatusRunning {
  2070  		t.Fatalf("bad: %#v", resp3.Allocs[0])
  2071  	}
  2072  }
  2073  
  2074  func TestClientEndpoint_UpdateAlloc(t *testing.T) {
  2075  	t.Parallel()
  2076  
  2077  	s1, cleanupS1 := TestServer(t, func(c *Config) {
  2078  		// Disabling scheduling in this test so that we can
  2079  		// ensure that the state store doesn't accumulate more evals
  2080  		// than what we expect the unit test to add
  2081  		c.NumSchedulers = 0
  2082  	})
  2083  
  2084  	defer cleanupS1()
  2085  	codec := rpcClient(t, s1)
  2086  	testutil.WaitForLeader(t, s1.RPC)
  2087  	require := require.New(t)
  2088  
  2089  	// Create the register request
  2090  	node := mock.Node()
  2091  	reg := &structs.NodeRegisterRequest{
  2092  		Node:         node,
  2093  		WriteRequest: structs.WriteRequest{Region: "global"},
  2094  	}
  2095  
  2096  	// Fetch the response
  2097  	var resp structs.GenericResponse
  2098  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2099  		t.Fatalf("err: %v", err)
  2100  	}
  2101  
  2102  	state := s1.fsm.State()
  2103  	// Inject mock job
  2104  	job := mock.Job()
  2105  	job.ID = "mytestjob"
  2106  	err := state.UpsertJob(101, job)
  2107  	require.Nil(err)
  2108  
  2109  	// Inject fake allocations
  2110  	alloc := mock.Alloc()
  2111  	alloc.JobID = job.ID
  2112  	alloc.NodeID = node.ID
  2113  	err = state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2114  	require.Nil(err)
  2115  	alloc.TaskGroup = job.TaskGroups[0].Name
  2116  
  2117  	alloc2 := mock.Alloc()
  2118  	alloc2.JobID = job.ID
  2119  	alloc2.NodeID = node.ID
  2120  	err = state.UpsertJobSummary(99, mock.JobSummary(alloc2.JobID))
  2121  	require.Nil(err)
  2122  	alloc2.TaskGroup = job.TaskGroups[0].Name
  2123  
  2124  	err = state.UpsertAllocs(100, []*structs.Allocation{alloc, alloc2})
  2125  	require.Nil(err)
  2126  
  2127  	// Attempt updates of more than one alloc for the same job
  2128  	clientAlloc1 := new(structs.Allocation)
  2129  	*clientAlloc1 = *alloc
  2130  	clientAlloc1.ClientStatus = structs.AllocClientStatusFailed
  2131  
  2132  	clientAlloc2 := new(structs.Allocation)
  2133  	*clientAlloc2 = *alloc2
  2134  	clientAlloc2.ClientStatus = structs.AllocClientStatusFailed
  2135  
  2136  	// Update the alloc
  2137  	update := &structs.AllocUpdateRequest{
  2138  		Alloc:        []*structs.Allocation{clientAlloc1, clientAlloc2},
  2139  		WriteRequest: structs.WriteRequest{Region: "global"},
  2140  	}
  2141  	var resp2 structs.NodeAllocsResponse
  2142  	start := time.Now()
  2143  	err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2)
  2144  	require.Nil(err)
  2145  	require.NotEqual(uint64(0), resp2.Index)
  2146  
  2147  	if diff := time.Since(start); diff < batchUpdateInterval {
  2148  		t.Fatalf("too fast: %v", diff)
  2149  	}
  2150  
  2151  	// Lookup the alloc
  2152  	ws := memdb.NewWatchSet()
  2153  	out, err := state.AllocByID(ws, alloc.ID)
  2154  	require.Nil(err)
  2155  	require.Equal(structs.AllocClientStatusFailed, out.ClientStatus)
  2156  	require.True(out.ModifyTime > 0)
  2157  
  2158  	// Assert that exactly one eval with TriggeredBy EvalTriggerRetryFailedAlloc exists
  2159  	evaluations, err := state.EvalsByJob(ws, job.Namespace, job.ID)
  2160  	require.Nil(err)
  2161  	require.True(len(evaluations) != 0)
  2162  	foundCount := 0
  2163  	for _, resultEval := range evaluations {
  2164  		if resultEval.TriggeredBy == structs.EvalTriggerRetryFailedAlloc && resultEval.WaitUntil.IsZero() {
  2165  			foundCount++
  2166  		}
  2167  	}
  2168  	require.Equal(1, foundCount, "Should create exactly one eval for failed allocs")
  2169  
  2170  }
  2171  
  2172  func TestClientEndpoint_BatchUpdate(t *testing.T) {
  2173  	t.Parallel()
  2174  
  2175  	s1, cleanupS1 := TestServer(t, nil)
  2176  	defer cleanupS1()
  2177  	codec := rpcClient(t, s1)
  2178  	testutil.WaitForLeader(t, s1.RPC)
  2179  
  2180  	// Create the register request
  2181  	node := mock.Node()
  2182  	reg := &structs.NodeRegisterRequest{
  2183  		Node:         node,
  2184  		WriteRequest: structs.WriteRequest{Region: "global"},
  2185  	}
  2186  
  2187  	// Fetch the response
  2188  	var resp structs.GenericResponse
  2189  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2190  		t.Fatalf("err: %v", err)
  2191  	}
  2192  
  2193  	// Inject fake evaluations
  2194  	alloc := mock.Alloc()
  2195  	alloc.NodeID = node.ID
  2196  	state := s1.fsm.State()
  2197  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2198  	err := state.UpsertAllocs(100, []*structs.Allocation{alloc})
  2199  	if err != nil {
  2200  		t.Fatalf("err: %v", err)
  2201  	}
  2202  
  2203  	// Attempt update
  2204  	clientAlloc := new(structs.Allocation)
  2205  	*clientAlloc = *alloc
  2206  	clientAlloc.ClientStatus = structs.AllocClientStatusFailed
  2207  
  2208  	// Call to do the batch update
  2209  	bf := structs.NewBatchFuture()
  2210  	endpoint := s1.staticEndpoints.Node
  2211  	endpoint.batchUpdate(bf, []*structs.Allocation{clientAlloc}, nil)
  2212  	if err := bf.Wait(); err != nil {
  2213  		t.Fatalf("err: %v", err)
  2214  	}
  2215  	if bf.Index() == 0 {
  2216  		t.Fatalf("Bad index: %d", bf.Index())
  2217  	}
  2218  
  2219  	// Lookup the alloc
  2220  	ws := memdb.NewWatchSet()
  2221  	out, err := state.AllocByID(ws, alloc.ID)
  2222  	if err != nil {
  2223  		t.Fatalf("err: %v", err)
  2224  	}
  2225  	if out.ClientStatus != structs.AllocClientStatusFailed {
  2226  		t.Fatalf("Bad: %#v", out)
  2227  	}
  2228  }
  2229  
  2230  func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) {
  2231  	t.Parallel()
  2232  
  2233  	s1, cleanupS1 := TestServer(t, nil)
  2234  	defer cleanupS1()
  2235  	codec := rpcClient(t, s1)
  2236  	testutil.WaitForLeader(t, s1.RPC)
  2237  
  2238  	// Create the register request
  2239  	node := mock.Node()
  2240  	reg := &structs.NodeRegisterRequest{
  2241  		Node:         node,
  2242  		WriteRequest: structs.WriteRequest{Region: "global"},
  2243  	}
  2244  
  2245  	// Fetch the response
  2246  	var resp structs.GenericResponse
  2247  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2248  		t.Fatalf("err: %v", err)
  2249  	}
  2250  
  2251  	// Swap the servers Vault Client
  2252  	tvc := &TestVaultClient{}
  2253  	s1.vault = tvc
  2254  
  2255  	// Inject fake allocation and vault accessor
  2256  	alloc := mock.Alloc()
  2257  	alloc.NodeID = node.ID
  2258  	state := s1.fsm.State()
  2259  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2260  	if err := state.UpsertAllocs(100, []*structs.Allocation{alloc}); err != nil {
  2261  		t.Fatalf("err: %v", err)
  2262  	}
  2263  
  2264  	va := mock.VaultAccessor()
  2265  	va.NodeID = node.ID
  2266  	va.AllocID = alloc.ID
  2267  	if err := state.UpsertVaultAccessor(101, []*structs.VaultAccessor{va}); err != nil {
  2268  		t.Fatalf("err: %v", err)
  2269  	}
  2270  
  2271  	// Inject mock job
  2272  	job := mock.Job()
  2273  	job.ID = alloc.JobID
  2274  	err := state.UpsertJob(101, job)
  2275  	if err != nil {
  2276  		t.Fatalf("err: %v", err)
  2277  	}
  2278  
  2279  	// Attempt update
  2280  	clientAlloc := new(structs.Allocation)
  2281  	*clientAlloc = *alloc
  2282  	clientAlloc.ClientStatus = structs.AllocClientStatusFailed
  2283  
  2284  	// Update the alloc
  2285  	update := &structs.AllocUpdateRequest{
  2286  		Alloc:        []*structs.Allocation{clientAlloc},
  2287  		WriteRequest: structs.WriteRequest{Region: "global"},
  2288  	}
  2289  	var resp2 structs.NodeAllocsResponse
  2290  	start := time.Now()
  2291  	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2); err != nil {
  2292  		t.Fatalf("err: %v", err)
  2293  	}
  2294  	if resp2.Index == 0 {
  2295  		t.Fatalf("Bad index: %d", resp2.Index)
  2296  	}
  2297  	if diff := time.Since(start); diff < batchUpdateInterval {
  2298  		t.Fatalf("too fast: %v", diff)
  2299  	}
  2300  
  2301  	// Lookup the alloc
  2302  	ws := memdb.NewWatchSet()
  2303  	out, err := state.AllocByID(ws, alloc.ID)
  2304  	if err != nil {
  2305  		t.Fatalf("err: %v", err)
  2306  	}
  2307  	if out.ClientStatus != structs.AllocClientStatusFailed {
  2308  		t.Fatalf("Bad: %#v", out)
  2309  	}
  2310  
  2311  	if l := len(tvc.RevokedTokens); l != 1 {
  2312  		t.Fatalf("Deregister revoked %d tokens; want 1", l)
  2313  	}
  2314  }
  2315  
// TestClientEndpoint_UpdateAlloc_UnclaimVolumes verifies that marking an
// alloc terminal via Node.UpdateAlloc triggers a release of its CSI volume
// claims, observed here as a bump of the volume's ModifyIndex past the
// index of the claim batch apply.
func TestClientEndpoint_UpdateAlloc_UnclaimVolumes(t *testing.T) {
	t.Parallel()
	srv, shutdown := TestServer(t, func(c *Config) { c.NumSchedulers = 0 })
	defer shutdown()
	testutil.WaitForLeader(t, srv.RPC)

	codec := rpcClient(t, srv)
	state := srv.fsm.State()

	// Raft-style index counter, bumped before each state write.
	index := uint64(0)
	ws := memdb.NewWatchSet()

	// Create a client node, plugin, and volume
	node := mock.Node()
	node.Attributes["nomad.version"] = "0.11.0" // client RPCs not supported on early version
	node.CSINodePlugins = map[string]*structs.CSIInfo{
		"csi-plugin-example": {PluginID: "csi-plugin-example",
			Healthy:        true,
			NodeInfo:       &structs.CSINodeInfo{},
			ControllerInfo: &structs.CSIControllerInfo{},
		},
	}
	index++
	err := state.UpsertNode(index, node)
	require.NoError(t, err)
	volId0 := uuid.Generate()
	ns := structs.DefaultNamespace
	vols := []*structs.CSIVolume{{
		ID:             volId0,
		Namespace:      ns,
		PluginID:       "csi-plugin-example",
		AccessMode:     structs.CSIVolumeAccessModeMultiNodeSingleWriter,
		AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
	}}
	index++
	err = state.CSIVolumeRegister(index, vols)
	require.NoError(t, err)
	// The freshly registered volume starts with no claims.
	vol, err := state.CSIVolumeByID(ws, ns, volId0)
	require.NoError(t, err)
	require.Len(t, vol.ReadAllocs, 0)
	require.Len(t, vol.WriteAllocs, 0)

	// Create a job with 2 allocations
	job := mock.Job()
	job.TaskGroups[0].Volumes = map[string]*structs.VolumeRequest{
		"_": {
			Name:     "someVolume",
			Type:     structs.VolumeTypeCSI,
			Source:   volId0,
			ReadOnly: false,
		},
	}
	index++
	err = state.UpsertJob(index, job)
	require.NoError(t, err)

	alloc1 := mock.Alloc()
	alloc1.JobID = job.ID
	alloc1.NodeID = node.ID
	index++
	err = state.UpsertJobSummary(index, mock.JobSummary(alloc1.JobID))
	require.NoError(t, err)
	alloc1.TaskGroup = job.TaskGroups[0].Name

	alloc2 := mock.Alloc()
	alloc2.JobID = job.ID
	alloc2.NodeID = node.ID
	index++
	err = state.UpsertJobSummary(index, mock.JobSummary(alloc2.JobID))
	require.NoError(t, err)
	alloc2.TaskGroup = job.TaskGroups[0].Name

	index++
	err = state.UpsertAllocs(index, []*structs.Allocation{alloc1, alloc2})
	require.NoError(t, err)

	// Claim the volumes and verify the claims were set. We need to
	// apply this through the FSM so that we make sure the index is
	// properly updated to test later
	batch := &structs.CSIVolumeClaimBatchRequest{
		Claims: []structs.CSIVolumeClaimRequest{
			{
				VolumeID:     volId0,
				AllocationID: alloc1.ID,
				NodeID:       alloc1.NodeID,
				Claim:        structs.CSIVolumeClaimWrite,
			},
			{
				VolumeID:     volId0,
				AllocationID: alloc2.ID,
				NodeID:       alloc2.NodeID,
				Claim:        structs.CSIVolumeClaimRead,
			},
		}}
	_, lastIndex, err := srv.raftApply(structs.CSIVolumeClaimBatchRequestType, batch)
	require.NoError(t, err)

	// One reader and one writer claim should now be recorded.
	vol, err = state.CSIVolumeByID(ws, ns, volId0)
	require.NoError(t, err)
	require.Len(t, vol.ReadAllocs, 1)
	require.Len(t, vol.WriteAllocs, 1)

	// Update the 1st alloc as terminal/failed
	alloc1.ClientStatus = structs.AllocClientStatusFailed
	err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc",
		&structs.AllocUpdateRequest{
			Alloc:        []*structs.Allocation{alloc1},
			WriteRequest: structs.WriteRequest{Region: "global"},
		}, &structs.NodeAllocsResponse{})
	require.NoError(t, err)

	// Lookup the alloc and verify status was updated
	out, err := state.AllocByID(ws, alloc1.ID)
	require.NoError(t, err)
	require.Equal(t, structs.AllocClientStatusFailed, out.ClientStatus)

	// Verify the index has been updated to trigger a volume claim release

	req := &structs.CSIVolumeGetRequest{ID: volId0}
	req.Region = "global"
	getResp := &structs.CSIVolumeGetResponse{}
	err = msgpackrpc.CallWithCodec(codec, "CSIVolume.Get", req, getResp)
	require.NoError(t, err)
	// The volume must have been rewritten after the claim batch apply,
	// which is the observable signal of the claim-release machinery.
	require.Greater(t, getResp.Volume.ModifyIndex, lastIndex)
}
  2441  
  2442  func TestClientEndpoint_CreateNodeEvals(t *testing.T) {
  2443  	t.Parallel()
  2444  
  2445  	s1, cleanupS1 := TestServer(t, nil)
  2446  	defer cleanupS1()
  2447  	testutil.WaitForLeader(t, s1.RPC)
  2448  
  2449  	// Inject fake evaluations
  2450  	alloc := mock.Alloc()
  2451  	state := s1.fsm.State()
  2452  	state.UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
  2453  	if err := state.UpsertAllocs(2, []*structs.Allocation{alloc}); err != nil {
  2454  		t.Fatalf("err: %v", err)
  2455  	}
  2456  
  2457  	// Inject a fake system job.
  2458  	job := mock.SystemJob()
  2459  	if err := state.UpsertJob(3, job); err != nil {
  2460  		t.Fatalf("err: %v", err)
  2461  	}
  2462  
  2463  	// Create some evaluations
  2464  	ids, index, err := s1.staticEndpoints.Node.createNodeEvals(alloc.NodeID, 1)
  2465  	if err != nil {
  2466  		t.Fatalf("err: %v", err)
  2467  	}
  2468  	if index == 0 {
  2469  		t.Fatalf("bad: %d", index)
  2470  	}
  2471  	if len(ids) != 2 {
  2472  		t.Fatalf("bad: %s", ids)
  2473  	}
  2474  
  2475  	// Lookup the evaluations
  2476  	ws := memdb.NewWatchSet()
  2477  	evalByType := make(map[string]*structs.Evaluation, 2)
  2478  	for _, id := range ids {
  2479  		eval, err := state.EvalByID(ws, id)
  2480  		if err != nil {
  2481  			t.Fatalf("err: %v", err)
  2482  		}
  2483  		if eval == nil {
  2484  			t.Fatalf("expected eval")
  2485  		}
  2486  
  2487  		if old, ok := evalByType[eval.Type]; ok {
  2488  			t.Fatalf("multiple evals of the same type: %v and %v", old, eval)
  2489  		}
  2490  
  2491  		evalByType[eval.Type] = eval
  2492  	}
  2493  
  2494  	if len(evalByType) != 2 {
  2495  		t.Fatalf("Expected a service and system job; got %#v", evalByType)
  2496  	}
  2497  
  2498  	// Ensure the evals are correct.
  2499  	for schedType, eval := range evalByType {
  2500  		expPriority := alloc.Job.Priority
  2501  		expJobID := alloc.JobID
  2502  		if schedType == "system" {
  2503  			expPriority = job.Priority
  2504  			expJobID = job.ID
  2505  		}
  2506  
  2507  		t.Logf("checking eval: %v", pretty.Sprint(eval))
  2508  		require.Equal(t, index, eval.CreateIndex)
  2509  		require.Equal(t, structs.EvalTriggerNodeUpdate, eval.TriggeredBy)
  2510  		require.Equal(t, alloc.NodeID, eval.NodeID)
  2511  		require.Equal(t, uint64(1), eval.NodeModifyIndex)
  2512  		switch eval.Status {
  2513  		case structs.EvalStatusPending, structs.EvalStatusComplete:
  2514  			// success
  2515  		default:
  2516  			t.Fatalf("expected pending or complete, found %v", eval.Status)
  2517  		}
  2518  		require.Equal(t, expPriority, eval.Priority)
  2519  		require.Equal(t, expJobID, eval.JobID)
  2520  		require.NotZero(t, eval.CreateTime)
  2521  		require.NotZero(t, eval.ModifyTime)
  2522  	}
  2523  }
  2524  
  2525  func TestClientEndpoint_Evaluate(t *testing.T) {
  2526  	t.Parallel()
  2527  
  2528  	s1, cleanupS1 := TestServer(t, func(c *Config) {
  2529  		c.NumSchedulers = 0 // Prevent automatic dequeue
  2530  	})
  2531  	defer cleanupS1()
  2532  	codec := rpcClient(t, s1)
  2533  	testutil.WaitForLeader(t, s1.RPC)
  2534  
  2535  	// Inject fake evaluations
  2536  	alloc := mock.Alloc()
  2537  	node := mock.Node()
  2538  	node.ID = alloc.NodeID
  2539  	state := s1.fsm.State()
  2540  	err := state.UpsertNode(1, node)
  2541  	if err != nil {
  2542  		t.Fatalf("err: %v", err)
  2543  	}
  2544  	state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID))
  2545  	err = state.UpsertAllocs(3, []*structs.Allocation{alloc})
  2546  	if err != nil {
  2547  		t.Fatalf("err: %v", err)
  2548  	}
  2549  
  2550  	// Re-evaluate
  2551  	req := &structs.NodeEvaluateRequest{
  2552  		NodeID:       alloc.NodeID,
  2553  		WriteRequest: structs.WriteRequest{Region: "global"},
  2554  	}
  2555  
  2556  	// Fetch the response
  2557  	var resp structs.NodeUpdateResponse
  2558  	if err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp); err != nil {
  2559  		t.Fatalf("err: %v", err)
  2560  	}
  2561  	if resp.Index == 0 {
  2562  		t.Fatalf("bad index: %d", resp.Index)
  2563  	}
  2564  
  2565  	// Create some evaluations
  2566  	ids := resp.EvalIDs
  2567  	if len(ids) != 1 {
  2568  		t.Fatalf("bad: %s", ids)
  2569  	}
  2570  
  2571  	// Lookup the evaluation
  2572  	ws := memdb.NewWatchSet()
  2573  	eval, err := state.EvalByID(ws, ids[0])
  2574  	if err != nil {
  2575  		t.Fatalf("err: %v", err)
  2576  	}
  2577  	if eval == nil {
  2578  		t.Fatalf("expected eval")
  2579  	}
  2580  	if eval.CreateIndex != resp.Index {
  2581  		t.Fatalf("index mis-match")
  2582  	}
  2583  
  2584  	if eval.Priority != alloc.Job.Priority {
  2585  		t.Fatalf("bad: %#v", eval)
  2586  	}
  2587  	if eval.Type != alloc.Job.Type {
  2588  		t.Fatalf("bad: %#v", eval)
  2589  	}
  2590  	if eval.TriggeredBy != structs.EvalTriggerNodeUpdate {
  2591  		t.Fatalf("bad: %#v", eval)
  2592  	}
  2593  	if eval.JobID != alloc.JobID {
  2594  		t.Fatalf("bad: %#v", eval)
  2595  	}
  2596  	if eval.NodeID != alloc.NodeID {
  2597  		t.Fatalf("bad: %#v", eval)
  2598  	}
  2599  	if eval.NodeModifyIndex != 1 {
  2600  		t.Fatalf("bad: %#v", eval)
  2601  	}
  2602  	if eval.Status != structs.EvalStatusPending {
  2603  		t.Fatalf("bad: %#v", eval)
  2604  	}
  2605  	if eval.CreateTime == 0 {
  2606  		t.Fatalf("CreateTime is unset: %#v", eval)
  2607  	}
  2608  	if eval.ModifyTime == 0 {
  2609  		t.Fatalf("ModifyTime is unset: %#v", eval)
  2610  	}
  2611  }
  2612  
  2613  func TestClientEndpoint_Evaluate_ACL(t *testing.T) {
  2614  	t.Parallel()
  2615  
  2616  	s1, root, cleanupS1 := TestACLServer(t, nil)
  2617  	defer cleanupS1()
  2618  	codec := rpcClient(t, s1)
  2619  	testutil.WaitForLeader(t, s1.RPC)
  2620  	assert := assert.New(t)
  2621  
  2622  	// Create the node with an alloc
  2623  	alloc := mock.Alloc()
  2624  	node := mock.Node()
  2625  	node.ID = alloc.NodeID
  2626  	state := s1.fsm.State()
  2627  
  2628  	assert.Nil(state.UpsertNode(1, node), "UpsertNode")
  2629  	assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID)), "UpsertJobSummary")
  2630  	assert.Nil(state.UpsertAllocs(3, []*structs.Allocation{alloc}), "UpsertAllocs")
  2631  
  2632  	// Create the policy and tokens
  2633  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
  2634  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
  2635  
  2636  	// Re-evaluate without a token and expect failure
  2637  	req := &structs.NodeEvaluateRequest{
  2638  		NodeID:       alloc.NodeID,
  2639  		WriteRequest: structs.WriteRequest{Region: "global"},
  2640  	}
  2641  	{
  2642  		var resp structs.NodeUpdateResponse
  2643  		err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
  2644  		assert.NotNil(err, "RPC")
  2645  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2646  	}
  2647  
  2648  	// Try with a valid token
  2649  	req.AuthToken = validToken.SecretID
  2650  	{
  2651  		var resp structs.NodeUpdateResponse
  2652  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
  2653  	}
  2654  
  2655  	// Try with a invalid token
  2656  	req.AuthToken = invalidToken.SecretID
  2657  	{
  2658  		var resp structs.NodeUpdateResponse
  2659  		err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
  2660  		assert.NotNil(err, "RPC")
  2661  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2662  	}
  2663  
  2664  	// Try with a root token
  2665  	req.AuthToken = root.SecretID
  2666  	{
  2667  		var resp structs.NodeUpdateResponse
  2668  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
  2669  	}
  2670  }
  2671  
  2672  func TestClientEndpoint_ListNodes(t *testing.T) {
  2673  	t.Parallel()
  2674  
  2675  	s1, cleanupS1 := TestServer(t, nil)
  2676  	defer cleanupS1()
  2677  	codec := rpcClient(t, s1)
  2678  	testutil.WaitForLeader(t, s1.RPC)
  2679  
  2680  	// Create the register request
  2681  	node := mock.Node()
  2682  	node.HostVolumes = map[string]*structs.ClientHostVolumeConfig{
  2683  		"foo": {
  2684  			Name:     "foo",
  2685  			Path:     "/",
  2686  			ReadOnly: true,
  2687  		},
  2688  	}
  2689  	reg := &structs.NodeRegisterRequest{
  2690  		Node:         node,
  2691  		WriteRequest: structs.WriteRequest{Region: "global"},
  2692  	}
  2693  
  2694  	// Fetch the response
  2695  	var resp structs.GenericResponse
  2696  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2697  		t.Fatalf("err: %v", err)
  2698  	}
  2699  	node.CreateIndex = resp.Index
  2700  	node.ModifyIndex = resp.Index
  2701  
  2702  	// Lookup the node
  2703  	get := &structs.NodeListRequest{
  2704  		QueryOptions: structs.QueryOptions{Region: "global"},
  2705  	}
  2706  	var resp2 structs.NodeListResponse
  2707  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp2); err != nil {
  2708  		t.Fatalf("err: %v", err)
  2709  	}
  2710  	if resp2.Index != resp.Index {
  2711  		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
  2712  	}
  2713  
  2714  	require.Len(t, resp2.Nodes, 1)
  2715  	require.Equal(t, node.ID, resp2.Nodes[0].ID)
  2716  
  2717  	// #7344 - Assert HostVolumes are included in stub
  2718  	require.Equal(t, node.HostVolumes, resp2.Nodes[0].HostVolumes)
  2719  
  2720  	// Lookup the node with prefix
  2721  	get = &structs.NodeListRequest{
  2722  		QueryOptions: structs.QueryOptions{Region: "global", Prefix: node.ID[:4]},
  2723  	}
  2724  	var resp3 structs.NodeListResponse
  2725  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp3); err != nil {
  2726  		t.Fatalf("err: %v", err)
  2727  	}
  2728  	if resp3.Index != resp.Index {
  2729  		t.Fatalf("Bad index: %d %d", resp3.Index, resp2.Index)
  2730  	}
  2731  
  2732  	if len(resp3.Nodes) != 1 {
  2733  		t.Fatalf("bad: %#v", resp3.Nodes)
  2734  	}
  2735  	if resp3.Nodes[0].ID != node.ID {
  2736  		t.Fatalf("bad: %#v", resp3.Nodes[0])
  2737  	}
  2738  }
  2739  
  2740  func TestClientEndpoint_ListNodes_ACL(t *testing.T) {
  2741  	t.Parallel()
  2742  
  2743  	s1, root, cleanupS1 := TestACLServer(t, nil)
  2744  	defer cleanupS1()
  2745  	codec := rpcClient(t, s1)
  2746  	testutil.WaitForLeader(t, s1.RPC)
  2747  	assert := assert.New(t)
  2748  
  2749  	// Create the node
  2750  	node := mock.Node()
  2751  	state := s1.fsm.State()
  2752  	assert.Nil(state.UpsertNode(1, node), "UpsertNode")
  2753  
  2754  	// Create the namespace policy and tokens
  2755  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
  2756  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))
  2757  
  2758  	// Lookup the node without a token and expect failure
  2759  	req := &structs.NodeListRequest{
  2760  		QueryOptions: structs.QueryOptions{Region: "global"},
  2761  	}
  2762  	{
  2763  		var resp structs.NodeListResponse
  2764  		err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
  2765  		assert.NotNil(err, "RPC")
  2766  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2767  	}
  2768  
  2769  	// Try with a valid token
  2770  	req.AuthToken = validToken.SecretID
  2771  	{
  2772  		var resp structs.NodeListResponse
  2773  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
  2774  		assert.Equal(node.ID, resp.Nodes[0].ID)
  2775  	}
  2776  
  2777  	// Try with a invalid token
  2778  	req.AuthToken = invalidToken.SecretID
  2779  	{
  2780  		var resp structs.NodeListResponse
  2781  		err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
  2782  		assert.NotNil(err, "RPC")
  2783  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2784  	}
  2785  
  2786  	// Try with a root token
  2787  	req.AuthToken = root.SecretID
  2788  	{
  2789  		var resp structs.NodeListResponse
  2790  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
  2791  		assert.Equal(node.ID, resp.Nodes[0].ID)
  2792  	}
  2793  }
  2794  
// TestClientEndpoint_ListNodes_Blocking verifies that a blocking Node.List
// query (MinQueryIndex set) unblocks on each kind of node write — upsert,
// drain update, status update, and delete — and that each response carries
// the raft index of the triggering write.
func TestClientEndpoint_ListNodes_Blocking(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Disable drainer to prevent drain from completing during test
	s1.nodeDrainer.SetEnabled(false, nil)

	// Create the node
	node := mock.Node()

	// Node upsert triggers watches
	errCh := make(chan error, 1)
	timer := time.AfterFunc(100*time.Millisecond, func() {
		errCh <- state.UpsertNode(2, node)
	})
	defer timer.Stop()

	// Block at index 1; the upsert at index 2 should wake the query.
	req := &structs.NodeListRequest{
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			MinQueryIndex: 1,
		},
	}
	start := time.Now()
	var resp structs.NodeListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	// The RPC must have blocked until the deferred write fired (~100ms in).
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 2 {
		t.Fatalf("Bad index: %d %d", resp.Index, 2)
	}
	if len(resp.Nodes) != 1 || resp.Nodes[0].ID != node.ID {
		t.Fatalf("bad: %#v", resp.Nodes)
	}

	// Node drain updates trigger watches.
	time.AfterFunc(100*time.Millisecond, func() {
		s := &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 10 * time.Second,
			},
		}
		errCh <- state.UpdateNodeDrain(3, node.ID, s, false, 0, nil)
	})

	req.MinQueryIndex = 2
	var resp2 structs.NodeListResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
	}
	if resp2.Index != 3 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 3)
	}
	if len(resp2.Nodes) != 1 || !resp2.Nodes[0].Drain {
		t.Fatalf("bad: %#v", resp2.Nodes)
	}

	// Node status update triggers watches
	time.AfterFunc(100*time.Millisecond, func() {
		errCh <- state.UpdateNodeStatus(40, node.ID, structs.NodeStatusDown, 0, nil)
	})

	// MinQueryIndex 38 is below the status write at 40, so the query still
	// blocks until that write lands.
	req.MinQueryIndex = 38
	var resp3 structs.NodeListResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp3); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp3)
	}
	if resp3.Index != 40 {
		t.Fatalf("Bad index: %d %d", resp3.Index, 40)
	}
	if len(resp3.Nodes) != 1 || resp3.Nodes[0].Status != structs.NodeStatusDown {
		t.Fatalf("bad: %#v", resp3.Nodes)
	}

	// Node delete triggers watches.
	time.AfterFunc(100*time.Millisecond, func() {
		errCh <- state.DeleteNode(50, []string{node.ID})
	})

	req.MinQueryIndex = 45
	var resp4 structs.NodeListResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp4); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp4)
	}
	if resp4.Index != 50 {
		t.Fatalf("Bad index: %d %d", resp4.Index, 50)
	}
	// The only node was deleted, so the list is empty.
	if len(resp4.Nodes) != 0 {
		t.Fatalf("bad: %#v", resp4.Nodes)
	}
}
  2926  
// TestClientEndpoint_DeriveVaultToken_Bad walks Node.DeriveVaultToken through
// a series of invalid requests — wrong node SecretID, alloc not on the node,
// job with no Vault stanza, and a terminal alloc — asserting each failure is
// reported via resp.Error rather than as an RPC transport error.
func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the node
	node := mock.Node()
	if err := state.UpsertNode(2, node); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create an alloc
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	tasks := []string{task.Name}
	if err := state.UpsertAllocs(3, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// A random SecretID will not match the registered node's SecretID.
	req := &structs.DeriveVaultTokenRequest{
		NodeID:   node.ID,
		SecretID: uuid.Generate(),
		AllocID:  alloc.ID,
		Tasks:    tasks,
		QueryOptions: structs.QueryOptions{
			Region: "global",
		},
	}

	var resp structs.DeriveVaultTokenResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}

	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "SecretID mismatch") {
		t.Fatalf("Expected SecretID mismatch: %v", resp.Error)
	}

	// Put the correct SecretID
	req.SecretID = node.SecretID

	// Now we should get an error about the allocation not running on the node
	// (mock.Alloc() is not placed on our node).
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "not running on Node") {
		t.Fatalf("Expected not running on node error: %v", resp.Error)
	}

	// Update to be running on the node
	alloc.NodeID = node.ID
	if err := state.UpsertAllocs(4, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now we should get an error about the job not needing any Vault secrets
	// (the mock task has no Vault stanza).
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "does not require") {
		t.Fatalf("Expected no policies error: %v", resp.Error)
	}

	// Update to be terminal
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(5, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// A terminal allocation must be rejected before any Vault work happens.
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "terminal") {
		t.Fatalf("Expected terminal allocation error: %v", resp.Error)
	}
}
  3008  
// TestClientEndpoint_DeriveVaultToken exercises the happy path: a valid
// request for a task that requires Vault policies yields a wrapped token
// from the (test) Vault client, and a matching VaultAccessor is persisted
// in the state store.
func TestClientEndpoint_DeriveVaultToken(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Enable vault and allow authenticated
	tr := true
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &tr

	// Replace the Vault Client on the server
	tvc := &TestVaultClient{}
	s1.vault = tvc

	// Create the node
	node := mock.Node()
	if err := state.UpsertNode(2, node); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create an alloc an allocation that has vault policies required
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	task := alloc.Job.TaskGroups[0].Tasks[0]
	tasks := []string{task.Name}
	task.Vault = &structs.Vault{Policies: []string{"a", "b"}}
	if err := state.UpsertAllocs(3, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Return a secret for the task: program the test client with the wrapped
	// token/accessor the endpoint should hand back.
	token := uuid.Generate()
	accessor := uuid.Generate()
	ttl := 10
	secret := &vapi.Secret{
		WrapInfo: &vapi.SecretWrapInfo{
			Token:           token,
			WrappedAccessor: accessor,
			TTL:             ttl,
		},
	}
	tvc.SetCreateTokenSecret(alloc.ID, task.Name, secret)

	req := &structs.DeriveVaultTokenRequest{
		NodeID:   node.ID,
		SecretID: node.SecretID,
		AllocID:  alloc.ID,
		Tasks:    tasks,
		QueryOptions: structs.QueryOptions{
			Region: "global",
		},
	}

	var resp structs.DeriveVaultTokenResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error != nil {
		t.Fatalf("bad: %v", resp.Error)
	}

	// Check the state store and ensure that we created a VaultAccessor
	ws := memdb.NewWatchSet()
	va, err := state.VaultAccessor(ws, accessor)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if va == nil {
		t.Fatalf("bad: %v", va)
	}

	if va.CreateIndex == 0 {
		t.Fatalf("bad: %v", va)
	}

	// Zero the raft-assigned index so the remaining fields can be compared
	// against a fixed expected value.
	va.CreateIndex = 0
	expected := &structs.VaultAccessor{
		AllocID:     alloc.ID,
		Task:        task.Name,
		NodeID:      alloc.NodeID,
		Accessor:    accessor,
		CreationTTL: ttl,
	}

	if !reflect.DeepEqual(expected, va) {
		t.Fatalf("Got %#v; want %#v", va, expected)
	}
}
  3101  
  3102  func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) {
  3103  	t.Parallel()
  3104  
  3105  	s1, cleanupS1 := TestServer(t, nil)
  3106  	defer cleanupS1()
  3107  	state := s1.fsm.State()
  3108  	codec := rpcClient(t, s1)
  3109  	testutil.WaitForLeader(t, s1.RPC)
  3110  
  3111  	// Enable vault and allow authenticated
  3112  	tr := true
  3113  	s1.config.VaultConfig.Enabled = &tr
  3114  	s1.config.VaultConfig.AllowUnauthenticated = &tr
  3115  
  3116  	// Replace the Vault Client on the server
  3117  	tvc := &TestVaultClient{}
  3118  	s1.vault = tvc
  3119  
  3120  	// Create the node
  3121  	node := mock.Node()
  3122  	if err := state.UpsertNode(2, node); err != nil {
  3123  		t.Fatalf("err: %v", err)
  3124  	}
  3125  
  3126  	// Create an alloc an allocation that has vault policies required
  3127  	alloc := mock.Alloc()
  3128  	alloc.NodeID = node.ID
  3129  	task := alloc.Job.TaskGroups[0].Tasks[0]
  3130  	tasks := []string{task.Name}
  3131  	task.Vault = &structs.Vault{Policies: []string{"a", "b"}}
  3132  	if err := state.UpsertAllocs(3, []*structs.Allocation{alloc}); err != nil {
  3133  		t.Fatalf("err: %v", err)
  3134  	}
  3135  
  3136  	// Return an error when creating the token
  3137  	tvc.SetCreateTokenError(alloc.ID, task.Name,
  3138  		structs.NewRecoverableError(fmt.Errorf("recover"), true))
  3139  
  3140  	req := &structs.DeriveVaultTokenRequest{
  3141  		NodeID:   node.ID,
  3142  		SecretID: node.SecretID,
  3143  		AllocID:  alloc.ID,
  3144  		Tasks:    tasks,
  3145  		QueryOptions: structs.QueryOptions{
  3146  			Region: "global",
  3147  		},
  3148  	}
  3149  
  3150  	var resp structs.DeriveVaultTokenResponse
  3151  	err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp)
  3152  	if err != nil {
  3153  		t.Fatalf("bad: %v", err)
  3154  	}
  3155  	if resp.Error == nil || !resp.Error.IsRecoverable() {
  3156  		t.Fatalf("bad: %+v", resp.Error)
  3157  	}
  3158  }
  3159  
  3160  func TestClientEndpoint_taskUsesConnect(t *testing.T) {
  3161  	t.Parallel()
  3162  
  3163  	try := func(t *testing.T, task *structs.Task, exp bool) {
  3164  		result := taskUsesConnect(task)
  3165  		require.Equal(t, exp, result)
  3166  	}
  3167  
  3168  	t.Run("task uses connect", func(t *testing.T) {
  3169  		try(t, &structs.Task{
  3170  			// see nomad.newConnectTask for how this works
  3171  			Name: "connect-proxy-myservice",
  3172  			Kind: "connect-proxy:myservice",
  3173  		}, true)
  3174  	})
  3175  
  3176  	t.Run("task does not use connect", func(t *testing.T) {
  3177  		try(t, &structs.Task{
  3178  			Name: "mytask",
  3179  			Kind: "incorrect:mytask",
  3180  		}, false)
  3181  	})
  3182  
  3183  	t.Run("task does not exist", func(t *testing.T) {
  3184  		try(t, nil, false)
  3185  	})
  3186  }
  3187  
  3188  func TestClientEndpoint_tasksNotUsingConnect(t *testing.T) {
  3189  	t.Parallel()
  3190  
  3191  	taskGroup := &structs.TaskGroup{
  3192  		Name: "testgroup",
  3193  		Tasks: []*structs.Task{{
  3194  			Name: "connect-proxy-service1",
  3195  			Kind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service1"),
  3196  		}, {
  3197  			Name: "incorrect-task3",
  3198  			Kind: "incorrect:task3",
  3199  		}, {
  3200  			Name: "connect-proxy-service4",
  3201  			Kind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service4"),
  3202  		}, {
  3203  			Name: "incorrect-task5",
  3204  			Kind: "incorrect:task5",
  3205  		}, {
  3206  			Name: "task6",
  3207  			Kind: structs.NewTaskKind(structs.ConnectNativePrefix, "service6"),
  3208  		}},
  3209  	}
  3210  
  3211  	requestingTasks := []string{
  3212  		"connect-proxy-service1", // yes
  3213  		"task2",                  // does not exist
  3214  		"task3",                  // no
  3215  		"connect-proxy-service4", // yes
  3216  		"task5",                  // no
  3217  		"task6",                  // yes, native
  3218  	}
  3219  
  3220  	notConnect, usingConnect := connectTasks(taskGroup, requestingTasks)
  3221  
  3222  	notConnectExp := []string{"task2", "task3", "task5"}
  3223  	usingConnectExp := []connectTask{
  3224  		{TaskName: "connect-proxy-service1", TaskKind: "connect-proxy:service1"},
  3225  		{TaskName: "connect-proxy-service4", TaskKind: "connect-proxy:service4"},
  3226  		{TaskName: "task6", TaskKind: "connect-native:service6"},
  3227  	}
  3228  
  3229  	require.Equal(t, notConnectExp, notConnect)
  3230  	require.Equal(t, usingConnectExp, usingConnect)
  3231  }
  3232  
  3233  func mutateConnectJob(t *testing.T, job *structs.Job) {
  3234  	var jch jobConnectHook
  3235  	_, warnings, err := jch.Mutate(job)
  3236  	require.Empty(t, warnings)
  3237  	require.NoError(t, err)
  3238  }
  3239  
// TestClientEndpoint_DeriveSIToken exercises the happy path of
// Node.DeriveSIToken: a valid request for a connect sidecar task yields a
// Consul Service Identity token, and a matching SITokenAccessor is persisted
// in the state store.
func TestClientEndpoint_DeriveSIToken(t *testing.T) {
	t.Parallel()
	r := require.New(t)

	s1, cleanupS1 := TestServer(t, nil) // already sets consul mocks
	defer cleanupS1()

	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Set allow unauthenticated (no operator token required)
	s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true)

	// Create the node
	node := mock.Node()
	err := state.UpsertNode(2, node)
	r.NoError(err)

	// Create an alloc with a typical connect service (sidecar) defined
	alloc := mock.ConnectAlloc()
	alloc.NodeID = node.ID
	mutateConnectJob(t, alloc.Job) // appends sidecar task
	sidecarTask := alloc.Job.TaskGroups[0].Tasks[1]

	err = state.UpsertAllocs(3, []*structs.Allocation{alloc})
	r.NoError(err)

	request := &structs.DeriveSITokenRequest{
		NodeID:       node.ID,
		SecretID:     node.SecretID,
		AllocID:      alloc.ID,
		Tasks:        []string{sidecarTask.Name},
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	var response structs.DeriveSITokenResponse
	err = msgpackrpc.CallWithCodec(codec, "Node.DeriveSIToken", request, &response)
	r.NoError(err)
	r.Nil(response.Error)

	// Check the state store and ensure we created a Consul SI Token Accessor
	ws := memdb.NewWatchSet()
	accessors, err := state.SITokenAccessorsByNode(ws, node.ID)
	r.NoError(err)
	r.Equal(1, len(accessors))                                  // only asked for one
	r.Equal("connect-proxy-testconnect", accessors[0].TaskName) // set by the mock
	r.Equal(node.ID, accessors[0].NodeID)                       // should match
	r.Equal(alloc.ID, accessors[0].AllocID)                     // should match
	r.True(helper.IsUUID(accessors[0].AccessorID))              // should be set
	r.Greater(accessors[0].CreateIndex, uint64(3))              // more than 3rd
}
  3292  
// TestClientEndpoint_DeriveSIToken_ConsulError checks that a recoverable
// error from the Consul ACLs API is returned in response.Error (with its
// recoverability preserved) rather than as an RPC transport error.
func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) {
	t.Parallel()
	r := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Set allow unauthenticated (no operator token required)
	s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true)

	// Create the node
	node := mock.Node()
	err := state.UpsertNode(2, node)
	r.NoError(err)

	// Create an alloc with a typical connect service (sidecar) defined
	alloc := mock.ConnectAlloc()
	alloc.NodeID = node.ID
	mutateConnectJob(t, alloc.Job) // appends sidecar task
	sidecarTask := alloc.Job.TaskGroups[0].Tasks[1]

	// rejigger the server to use a broken mock consul
	mockACLsAPI := consul.NewMockACLsAPI(s1.logger)
	mockACLsAPI.SetError(structs.NewRecoverableError(errors.New("consul recoverable error"), true))
	m := NewConsulACLsAPI(mockACLsAPI, s1.logger, nil)
	s1.consulACLs = m

	err = state.UpsertAllocs(3, []*structs.Allocation{alloc})
	r.NoError(err)

	request := &structs.DeriveSITokenRequest{
		NodeID:       node.ID,
		SecretID:     node.SecretID,
		AllocID:      alloc.ID,
		Tasks:        []string{sidecarTask.Name},
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	var response structs.DeriveSITokenResponse
	err = msgpackrpc.CallWithCodec(codec, "Node.DeriveSIToken", request, &response)
	r.NoError(err)
	r.NotNil(response.Error)               // error should be set
	r.True(response.Error.IsRecoverable()) // and is recoverable
}
  3340  
  3341  func TestClientEndpoint_EmitEvents(t *testing.T) {
  3342  	t.Parallel()
  3343  	require := require.New(t)
  3344  
  3345  	s1, cleanupS1 := TestServer(t, nil)
  3346  	defer cleanupS1()
  3347  	state := s1.fsm.State()
  3348  	codec := rpcClient(t, s1)
  3349  	testutil.WaitForLeader(t, s1.RPC)
  3350  
  3351  	// create a node that we can register our event to
  3352  	node := mock.Node()
  3353  	err := state.UpsertNode(2, node)
  3354  	require.Nil(err)
  3355  
  3356  	nodeEvent := &structs.NodeEvent{
  3357  		Message:   "Registration failed",
  3358  		Subsystem: "Server",
  3359  		Timestamp: time.Now(),
  3360  	}
  3361  
  3362  	nodeEvents := map[string][]*structs.NodeEvent{node.ID: {nodeEvent}}
  3363  	req := structs.EmitNodeEventsRequest{
  3364  		NodeEvents:   nodeEvents,
  3365  		WriteRequest: structs.WriteRequest{Region: "global"},
  3366  	}
  3367  
  3368  	var resp structs.GenericResponse
  3369  	err = msgpackrpc.CallWithCodec(codec, "Node.EmitEvents", &req, &resp)
  3370  	require.Nil(err)
  3371  	require.NotEqual(uint64(0), resp.Index)
  3372  
  3373  	// Check for the node in the FSM
  3374  	ws := memdb.NewWatchSet()
  3375  	out, err := state.NodeByID(ws, node.ID)
  3376  	require.Nil(err)
  3377  	require.False(len(out.Events) < 2)
  3378  }