github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/nomad/node_endpoint_test.go (about)

     1  package nomad
     2  
     3  import (
     4  	"errors"
     5  	"fmt"
     6  	"net"
     7  	"reflect"
     8  	"strings"
     9  	"testing"
    10  	"time"
    11  
    12  	memdb "github.com/hashicorp/go-memdb"
    13  	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
    14  	"github.com/hashicorp/nomad/acl"
    15  	"github.com/hashicorp/nomad/command/agent/consul"
    16  	"github.com/hashicorp/nomad/helper"
    17  	"github.com/hashicorp/nomad/helper/uuid"
    18  	"github.com/hashicorp/nomad/nomad/mock"
    19  	"github.com/hashicorp/nomad/nomad/state"
    20  	"github.com/hashicorp/nomad/nomad/structs"
    21  	"github.com/hashicorp/nomad/testutil"
    22  	vapi "github.com/hashicorp/vault/api"
    23  	"github.com/kr/pretty"
    24  	"github.com/stretchr/testify/assert"
    25  	"github.com/stretchr/testify/require"
    26  )
    27  
    28  func TestClientEndpoint_Register(t *testing.T) {
    29  	t.Parallel()
    30  	require := require.New(t)
    31  
    32  	s1, cleanupS1 := TestServer(t, nil)
    33  	defer cleanupS1()
    34  	codec := rpcClient(t, s1)
    35  	testutil.WaitForLeader(t, s1.RPC)
    36  
    37  	// Check that we have no client connections
    38  	require.Empty(s1.connectedNodes())
    39  
    40  	// Create the register request
    41  	node := mock.Node()
    42  	req := &structs.NodeRegisterRequest{
    43  		Node:         node,
    44  		WriteRequest: structs.WriteRequest{Region: "global"},
    45  	}
    46  
    47  	// Fetch the response
    48  	var resp structs.GenericResponse
    49  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
    50  		t.Fatalf("err: %v", err)
    51  	}
    52  	if resp.Index == 0 {
    53  		t.Fatalf("bad index: %d", resp.Index)
    54  	}
    55  
    56  	// Check that we have the client connections
    57  	nodes := s1.connectedNodes()
    58  	require.Len(nodes, 1)
    59  	require.Contains(nodes, node.ID)
    60  
    61  	// Check for the node in the FSM
    62  	state := s1.fsm.State()
    63  	ws := memdb.NewWatchSet()
    64  	out, err := state.NodeByID(ws, node.ID)
    65  	if err != nil {
    66  		t.Fatalf("err: %v", err)
    67  	}
    68  	if out == nil {
    69  		t.Fatalf("expected node")
    70  	}
    71  	if out.CreateIndex != resp.Index {
    72  		t.Fatalf("index mis-match")
    73  	}
    74  	if out.ComputedClass == "" {
    75  		t.Fatal("ComputedClass not set")
    76  	}
    77  
    78  	// Close the connection and check that we remove the client connections
    79  	require.Nil(codec.Close())
    80  	testutil.WaitForResult(func() (bool, error) {
    81  		nodes := s1.connectedNodes()
    82  		return len(nodes) == 0, nil
    83  	}, func(err error) {
    84  		t.Fatalf("should have no clients")
    85  	})
    86  }
    87  
// This test asserts that we only track node connections if they are not from
// forwarded RPCs. This is essential otherwise we will think a Yamux session to
// a Nomad server is actually the session to the node.
func TestClientEndpoint_Register_NodeConn_Forwarded(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	// Stand up a two-server cluster so that a register RPC sent to the
	// non-leader must be forwarded to the leader.
	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
	})

	defer cleanupS1()
	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 2
	})
	defer cleanupS2()
	TestJoin(t, s1, s2)
	testutil.WaitForLeader(t, s1.RPC)
	testutil.WaitForLeader(t, s2.RPC)

	// Determine the non-leader server
	var leader, nonLeader *Server
	if s1.IsLeader() {
		leader = s1
		nonLeader = s2
	} else {
		leader = s2
		nonLeader = s1
	}

	// Send the requests to the non-leader
	codec := rpcClient(t, nonLeader)

	// Check that we have no client connections
	require.Empty(nonLeader.connectedNodes())
	require.Empty(leader.connectedNodes())

	// Create the register request
	node := mock.Node()
	req := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Check that we have the client connections on the non leader: only the
	// server terminating the client's session should track the connection.
	nodes := nonLeader.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check that we have no client connections on the leader: the forwarded
	// RPC must not be mistaken for a direct node session.
	nodes = leader.connectedNodes()
	require.Empty(nodes)

	// Check for the node in the FSM, polling because the write is applied via
	// raft and may not be immediately visible.
	state := leader.State()
	testutil.WaitForResult(func() (bool, error) {
		// nil watch set: we simply re-query on each WaitForResult iteration.
		out, err := state.NodeByID(nil, node.ID)
		if err != nil {
			return false, err
		}
		if out == nil {
			return false, fmt.Errorf("expected node")
		}
		if out.CreateIndex != resp.Index {
			return false, fmt.Errorf("index mis-match")
		}
		if out.ComputedClass == "" {
			return false, fmt.Errorf("ComputedClass not set")
		}

		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := nonLeader.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}
   181  
   182  func TestClientEndpoint_Register_SecretMismatch(t *testing.T) {
   183  	t.Parallel()
   184  
   185  	s1, cleanupS1 := TestServer(t, nil)
   186  	defer cleanupS1()
   187  	codec := rpcClient(t, s1)
   188  	testutil.WaitForLeader(t, s1.RPC)
   189  
   190  	// Create the register request
   191  	node := mock.Node()
   192  	req := &structs.NodeRegisterRequest{
   193  		Node:         node,
   194  		WriteRequest: structs.WriteRequest{Region: "global"},
   195  	}
   196  
   197  	// Fetch the response
   198  	var resp structs.GenericResponse
   199  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp); err != nil {
   200  		t.Fatalf("err: %v", err)
   201  	}
   202  
   203  	// Update the nodes SecretID
   204  	node.SecretID = uuid.Generate()
   205  	err := msgpackrpc.CallWithCodec(codec, "Node.Register", req, &resp)
   206  	if err == nil || !strings.Contains(err.Error(), "Not registering") {
   207  		t.Fatalf("Expecting error regarding mismatching secret id: %v", err)
   208  	}
   209  }
   210  
   211  // Test the deprecated single node deregistration path
   212  func TestClientEndpoint_DeregisterOne(t *testing.T) {
   213  	t.Parallel()
   214  
   215  	s1, cleanupS1 := TestServer(t, nil)
   216  	defer cleanupS1()
   217  	codec := rpcClient(t, s1)
   218  	testutil.WaitForLeader(t, s1.RPC)
   219  
   220  	// Create the register request
   221  	node := mock.Node()
   222  	reg := &structs.NodeRegisterRequest{
   223  		Node:         node,
   224  		WriteRequest: structs.WriteRequest{Region: "global"},
   225  	}
   226  
   227  	// Fetch the response
   228  	var resp structs.GenericResponse
   229  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   230  		t.Fatalf("err: %v", err)
   231  	}
   232  
   233  	// Deregister
   234  	dereg := &structs.NodeDeregisterRequest{
   235  		NodeID:       node.ID,
   236  		WriteRequest: structs.WriteRequest{Region: "global"},
   237  	}
   238  	var resp2 structs.GenericResponse
   239  	if err := msgpackrpc.CallWithCodec(codec, "Node.Deregister", dereg, &resp2); err != nil {
   240  		t.Fatalf("err: %v", err)
   241  	}
   242  	if resp2.Index == 0 {
   243  		t.Fatalf("bad index: %d", resp2.Index)
   244  	}
   245  
   246  	// Check for the node in the FSM
   247  	state := s1.fsm.State()
   248  	ws := memdb.NewWatchSet()
   249  	out, err := state.NodeByID(ws, node.ID)
   250  	if err != nil {
   251  		t.Fatalf("err: %v", err)
   252  	}
   253  	if out != nil {
   254  		t.Fatalf("unexpected node")
   255  	}
   256  }
   257  
// TestClientEndpoint_Deregister_ACL verifies ACL enforcement on
// Node.BatchDeregister: anonymous and read-only tokens must be rejected,
// while node-write and root tokens succeed.
func TestClientEndpoint_Deregister_ACL(t *testing.T) {
	t.Parallel()

	s1, root, cleanupS1 := TestACLServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the node
	node := mock.Node()
	node1 := mock.Node()
	state := s1.fsm.State()
	if err := state.UpsertNode(structs.MsgTypeTestSetup, 1, node); err != nil {
		t.Fatalf("err: %v", err)
	}
	if err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node1); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create the policy and tokens: "valid" has node write, "invalid" only
	// node read, which is insufficient for deregistration.
	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))

	// Deregister without any token and expect it to fail
	dereg := &structs.NodeBatchDeregisterRequest{
		NodeIDs:      []string{node.ID},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp); err == nil {
		t.Fatalf("node de-register succeeded")
	}

	// Deregister with a valid token
	dereg.AuthToken = validToken.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for the node in the FSM: it should have been removed by the
	// authorized deregistration above.
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out != nil {
		t.Fatalf("unexpected node")
	}

	// Deregister with an invalid token.
	dereg1 := &structs.NodeBatchDeregisterRequest{
		NodeIDs:      []string{node1.ID},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	dereg1.AuthToken = invalidToken.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg1, &resp); err == nil {
		t.Fatalf("rpc should not have succeeded")
	}

	// Try with a root token
	dereg1.AuthToken = root.SecretID
	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg1, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
}
   323  
   324  func TestClientEndpoint_Deregister_Vault(t *testing.T) {
   325  	t.Parallel()
   326  
   327  	s1, cleanupS1 := TestServer(t, nil)
   328  	defer cleanupS1()
   329  	codec := rpcClient(t, s1)
   330  	testutil.WaitForLeader(t, s1.RPC)
   331  
   332  	// Create the register request
   333  	node := mock.Node()
   334  	reg := &structs.NodeRegisterRequest{
   335  		Node:         node,
   336  		WriteRequest: structs.WriteRequest{Region: "global"},
   337  	}
   338  
   339  	// Fetch the response
   340  	var resp structs.GenericResponse
   341  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   342  		t.Fatalf("err: %v", err)
   343  	}
   344  
   345  	// Swap the servers Vault Client
   346  	tvc := &TestVaultClient{}
   347  	s1.vault = tvc
   348  
   349  	// Put some Vault accessors in the state store for that node
   350  	state := s1.fsm.State()
   351  	va1 := mock.VaultAccessor()
   352  	va1.NodeID = node.ID
   353  	va2 := mock.VaultAccessor()
   354  	va2.NodeID = node.ID
   355  	state.UpsertVaultAccessor(100, []*structs.VaultAccessor{va1, va2})
   356  
   357  	// Deregister
   358  	dereg := &structs.NodeBatchDeregisterRequest{
   359  		NodeIDs:      []string{node.ID},
   360  		WriteRequest: structs.WriteRequest{Region: "global"},
   361  	}
   362  	var resp2 structs.GenericResponse
   363  	if err := msgpackrpc.CallWithCodec(codec, "Node.BatchDeregister", dereg, &resp2); err != nil {
   364  		t.Fatalf("err: %v", err)
   365  	}
   366  	if resp2.Index == 0 {
   367  		t.Fatalf("bad index: %d", resp2.Index)
   368  	}
   369  
   370  	// Check for the node in the FSM
   371  	ws := memdb.NewWatchSet()
   372  	out, err := state.NodeByID(ws, node.ID)
   373  	if err != nil {
   374  		t.Fatalf("err: %v", err)
   375  	}
   376  	if out != nil {
   377  		t.Fatalf("unexpected node")
   378  	}
   379  
   380  	// Check that the endpoint revoked the tokens
   381  	if l := len(tvc.RevokedTokens); l != 2 {
   382  		t.Fatalf("Deregister revoked %d tokens; want 2", l)
   383  	}
   384  }
   385  
// TestClientEndpoint_UpdateStatus registers a node, heartbeats it with a
// status update, and verifies the heartbeat TTL bounds, the tracked client
// connection, and the node's modify index in the FSM.
func TestClientEndpoint_UpdateStatus(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Check that we have no client connections
	require.Empty(s1.connectedNodes())

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval: the server randomizes the TTL within
	// [MinHeartbeatTTL, 2*MinHeartbeatTTL).
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Update the status
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusInit,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	// A status change performs a raft write, so the index must advance.
	if resp2.Index == 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for heartbeat interval
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check that we have the client connections
	nodes := s1.connectedNodes()
	require.Len(nodes, 1)
	require.Contains(nodes, node.ID)

	// Check for the node in the FSM
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if out == nil {
		t.Fatalf("expected node")
	}
	if out.ModifyIndex != resp2.Index {
		t.Fatalf("index mis-match")
	}

	// Close the connection and check that we remove the client connections
	require.Nil(codec.Close())
	testutil.WaitForResult(func() (bool, error) {
		nodes := s1.connectedNodes()
		return len(nodes) == 0, nil
	}, func(err error) {
		t.Fatalf("should have no clients")
	})
}
   465  
   466  func TestClientEndpoint_UpdateStatus_Vault(t *testing.T) {
   467  	t.Parallel()
   468  
   469  	s1, cleanupS1 := TestServer(t, nil)
   470  	defer cleanupS1()
   471  	codec := rpcClient(t, s1)
   472  	testutil.WaitForLeader(t, s1.RPC)
   473  
   474  	// Create the register request
   475  	node := mock.Node()
   476  	reg := &structs.NodeRegisterRequest{
   477  		Node:         node,
   478  		WriteRequest: structs.WriteRequest{Region: "global"},
   479  	}
   480  
   481  	// Fetch the response
   482  	var resp structs.NodeUpdateResponse
   483  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   484  		t.Fatalf("err: %v", err)
   485  	}
   486  
   487  	// Check for heartbeat interval
   488  	ttl := resp.HeartbeatTTL
   489  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   490  		t.Fatalf("bad: %#v", ttl)
   491  	}
   492  
   493  	// Swap the servers Vault Client
   494  	tvc := &TestVaultClient{}
   495  	s1.vault = tvc
   496  
   497  	// Put some Vault accessors in the state store for that node
   498  	state := s1.fsm.State()
   499  	va1 := mock.VaultAccessor()
   500  	va1.NodeID = node.ID
   501  	va2 := mock.VaultAccessor()
   502  	va2.NodeID = node.ID
   503  	state.UpsertVaultAccessor(100, []*structs.VaultAccessor{va1, va2})
   504  
   505  	// Update the status to be down
   506  	dereg := &structs.NodeUpdateStatusRequest{
   507  		NodeID:       node.ID,
   508  		Status:       structs.NodeStatusDown,
   509  		WriteRequest: structs.WriteRequest{Region: "global"},
   510  	}
   511  	var resp2 structs.NodeUpdateResponse
   512  	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
   513  		t.Fatalf("err: %v", err)
   514  	}
   515  	if resp2.Index == 0 {
   516  		t.Fatalf("bad index: %d", resp2.Index)
   517  	}
   518  
   519  	// Check that the endpoint revoked the tokens
   520  	if l := len(tvc.RevokedTokens); l != 2 {
   521  		t.Fatalf("Deregister revoked %d tokens; want 2", l)
   522  	}
   523  }
   524  
// TestClientEndpoint_UpdateStatus_HeartbeatRecovery verifies that a node
// registered as down can recover via a status update, and that the recovery
// is recorded as a re-registration node event.
func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Check that we have no client connections
	require.Empty(s1.connectedNodes())

	// Create the register request but make the node down
	node := mock.Node()
	node.Status = structs.NodeStatusDown
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.NoError(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Update the status
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       structs.NodeStatusInit,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	require.NoError(msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2))
	require.NotZero(resp2.Index)

	// Check for heartbeat interval
	ttl := resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for the node in the FSM. The second event (index 1) should be
	// the heartbeat re-registration recorded by the status transition.
	state := s1.fsm.State()
	ws := memdb.NewWatchSet()
	out, err := state.NodeByID(ws, node.ID)
	require.NoError(err)
	require.NotNil(out)
	require.EqualValues(resp2.Index, out.ModifyIndex)
	require.Len(out.Events, 2)
	require.Equal(NodeHeartbeatEventReregistered, out.Events[1].Message)
}
   575  
   576  func TestClientEndpoint_Register_GetEvals(t *testing.T) {
   577  	t.Parallel()
   578  
   579  	s1, cleanupS1 := TestServer(t, nil)
   580  	defer cleanupS1()
   581  	codec := rpcClient(t, s1)
   582  	testutil.WaitForLeader(t, s1.RPC)
   583  
   584  	// Register a system job.
   585  	job := mock.SystemJob()
   586  	state := s1.fsm.State()
   587  	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, job); err != nil {
   588  		t.Fatalf("err: %v", err)
   589  	}
   590  
   591  	// Create the register request going directly to ready
   592  	node := mock.Node()
   593  	node.Status = structs.NodeStatusReady
   594  	reg := &structs.NodeRegisterRequest{
   595  		Node:         node,
   596  		WriteRequest: structs.WriteRequest{Region: "global"},
   597  	}
   598  
   599  	// Fetch the response
   600  	var resp structs.NodeUpdateResponse
   601  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   602  		t.Fatalf("err: %v", err)
   603  	}
   604  
   605  	// Check for heartbeat interval
   606  	ttl := resp.HeartbeatTTL
   607  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   608  		t.Fatalf("bad: %#v", ttl)
   609  	}
   610  
   611  	// Check for an eval caused by the system job.
   612  	if len(resp.EvalIDs) != 1 {
   613  		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
   614  	}
   615  
   616  	evalID := resp.EvalIDs[0]
   617  	ws := memdb.NewWatchSet()
   618  	eval, err := state.EvalByID(ws, evalID)
   619  	if err != nil {
   620  		t.Fatalf("could not get eval %v", evalID)
   621  	}
   622  
   623  	if eval.Type != "system" {
   624  		t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
   625  	}
   626  
   627  	// Check for the node in the FSM
   628  	out, err := state.NodeByID(ws, node.ID)
   629  	if err != nil {
   630  		t.Fatalf("err: %v", err)
   631  	}
   632  	if out == nil {
   633  		t.Fatalf("expected node")
   634  	}
   635  	if out.ModifyIndex != resp.Index {
   636  		t.Fatalf("index mis-match")
   637  	}
   638  
   639  	// Transition it to down and then ready
   640  	node.Status = structs.NodeStatusDown
   641  	reg = &structs.NodeRegisterRequest{
   642  		Node:         node,
   643  		WriteRequest: structs.WriteRequest{Region: "global"},
   644  	}
   645  
   646  	// Fetch the response
   647  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   648  		t.Fatalf("err: %v", err)
   649  	}
   650  
   651  	if len(resp.EvalIDs) != 1 {
   652  		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
   653  	}
   654  
   655  	node.Status = structs.NodeStatusReady
   656  	reg = &structs.NodeRegisterRequest{
   657  		Node:         node,
   658  		WriteRequest: structs.WriteRequest{Region: "global"},
   659  	}
   660  
   661  	// Fetch the response
   662  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   663  		t.Fatalf("err: %v", err)
   664  	}
   665  
   666  	if len(resp.EvalIDs) != 1 {
   667  		t.Fatalf("expected one eval; got %#v", resp.EvalIDs)
   668  	}
   669  }
   670  
   671  func TestClientEndpoint_UpdateStatus_GetEvals(t *testing.T) {
   672  	t.Parallel()
   673  
   674  	s1, cleanupS1 := TestServer(t, nil)
   675  	defer cleanupS1()
   676  	codec := rpcClient(t, s1)
   677  	testutil.WaitForLeader(t, s1.RPC)
   678  
   679  	// Register a system job.
   680  	job := mock.SystemJob()
   681  	state := s1.fsm.State()
   682  	if err := state.UpsertJob(structs.MsgTypeTestSetup, 1, job); err != nil {
   683  		t.Fatalf("err: %v", err)
   684  	}
   685  
   686  	// Create the register request
   687  	node := mock.Node()
   688  	node.Status = structs.NodeStatusInit
   689  	reg := &structs.NodeRegisterRequest{
   690  		Node:         node,
   691  		WriteRequest: structs.WriteRequest{Region: "global"},
   692  	}
   693  
   694  	// Fetch the response
   695  	var resp structs.NodeUpdateResponse
   696  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   697  		t.Fatalf("err: %v", err)
   698  	}
   699  
   700  	// Check for heartbeat interval
   701  	ttl := resp.HeartbeatTTL
   702  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   703  		t.Fatalf("bad: %#v", ttl)
   704  	}
   705  
   706  	// Update the status
   707  	update := &structs.NodeUpdateStatusRequest{
   708  		NodeID:       node.ID,
   709  		Status:       structs.NodeStatusReady,
   710  		WriteRequest: structs.WriteRequest{Region: "global"},
   711  	}
   712  	var resp2 structs.NodeUpdateResponse
   713  	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", update, &resp2); err != nil {
   714  		t.Fatalf("err: %v", err)
   715  	}
   716  	if resp2.Index == 0 {
   717  		t.Fatalf("bad index: %d", resp2.Index)
   718  	}
   719  
   720  	// Check for an eval caused by the system job.
   721  	if len(resp2.EvalIDs) != 1 {
   722  		t.Fatalf("expected one eval; got %#v", resp2.EvalIDs)
   723  	}
   724  
   725  	evalID := resp2.EvalIDs[0]
   726  	ws := memdb.NewWatchSet()
   727  	eval, err := state.EvalByID(ws, evalID)
   728  	if err != nil {
   729  		t.Fatalf("could not get eval %v", evalID)
   730  	}
   731  
   732  	if eval.Type != "system" {
   733  		t.Fatalf("unexpected eval type; got %v; want %q", eval.Type, "system")
   734  	}
   735  
   736  	// Check for heartbeat interval
   737  	ttl = resp2.HeartbeatTTL
   738  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   739  		t.Fatalf("bad: %#v", ttl)
   740  	}
   741  
   742  	// Check for the node in the FSM
   743  	out, err := state.NodeByID(ws, node.ID)
   744  	if err != nil {
   745  		t.Fatalf("err: %v", err)
   746  	}
   747  	if out == nil {
   748  		t.Fatalf("expected node")
   749  	}
   750  	if out.ModifyIndex != resp2.Index {
   751  		t.Fatalf("index mis-match")
   752  	}
   753  }
   754  
// TestClientEndpoint_UpdateStatus_HeartbeatOnly verifies that a status
// update that does not change the node's status acts as a pure heartbeat:
// it refreshes the TTL without performing a raft write.
func TestClientEndpoint_UpdateStatus_HeartbeatOnly(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS1()

	s2, cleanupS2 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS2()

	s3, cleanupS3 := TestServer(t, func(c *Config) {
		c.BootstrapExpect = 3
	})
	defer cleanupS3()
	servers := []*Server{s1, s2, s3}
	TestJoin(t, s1, s2, s3)

	// Wait until each server sees the full three-member cluster.
	for _, s := range servers {
		testutil.WaitForResult(func() (bool, error) {
			peers, _ := s.numPeers()
			return peers == 3, nil
		}, func(err error) {
			t.Fatalf("should have 3 peers")
		})
	}

	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Check for heartbeat interval
	ttl := resp.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}

	// Check for heartbeat servers: the heartbeat response should advertise
	// the cluster's servers to the client.
	serverAddrs := resp.Servers
	if len(serverAddrs) == 0 {
		t.Fatalf("bad: %#v", serverAddrs)
	}

	// Update the status, static state
	dereg := &structs.NodeUpdateStatusRequest{
		NodeID:       node.ID,
		Status:       node.Status,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", dereg, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	// No status change means no raft write, so the index must remain zero.
	if resp2.Index != 0 {
		t.Fatalf("bad index: %d", resp2.Index)
	}

	// Check for heartbeat interval
	ttl = resp2.HeartbeatTTL
	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
		t.Fatalf("bad: %#v", ttl)
	}
}
   832  
   833  func TestClientEndpoint_UpdateStatus_HeartbeatOnly_Advertise(t *testing.T) {
   834  	t.Parallel()
   835  	require := require.New(t)
   836  
   837  	advAddr := "127.0.1.1:1234"
   838  	adv, err := net.ResolveTCPAddr("tcp", advAddr)
   839  	require.Nil(err)
   840  
   841  	s1, cleanupS1 := TestServer(t, func(c *Config) {
   842  		c.ClientRPCAdvertise = adv
   843  	})
   844  	defer cleanupS1()
   845  	codec := rpcClient(t, s1)
   846  	testutil.WaitForLeader(t, s1.RPC)
   847  
   848  	// Create the register request
   849  	node := mock.Node()
   850  	reg := &structs.NodeRegisterRequest{
   851  		Node:         node,
   852  		WriteRequest: structs.WriteRequest{Region: "global"},
   853  	}
   854  
   855  	// Fetch the response
   856  	var resp structs.NodeUpdateResponse
   857  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
   858  		t.Fatalf("err: %v", err)
   859  	}
   860  
   861  	// Check for heartbeat interval
   862  	ttl := resp.HeartbeatTTL
   863  	if ttl < s1.config.MinHeartbeatTTL || ttl > 2*s1.config.MinHeartbeatTTL {
   864  		t.Fatalf("bad: %#v", ttl)
   865  	}
   866  
   867  	// Check for heartbeat servers
   868  	require.Len(resp.Servers, 1)
   869  	require.Equal(resp.Servers[0].RPCAdvertiseAddr, advAddr)
   870  }
   871  
   872  func TestClientEndpoint_UpdateDrain(t *testing.T) {
   873  	t.Parallel()
   874  	require := require.New(t)
   875  
   876  	s1, cleanupS1 := TestServer(t, nil)
   877  	defer cleanupS1()
   878  	codec := rpcClient(t, s1)
   879  	testutil.WaitForLeader(t, s1.RPC)
   880  
   881  	// Disable drainer to prevent drain from completing during test
   882  	s1.nodeDrainer.SetEnabled(false, nil)
   883  
   884  	// Create the register request
   885  	node := mock.Node()
   886  	reg := &structs.NodeRegisterRequest{
   887  		Node:         node,
   888  		WriteRequest: structs.WriteRequest{Region: "global"},
   889  	}
   890  
   891  	// Fetch the response
   892  	var resp structs.NodeUpdateResponse
   893  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
   894  
   895  	beforeUpdate := time.Now()
   896  	strategy := &structs.DrainStrategy{
   897  		DrainSpec: structs.DrainSpec{
   898  			Deadline: 10 * time.Second,
   899  		},
   900  	}
   901  
   902  	// Update the status
   903  	dereg := &structs.NodeUpdateDrainRequest{
   904  		NodeID:        node.ID,
   905  		DrainStrategy: strategy,
   906  		WriteRequest:  structs.WriteRequest{Region: "global"},
   907  	}
   908  	var resp2 structs.NodeDrainUpdateResponse
   909  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
   910  	require.NotZero(resp2.Index)
   911  
   912  	// Check for the node in the FSM
   913  	state := s1.fsm.State()
   914  	ws := memdb.NewWatchSet()
   915  	out, err := state.NodeByID(ws, node.ID)
   916  	require.Nil(err)
   917  	require.True(out.Drain)
   918  	require.Equal(strategy.Deadline, out.DrainStrategy.Deadline)
   919  	require.Len(out.Events, 2)
   920  	require.Equal(NodeDrainEventDrainSet, out.Events[1].Message)
   921  
   922  	// before+deadline should be before the forced deadline
   923  	require.True(beforeUpdate.Add(strategy.Deadline).Before(out.DrainStrategy.ForceDeadline))
   924  
   925  	// now+deadline should be after the forced deadline
   926  	require.True(time.Now().Add(strategy.Deadline).After(out.DrainStrategy.ForceDeadline))
   927  
   928  	drainStartedAt := out.DrainStrategy.StartedAt
   929  	// StartedAt should be close to the time the drain started
   930  	require.WithinDuration(beforeUpdate, drainStartedAt, 1*time.Second)
   931  
   932  	// StartedAt shouldn't change if a new request comes while still draining
   933  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))
   934  	ws = memdb.NewWatchSet()
   935  	out, err = state.NodeByID(ws, node.ID)
   936  	require.NoError(err)
   937  	require.True(out.DrainStrategy.StartedAt.Equal(drainStartedAt))
   938  
   939  	// Register a system job
   940  	job := mock.SystemJob()
   941  	require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, job))
   942  
   943  	// Update the eligibility and expect evals
   944  	dereg.DrainStrategy = nil
   945  	dereg.MarkEligible = true
   946  	var resp3 structs.NodeDrainUpdateResponse
   947  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
   948  	require.NotZero(resp3.Index)
   949  	require.NotZero(resp3.EvalCreateIndex)
   950  	require.Len(resp3.EvalIDs, 1)
   951  
   952  	// Check for updated node in the FSM
   953  	ws = memdb.NewWatchSet()
   954  	out, err = state.NodeByID(ws, node.ID)
   955  	require.NoError(err)
   956  	require.Len(out.Events, 4)
   957  	require.Equal(NodeDrainEventDrainDisabled, out.Events[3].Message)
   958  
   959  	// Check that calling UpdateDrain with the same DrainStrategy does not emit
   960  	// a node event.
   961  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp3))
   962  	ws = memdb.NewWatchSet()
   963  	out, err = state.NodeByID(ws, node.ID)
   964  	require.NoError(err)
   965  	require.Len(out.Events, 4)
   966  }
   967  
   968  func TestClientEndpoint_UpdateDrain_ACL(t *testing.T) {
   969  	t.Parallel()
   970  
   971  	s1, root, cleanupS1 := TestACLServer(t, nil)
   972  	defer cleanupS1()
   973  	codec := rpcClient(t, s1)
   974  	testutil.WaitForLeader(t, s1.RPC)
   975  	require := require.New(t)
   976  
   977  	// Create the node
   978  	node := mock.Node()
   979  	state := s1.fsm.State()
   980  
   981  	require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
   982  
   983  	// Create the policy and tokens
   984  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
   985  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
   986  
   987  	// Update the status without a token and expect failure
   988  	dereg := &structs.NodeUpdateDrainRequest{
   989  		NodeID: node.ID,
   990  		DrainStrategy: &structs.DrainStrategy{
   991  			DrainSpec: structs.DrainSpec{
   992  				Deadline: 10 * time.Second,
   993  			},
   994  		},
   995  		WriteRequest: structs.WriteRequest{Region: "global"},
   996  	}
   997  	{
   998  		var resp structs.NodeDrainUpdateResponse
   999  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
  1000  		require.NotNil(err, "RPC")
  1001  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1002  	}
  1003  
  1004  	// Try with a valid token
  1005  	dereg.AuthToken = validToken.SecretID
  1006  	{
  1007  		var resp structs.NodeDrainUpdateResponse
  1008  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
  1009  	}
  1010  
  1011  	// Try with a invalid token
  1012  	dereg.AuthToken = invalidToken.SecretID
  1013  	{
  1014  		var resp structs.NodeDrainUpdateResponse
  1015  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp)
  1016  		require.NotNil(err, "RPC")
  1017  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1018  	}
  1019  
  1020  	// Try with a root token
  1021  	dereg.AuthToken = root.SecretID
  1022  	{
  1023  		var resp structs.NodeDrainUpdateResponse
  1024  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp), "RPC")
  1025  	}
  1026  }
  1027  
// This test ensures that Nomad marks client state of allocations which are in
// pending/running state to lost when a node is marked as down.
//
// Flow: register a node, place one service alloc and one system alloc on it,
// drain the node, then re-register it with status "down" and wait for the job
// summaries to show the allocs as lost.
func TestClientEndpoint_Drain_Down(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)
	require := require.New(t)

	// Register a node
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	// Fetch the response
	var resp structs.NodeUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Register a service job (single task group, single instance)
	var jobResp structs.JobRegisterResponse
	job := mock.Job()
	job.TaskGroups[0].Count = 1
	jobReq := &structs.JobRegisterRequest{
		Job: job,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job.Namespace,
		},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp))

	// Register a system job
	var jobResp1 structs.JobRegisterResponse
	job1 := mock.SystemJob()
	job1.TaskGroups[0].Count = 1
	jobReq1 := &structs.JobRegisterRequest{
		Job: job1,
		WriteRequest: structs.WriteRequest{
			Region:    "global",
			Namespace: job1.Namespace,
		},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq1, &jobResp1))

	// Wait for the scheduler to create an allocation for each job before we
	// start draining; otherwise there would be nothing to mark lost.
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		allocs, err := s1.fsm.state.AllocsByJob(ws, job.Namespace, job.ID, true)
		if err != nil {
			return false, err
		}
		allocs1, err := s1.fsm.state.AllocsByJob(ws, job1.Namespace, job1.ID, true)
		if err != nil {
			return false, err
		}
		return len(allocs) > 0 && len(allocs1) > 0, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})

	// Drain the node
	// NOTE(review): the negative deadline appears intended to put the forced
	// drain deadline in the past immediately — confirm against the drainer.
	dereg := &structs.NodeUpdateDrainRequest{
		NodeID: node.ID,
		DrainStrategy: &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: -1 * time.Second,
			},
		},
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	var resp2 structs.NodeDrainUpdateResponse
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateDrain", dereg, &resp2))

	// Mark the node as down by re-registering it with a down status
	node.Status = structs.NodeStatusDown
	reg = &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}
	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))

	// Ensure that the allocation has transitioned to lost.
	// The service job shows Lost:1 plus Queued:1 (a replacement is wanted);
	// the system job only shows Lost:1 since its node is gone.
	testutil.WaitForResult(func() (bool, error) {
		ws := memdb.NewWatchSet()
		summary, err := s1.fsm.state.JobSummaryByID(ws, job.Namespace, job.ID)
		if err != nil {
			return false, err
		}
		expectedSummary := &structs.JobSummary{
			JobID:     job.ID,
			Namespace: job.Namespace,
			Summary: map[string]structs.TaskGroupSummary{
				"web": {
					Queued: 1,
					Lost:   1,
				},
			},
			Children:    new(structs.JobChildrenSummary),
			CreateIndex: jobResp.JobModifyIndex,
			ModifyIndex: summary.ModifyIndex,
		}
		if !reflect.DeepEqual(summary, expectedSummary) {
			return false, fmt.Errorf("Service: expected: %#v, actual: %#v", expectedSummary, summary)
		}

		summary1, err := s1.fsm.state.JobSummaryByID(ws, job1.Namespace, job1.ID)
		if err != nil {
			return false, err
		}
		expectedSummary1 := &structs.JobSummary{
			JobID:     job1.ID,
			Namespace: job1.Namespace,
			Summary: map[string]structs.TaskGroupSummary{
				"web": {
					Lost: 1,
				},
			},
			Children:    new(structs.JobChildrenSummary),
			CreateIndex: jobResp1.JobModifyIndex,
			ModifyIndex: summary1.ModifyIndex,
		}
		if !reflect.DeepEqual(summary1, expectedSummary1) {
			return false, fmt.Errorf("System: expected: %#v, actual: %#v", expectedSummary1, summary1)
		}
		return true, nil
	}, func(err error) {
		t.Fatalf("err: %v", err)
	})
}
  1160  
  1161  func TestClientEndpoint_UpdateEligibility(t *testing.T) {
  1162  	t.Parallel()
  1163  	require := require.New(t)
  1164  
  1165  	s1, cleanupS1 := TestServer(t, nil)
  1166  	defer cleanupS1()
  1167  	codec := rpcClient(t, s1)
  1168  	testutil.WaitForLeader(t, s1.RPC)
  1169  
  1170  	// Create the register request
  1171  	node := mock.Node()
  1172  	reg := &structs.NodeRegisterRequest{
  1173  		Node:         node,
  1174  		WriteRequest: structs.WriteRequest{Region: "global"},
  1175  	}
  1176  
  1177  	// Fetch the response
  1178  	var resp structs.NodeUpdateResponse
  1179  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
  1180  
  1181  	// Update the eligibility
  1182  	elig := &structs.NodeUpdateEligibilityRequest{
  1183  		NodeID:       node.ID,
  1184  		Eligibility:  structs.NodeSchedulingIneligible,
  1185  		WriteRequest: structs.WriteRequest{Region: "global"},
  1186  	}
  1187  	var resp2 structs.NodeEligibilityUpdateResponse
  1188  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp2))
  1189  	require.NotZero(resp2.Index)
  1190  	require.Zero(resp2.EvalCreateIndex)
  1191  	require.Empty(resp2.EvalIDs)
  1192  
  1193  	// Check for the node in the FSM
  1194  	state := s1.fsm.State()
  1195  	out, err := state.NodeByID(nil, node.ID)
  1196  	require.Nil(err)
  1197  	require.Equal(out.SchedulingEligibility, structs.NodeSchedulingIneligible)
  1198  	require.Len(out.Events, 2)
  1199  	require.Equal(NodeEligibilityEventIneligible, out.Events[1].Message)
  1200  
  1201  	// Register a system job
  1202  	job := mock.SystemJob()
  1203  	require.Nil(s1.State().UpsertJob(structs.MsgTypeTestSetup, 10, job))
  1204  
  1205  	// Update the eligibility and expect evals
  1206  	elig.Eligibility = structs.NodeSchedulingEligible
  1207  	var resp3 structs.NodeEligibilityUpdateResponse
  1208  	require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", elig, &resp3))
  1209  	require.NotZero(resp3.Index)
  1210  	require.NotZero(resp3.EvalCreateIndex)
  1211  	require.Len(resp3.EvalIDs, 1)
  1212  
  1213  	out, err = state.NodeByID(nil, node.ID)
  1214  	require.Nil(err)
  1215  	require.Len(out.Events, 3)
  1216  	require.Equal(NodeEligibilityEventEligible, out.Events[2].Message)
  1217  }
  1218  
  1219  func TestClientEndpoint_UpdateEligibility_ACL(t *testing.T) {
  1220  	t.Parallel()
  1221  
  1222  	s1, root, cleanupS1 := TestACLServer(t, nil)
  1223  	defer cleanupS1()
  1224  	codec := rpcClient(t, s1)
  1225  	testutil.WaitForLeader(t, s1.RPC)
  1226  	require := require.New(t)
  1227  
  1228  	// Create the node
  1229  	node := mock.Node()
  1230  	state := s1.fsm.State()
  1231  
  1232  	require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
  1233  
  1234  	// Create the policy and tokens
  1235  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
  1236  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
  1237  
  1238  	// Update the status without a token and expect failure
  1239  	dereg := &structs.NodeUpdateEligibilityRequest{
  1240  		NodeID:       node.ID,
  1241  		Eligibility:  structs.NodeSchedulingIneligible,
  1242  		WriteRequest: structs.WriteRequest{Region: "global"},
  1243  	}
  1244  	{
  1245  		var resp structs.NodeEligibilityUpdateResponse
  1246  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
  1247  		require.NotNil(err, "RPC")
  1248  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1249  	}
  1250  
  1251  	// Try with a valid token
  1252  	dereg.AuthToken = validToken.SecretID
  1253  	{
  1254  		var resp structs.NodeEligibilityUpdateResponse
  1255  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
  1256  	}
  1257  
  1258  	// Try with a invalid token
  1259  	dereg.AuthToken = invalidToken.SecretID
  1260  	{
  1261  		var resp structs.NodeEligibilityUpdateResponse
  1262  		err := msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp)
  1263  		require.NotNil(err, "RPC")
  1264  		require.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1265  	}
  1266  
  1267  	// Try with a root token
  1268  	dereg.AuthToken = root.SecretID
  1269  	{
  1270  		var resp structs.NodeEligibilityUpdateResponse
  1271  		require.Nil(msgpackrpc.CallWithCodec(codec, "Node.UpdateEligibility", dereg, &resp), "RPC")
  1272  	}
  1273  }
  1274  
// TestClientEndpoint_GetNode verifies Node.GetNode: a registered node can be
// looked up by ID (with SecretID omitted from the response and a register
// node event recorded), and a lookup of an unknown ID returns a nil node with
// the current index.
func TestClientEndpoint_GetNode(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Lookup the node
	get := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.SingleNodeResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}

	if resp2.Node.ComputedClass == "" {
		t.Fatalf("bad ComputedClass: %#v", resp2.Node)
	}

	// Normalize the fields the server sets or omits so the DeepEqual below
	// only compares the data the test controls: copy the server-set
	// StatusUpdatedAt and Events, and clear SecretID since GetNode does not
	// return it.
	node.StatusUpdatedAt = resp2.Node.StatusUpdatedAt
	node.SecretID = ""
	node.Events = resp2.Node.Events
	if !reflect.DeepEqual(node, resp2.Node) {
		t.Fatalf("bad: %#v \n %#v", node, resp2.Node)
	}

	// assert that the node register event was set correctly
	if len(resp2.Node.Events) != 1 {
		t.Fatalf("Did not set node events: %#v", resp2.Node)
	}
	if resp2.Node.Events[0].Message != state.NodeRegisterEventRegistered {
		t.Fatalf("Did not set node register event correctly: %#v", resp2.Node)
	}

	// Lookup non-existing node
	get.NodeID = "12345678-abcd-efab-cdef-123456789abc"
	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", get, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp2.Index != resp.Index {
		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
	}
	if resp2.Node != nil {
		t.Fatalf("unexpected node")
	}
}
  1343  
  1344  func TestClientEndpoint_GetNode_ACL(t *testing.T) {
  1345  	t.Parallel()
  1346  
  1347  	s1, root, cleanupS1 := TestACLServer(t, nil)
  1348  	defer cleanupS1()
  1349  	codec := rpcClient(t, s1)
  1350  	testutil.WaitForLeader(t, s1.RPC)
  1351  	assert := assert.New(t)
  1352  
  1353  	// Create the node
  1354  	node := mock.Node()
  1355  	state := s1.fsm.State()
  1356  	assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
  1357  
  1358  	// Create the policy and tokens
  1359  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
  1360  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))
  1361  
  1362  	// Lookup the node without a token and expect failure
  1363  	req := &structs.NodeSpecificRequest{
  1364  		NodeID:       node.ID,
  1365  		QueryOptions: structs.QueryOptions{Region: "global"},
  1366  	}
  1367  	{
  1368  		var resp structs.SingleNodeResponse
  1369  		err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
  1370  		assert.NotNil(err, "RPC")
  1371  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1372  	}
  1373  
  1374  	// Try with a valid token
  1375  	req.AuthToken = validToken.SecretID
  1376  	{
  1377  		var resp structs.SingleNodeResponse
  1378  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
  1379  		assert.Equal(node.ID, resp.Node.ID)
  1380  	}
  1381  
  1382  	// Try with a Node.SecretID
  1383  	req.AuthToken = node.SecretID
  1384  	{
  1385  		var resp structs.SingleNodeResponse
  1386  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
  1387  		assert.Equal(node.ID, resp.Node.ID)
  1388  	}
  1389  
  1390  	// Try with a invalid token
  1391  	req.AuthToken = invalidToken.SecretID
  1392  	{
  1393  		var resp structs.SingleNodeResponse
  1394  		err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp)
  1395  		assert.NotNil(err, "RPC")
  1396  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1397  	}
  1398  
  1399  	// Try with a root token
  1400  	req.AuthToken = root.SecretID
  1401  	{
  1402  		var resp structs.SingleNodeResponse
  1403  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp), "RPC")
  1404  		assert.Equal(node.ID, resp.Node.ID)
  1405  	}
  1406  }
  1407  
  1408  func TestClientEndpoint_GetNode_Blocking(t *testing.T) {
  1409  	t.Parallel()
  1410  
  1411  	s1, cleanupS1 := TestServer(t, nil)
  1412  	defer cleanupS1()
  1413  	state := s1.fsm.State()
  1414  	codec := rpcClient(t, s1)
  1415  	testutil.WaitForLeader(t, s1.RPC)
  1416  
  1417  	// Create the node
  1418  	node1 := mock.Node()
  1419  	node2 := mock.Node()
  1420  
  1421  	// First create an unrelated node.
  1422  	time.AfterFunc(100*time.Millisecond, func() {
  1423  		if err := state.UpsertNode(structs.MsgTypeTestSetup, 100, node1); err != nil {
  1424  			t.Fatalf("err: %v", err)
  1425  		}
  1426  	})
  1427  
  1428  	// Upsert the node we are watching later
  1429  	time.AfterFunc(200*time.Millisecond, func() {
  1430  		if err := state.UpsertNode(structs.MsgTypeTestSetup, 200, node2); err != nil {
  1431  			t.Fatalf("err: %v", err)
  1432  		}
  1433  	})
  1434  
  1435  	// Lookup the node
  1436  	req := &structs.NodeSpecificRequest{
  1437  		NodeID: node2.ID,
  1438  		QueryOptions: structs.QueryOptions{
  1439  			Region:        "global",
  1440  			MinQueryIndex: 150,
  1441  		},
  1442  	}
  1443  	var resp structs.SingleNodeResponse
  1444  	start := time.Now()
  1445  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp); err != nil {
  1446  		t.Fatalf("err: %v", err)
  1447  	}
  1448  
  1449  	if elapsed := time.Since(start); elapsed < 200*time.Millisecond {
  1450  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  1451  	}
  1452  	if resp.Index != 200 {
  1453  		t.Fatalf("Bad index: %d %d", resp.Index, 200)
  1454  	}
  1455  	if resp.Node == nil || resp.Node.ID != node2.ID {
  1456  		t.Fatalf("bad: %#v", resp.Node)
  1457  	}
  1458  
  1459  	// Node update triggers watches
  1460  	time.AfterFunc(100*time.Millisecond, func() {
  1461  		nodeUpdate := mock.Node()
  1462  		nodeUpdate.ID = node2.ID
  1463  		nodeUpdate.Status = structs.NodeStatusDown
  1464  		if err := state.UpsertNode(structs.MsgTypeTestSetup, 300, nodeUpdate); err != nil {
  1465  			t.Fatalf("err: %v", err)
  1466  		}
  1467  	})
  1468  
  1469  	req.QueryOptions.MinQueryIndex = 250
  1470  	var resp2 structs.SingleNodeResponse
  1471  	start = time.Now()
  1472  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp2); err != nil {
  1473  		t.Fatalf("err: %v", err)
  1474  	}
  1475  
  1476  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  1477  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  1478  	}
  1479  	if resp2.Index != 300 {
  1480  		t.Fatalf("Bad index: %d %d", resp2.Index, 300)
  1481  	}
  1482  	if resp2.Node == nil || resp2.Node.Status != structs.NodeStatusDown {
  1483  		t.Fatalf("bad: %#v", resp2.Node)
  1484  	}
  1485  
  1486  	// Node delete triggers watches
  1487  	time.AfterFunc(100*time.Millisecond, func() {
  1488  		if err := state.DeleteNode(structs.MsgTypeTestSetup, 400, []string{node2.ID}); err != nil {
  1489  			t.Fatalf("err: %v", err)
  1490  		}
  1491  	})
  1492  
  1493  	req.QueryOptions.MinQueryIndex = 350
  1494  	var resp3 structs.SingleNodeResponse
  1495  	start = time.Now()
  1496  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetNode", req, &resp3); err != nil {
  1497  		t.Fatalf("err: %v", err)
  1498  	}
  1499  
  1500  	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
  1501  		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
  1502  	}
  1503  	if resp3.Index != 400 {
  1504  		t.Fatalf("Bad index: %d %d", resp2.Index, 400)
  1505  	}
  1506  	if resp3.Node != nil {
  1507  		t.Fatalf("bad: %#v", resp3.Node)
  1508  	}
  1509  }
  1510  
  1511  func TestClientEndpoint_GetAllocs(t *testing.T) {
  1512  	t.Parallel()
  1513  
  1514  	s1, cleanupS1 := TestServer(t, nil)
  1515  	defer cleanupS1()
  1516  	codec := rpcClient(t, s1)
  1517  	testutil.WaitForLeader(t, s1.RPC)
  1518  
  1519  	// Create the register request
  1520  	node := mock.Node()
  1521  	reg := &structs.NodeRegisterRequest{
  1522  		Node:         node,
  1523  		WriteRequest: structs.WriteRequest{Region: "global"},
  1524  	}
  1525  
  1526  	// Fetch the response
  1527  	var resp structs.GenericResponse
  1528  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  1529  		t.Fatalf("err: %v", err)
  1530  	}
  1531  	node.CreateIndex = resp.Index
  1532  	node.ModifyIndex = resp.Index
  1533  
  1534  	// Inject fake evaluations
  1535  	alloc := mock.Alloc()
  1536  	alloc.NodeID = node.ID
  1537  	state := s1.fsm.State()
  1538  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1539  	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
  1540  	if err != nil {
  1541  		t.Fatalf("err: %v", err)
  1542  	}
  1543  
  1544  	// Lookup the allocs
  1545  	get := &structs.NodeSpecificRequest{
  1546  		NodeID:       node.ID,
  1547  		QueryOptions: structs.QueryOptions{Region: "global"},
  1548  	}
  1549  	var resp2 structs.NodeAllocsResponse
  1550  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
  1551  		t.Fatalf("err: %v", err)
  1552  	}
  1553  	if resp2.Index != 100 {
  1554  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1555  	}
  1556  
  1557  	if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
  1558  		t.Fatalf("bad: %#v", resp2.Allocs)
  1559  	}
  1560  
  1561  	// Lookup non-existing node
  1562  	get.NodeID = "foobarbaz"
  1563  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", get, &resp2); err != nil {
  1564  		t.Fatalf("err: %v", err)
  1565  	}
  1566  	if resp2.Index != 100 {
  1567  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1568  	}
  1569  	if len(resp2.Allocs) != 0 {
  1570  		t.Fatalf("unexpected node")
  1571  	}
  1572  }
  1573  
  1574  func TestClientEndpoint_GetAllocs_ACL_Basic(t *testing.T) {
  1575  	t.Parallel()
  1576  
  1577  	s1, root, cleanupS1 := TestACLServer(t, nil)
  1578  	defer cleanupS1()
  1579  	codec := rpcClient(t, s1)
  1580  	testutil.WaitForLeader(t, s1.RPC)
  1581  	assert := assert.New(t)
  1582  
  1583  	// Create the node
  1584  	allocDefaultNS := mock.Alloc()
  1585  	node := mock.Node()
  1586  	allocDefaultNS.NodeID = node.ID
  1587  	state := s1.fsm.State()
  1588  	assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
  1589  	assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary")
  1590  	allocs := []*structs.Allocation{allocDefaultNS}
  1591  	assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 5, allocs), "UpsertAllocs")
  1592  
  1593  	// Create the namespace policy and tokens
  1594  	validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+
  1595  		mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
  1596  	invalidToken := mock.CreatePolicyAndToken(t, state, 1004, "test-invalid",
  1597  		mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
  1598  
  1599  	req := &structs.NodeSpecificRequest{
  1600  		NodeID: node.ID,
  1601  		QueryOptions: structs.QueryOptions{
  1602  			Region: "global",
  1603  		},
  1604  	}
  1605  
  1606  	// Lookup the node without a token and expect failure
  1607  	{
  1608  		var resp structs.NodeAllocsResponse
  1609  		err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
  1610  		assert.NotNil(err, "RPC")
  1611  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1612  	}
  1613  
  1614  	// Try with a valid token for the default namespace
  1615  	req.AuthToken = validDefaultToken.SecretID
  1616  	{
  1617  		var resp structs.NodeAllocsResponse
  1618  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
  1619  		assert.Len(resp.Allocs, 1)
  1620  		assert.Equal(allocDefaultNS.ID, resp.Allocs[0].ID)
  1621  	}
  1622  
  1623  	// Try with a invalid token
  1624  	req.AuthToken = invalidToken.SecretID
  1625  	{
  1626  		var resp structs.NodeAllocsResponse
  1627  		err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
  1628  		assert.NotNil(err, "RPC")
  1629  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1630  	}
  1631  
  1632  	// Try with a root token
  1633  	req.AuthToken = root.SecretID
  1634  	{
  1635  		var resp structs.NodeAllocsResponse
  1636  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
  1637  		assert.Len(resp.Allocs, 1)
  1638  		for _, alloc := range resp.Allocs {
  1639  			switch alloc.ID {
  1640  			case allocDefaultNS.ID:
  1641  				// expected
  1642  			default:
  1643  				t.Errorf("unexpected alloc %q for namespace %q", alloc.ID, alloc.Namespace)
  1644  			}
  1645  		}
  1646  	}
  1647  }
  1648  
  1649  func TestClientEndpoint_GetAllocs_ACL_Namespaces(t *testing.T) {
  1650  	t.Parallel()
  1651  	s1, root, cleanupS1 := TestACLServer(t, nil)
  1652  	defer cleanupS1()
  1653  	codec := rpcClient(t, s1)
  1654  	testutil.WaitForLeader(t, s1.RPC)
  1655  	assert := assert.New(t)
  1656  
  1657  	// Create the namespaces
  1658  	ns1 := mock.Namespace()
  1659  	ns2 := mock.Namespace()
  1660  	ns1.Name = "altnamespace"
  1661  	ns2.Name = "should-only-be-displayed-for-root-ns"
  1662  
  1663  	// Create the allocs
  1664  	allocDefaultNS := mock.Alloc()
  1665  	allocAltNS := mock.Alloc()
  1666  	allocAltNS.Namespace = ns1.Name
  1667  	allocOtherNS := mock.Alloc()
  1668  	allocOtherNS.Namespace = ns2.Name
  1669  
  1670  	node := mock.Node()
  1671  	allocDefaultNS.NodeID = node.ID
  1672  	allocAltNS.NodeID = node.ID
  1673  	allocOtherNS.NodeID = node.ID
  1674  	state := s1.fsm.State()
  1675  	assert.Nil(state.UpsertNamespaces(1, []*structs.Namespace{ns1, ns2}), "UpsertNamespaces")
  1676  	assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 2, node), "UpsertNode")
  1677  	assert.Nil(state.UpsertJobSummary(3, mock.JobSummary(allocDefaultNS.JobID)), "UpsertJobSummary")
  1678  	assert.Nil(state.UpsertJobSummary(4, mock.JobSummary(allocAltNS.JobID)), "UpsertJobSummary")
  1679  	assert.Nil(state.UpsertJobSummary(5, mock.JobSummary(allocOtherNS.JobID)), "UpsertJobSummary")
  1680  	allocs := []*structs.Allocation{allocDefaultNS, allocAltNS, allocOtherNS}
  1681  	assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 6, allocs), "UpsertAllocs")
  1682  
  1683  	// Create the namespace policy and tokens
  1684  	validDefaultToken := mock.CreatePolicyAndToken(t, state, 1001, "test-default-valid", mock.NodePolicy(acl.PolicyRead)+
  1685  		mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
  1686  	validNoNSToken := mock.CreatePolicyAndToken(t, state, 1003, "test-alt-valid", mock.NodePolicy(acl.PolicyRead))
  1687  	invalidToken := mock.CreatePolicyAndToken(t, state, 1004, "test-invalid",
  1688  		mock.NamespacePolicy(structs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
  1689  
  1690  	// Lookup the node without a token and expect failure
  1691  	req := &structs.NodeSpecificRequest{
  1692  		NodeID:       node.ID,
  1693  		QueryOptions: structs.QueryOptions{Region: "global"},
  1694  	}
  1695  	{
  1696  		var resp structs.NodeAllocsResponse
  1697  		err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
  1698  		assert.NotNil(err, "RPC")
  1699  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1700  	}
  1701  
  1702  	// Try with a valid token for the default namespace
  1703  	req.AuthToken = validDefaultToken.SecretID
  1704  	{
  1705  		var resp structs.NodeAllocsResponse
  1706  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
  1707  		assert.Len(resp.Allocs, 1)
  1708  		assert.Equal(allocDefaultNS.ID, resp.Allocs[0].ID)
  1709  	}
  1710  
  1711  	// Try with a valid token for a namespace with no allocs on this node
  1712  	req.AuthToken = validNoNSToken.SecretID
  1713  	{
  1714  		var resp structs.NodeAllocsResponse
  1715  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
  1716  		assert.Len(resp.Allocs, 0)
  1717  	}
  1718  
  1719  	// Try with a invalid token
  1720  	req.AuthToken = invalidToken.SecretID
  1721  	{
  1722  		var resp structs.NodeAllocsResponse
  1723  		err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp)
  1724  		assert.NotNil(err, "RPC")
  1725  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  1726  	}
  1727  
  1728  	// Try with a root token
  1729  	req.AuthToken = root.SecretID
  1730  	{
  1731  		var resp structs.NodeAllocsResponse
  1732  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp), "RPC")
  1733  		assert.Len(resp.Allocs, 3)
  1734  		for _, alloc := range resp.Allocs {
  1735  			switch alloc.ID {
  1736  			case allocDefaultNS.ID, allocAltNS.ID, allocOtherNS.ID:
  1737  				// expected
  1738  			default:
  1739  				t.Errorf("unexpected alloc %q for namespace %q", alloc.ID, alloc.Namespace)
  1740  			}
  1741  		}
  1742  	}
  1743  }
  1744  
  1745  func TestClientEndpoint_GetClientAllocs(t *testing.T) {
  1746  	t.Parallel()
  1747  	require := require.New(t)
  1748  
  1749  	s1, cleanupS1 := TestServer(t, nil)
  1750  	defer cleanupS1()
  1751  	codec := rpcClient(t, s1)
  1752  	testutil.WaitForLeader(t, s1.RPC)
  1753  
  1754  	// Check that we have no client connections
  1755  	require.Empty(s1.connectedNodes())
  1756  
  1757  	// Create the register request
  1758  	node := mock.Node()
  1759  	state := s1.fsm.State()
  1760  	require.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 98, node))
  1761  
  1762  	// Inject fake evaluations
  1763  	alloc := mock.Alloc()
  1764  	alloc.NodeID = node.ID
  1765  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1766  	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
  1767  	if err != nil {
  1768  		t.Fatalf("err: %v", err)
  1769  	}
  1770  
  1771  	// Lookup the allocs
  1772  	get := &structs.NodeSpecificRequest{
  1773  		NodeID:       node.ID,
  1774  		SecretID:     node.SecretID,
  1775  		QueryOptions: structs.QueryOptions{Region: "global"},
  1776  	}
  1777  	var resp2 structs.NodeClientAllocsResponse
  1778  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2); err != nil {
  1779  		t.Fatalf("err: %v", err)
  1780  	}
  1781  	if resp2.Index != 100 {
  1782  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1783  	}
  1784  
  1785  	if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
  1786  		t.Fatalf("bad: %#v", resp2.Allocs)
  1787  	}
  1788  
  1789  	// Check that we have the client connections
  1790  	nodes := s1.connectedNodes()
  1791  	require.Len(nodes, 1)
  1792  	require.Contains(nodes, node.ID)
  1793  
  1794  	// Lookup node with bad SecretID
  1795  	get.SecretID = "foobarbaz"
  1796  	var resp3 structs.NodeClientAllocsResponse
  1797  	err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp3)
  1798  	if err == nil || !strings.Contains(err.Error(), "does not match") {
  1799  		t.Fatalf("err: %v", err)
  1800  	}
  1801  
  1802  	// Lookup non-existing node
  1803  	get.NodeID = uuid.Generate()
  1804  	var resp4 structs.NodeClientAllocsResponse
  1805  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp4); err != nil {
  1806  		t.Fatalf("err: %v", err)
  1807  	}
  1808  	if resp4.Index != 100 {
  1809  		t.Fatalf("Bad index: %d %d", resp3.Index, 100)
  1810  	}
  1811  	if len(resp4.Allocs) != 0 {
  1812  		t.Fatalf("unexpected node %#v", resp3.Allocs)
  1813  	}
  1814  
  1815  	// Close the connection and check that we remove the client connections
  1816  	require.Nil(codec.Close())
  1817  	testutil.WaitForResult(func() (bool, error) {
  1818  		nodes := s1.connectedNodes()
  1819  		return len(nodes) == 0, nil
  1820  	}, func(err error) {
  1821  		t.Fatalf("should have no clients")
  1822  	})
  1823  }
  1824  
  1825  func TestClientEndpoint_GetClientAllocs_Blocking(t *testing.T) {
  1826  	t.Parallel()
  1827  
  1828  	s1, cleanupS1 := TestServer(t, nil)
  1829  	defer cleanupS1()
  1830  	codec := rpcClient(t, s1)
  1831  	testutil.WaitForLeader(t, s1.RPC)
  1832  
  1833  	// Create the register request
  1834  	node := mock.Node()
  1835  	reg := &structs.NodeRegisterRequest{
  1836  		Node:         node,
  1837  		WriteRequest: structs.WriteRequest{Region: "global"},
  1838  	}
  1839  
  1840  	// Fetch the response
  1841  	var resp structs.GenericResponse
  1842  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  1843  		t.Fatalf("err: %v", err)
  1844  	}
  1845  	node.CreateIndex = resp.Index
  1846  	node.ModifyIndex = resp.Index
  1847  
  1848  	// Inject fake evaluations async
  1849  	now := time.Now().UTC().UnixNano()
  1850  	alloc := mock.Alloc()
  1851  	alloc.NodeID = node.ID
  1852  	alloc.ModifyTime = now
  1853  	state := s1.fsm.State()
  1854  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  1855  	start := time.Now()
  1856  	time.AfterFunc(100*time.Millisecond, func() {
  1857  		err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
  1858  		if err != nil {
  1859  			t.Fatalf("err: %v", err)
  1860  		}
  1861  	})
  1862  
  1863  	// Lookup the allocs in a blocking query
  1864  	req := &structs.NodeSpecificRequest{
  1865  		NodeID:   node.ID,
  1866  		SecretID: node.SecretID,
  1867  		QueryOptions: structs.QueryOptions{
  1868  			Region:        "global",
  1869  			MinQueryIndex: 50,
  1870  			MaxQueryTime:  time.Second,
  1871  		},
  1872  	}
  1873  	var resp2 structs.NodeClientAllocsResponse
  1874  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2); err != nil {
  1875  		t.Fatalf("err: %v", err)
  1876  	}
  1877  
  1878  	// Should block at least 100ms
  1879  	if time.Since(start) < 100*time.Millisecond {
  1880  		t.Fatalf("too fast")
  1881  	}
  1882  
  1883  	if resp2.Index != 100 {
  1884  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  1885  	}
  1886  
  1887  	if len(resp2.Allocs) != 1 || resp2.Allocs[alloc.ID] != 100 {
  1888  		t.Fatalf("bad: %#v", resp2.Allocs)
  1889  	}
  1890  
  1891  	iter, err := state.AllocsByIDPrefix(nil, structs.DefaultNamespace, alloc.ID)
  1892  	if err != nil {
  1893  		t.Fatalf("err: %v", err)
  1894  	}
  1895  
  1896  	getAllocs := func(iter memdb.ResultIterator) []*structs.Allocation {
  1897  		var allocs []*structs.Allocation
  1898  		for {
  1899  			raw := iter.Next()
  1900  			if raw == nil {
  1901  				break
  1902  			}
  1903  			allocs = append(allocs, raw.(*structs.Allocation))
  1904  		}
  1905  		return allocs
  1906  	}
  1907  	out := getAllocs(iter)
  1908  
  1909  	if len(out) != 1 {
  1910  		t.Fatalf("Expected to get one allocation but got:%v", out)
  1911  	}
  1912  
  1913  	if out[0].ModifyTime != now {
  1914  		t.Fatalf("Invalid modify time %v", out[0].ModifyTime)
  1915  	}
  1916  
  1917  	// Alloc updates fire watches
  1918  	time.AfterFunc(100*time.Millisecond, func() {
  1919  		allocUpdate := mock.Alloc()
  1920  		allocUpdate.NodeID = alloc.NodeID
  1921  		allocUpdate.ID = alloc.ID
  1922  		allocUpdate.ClientStatus = structs.AllocClientStatusRunning
  1923  		state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
  1924  		err := state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate})
  1925  		if err != nil {
  1926  			t.Fatalf("err: %v", err)
  1927  		}
  1928  	})
  1929  
  1930  	req.QueryOptions.MinQueryIndex = 150
  1931  	var resp3 structs.NodeClientAllocsResponse
  1932  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3); err != nil {
  1933  		t.Fatalf("err: %v", err)
  1934  	}
  1935  
  1936  	if time.Since(start) < 100*time.Millisecond {
  1937  		t.Fatalf("too fast")
  1938  	}
  1939  	if resp3.Index != 200 {
  1940  		t.Fatalf("Bad index: %d %d", resp3.Index, 200)
  1941  	}
  1942  	if len(resp3.Allocs) != 1 || resp3.Allocs[alloc.ID] != 200 {
  1943  		t.Fatalf("bad: %#v", resp3.Allocs)
  1944  	}
  1945  }
  1946  
  1947  func TestClientEndpoint_GetClientAllocs_Blocking_GC(t *testing.T) {
  1948  	t.Parallel()
  1949  	assert := assert.New(t)
  1950  
  1951  	s1, cleanupS1 := TestServer(t, nil)
  1952  	defer cleanupS1()
  1953  	codec := rpcClient(t, s1)
  1954  	testutil.WaitForLeader(t, s1.RPC)
  1955  
  1956  	// Create the register request
  1957  	node := mock.Node()
  1958  	reg := &structs.NodeRegisterRequest{
  1959  		Node:         node,
  1960  		WriteRequest: structs.WriteRequest{Region: "global"},
  1961  	}
  1962  
  1963  	// Fetch the response
  1964  	var resp structs.GenericResponse
  1965  	assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
  1966  	node.CreateIndex = resp.Index
  1967  	node.ModifyIndex = resp.Index
  1968  
  1969  	// Inject fake allocations async
  1970  	alloc1 := mock.Alloc()
  1971  	alloc1.NodeID = node.ID
  1972  	alloc2 := mock.Alloc()
  1973  	alloc2.NodeID = node.ID
  1974  	state := s1.fsm.State()
  1975  	state.UpsertJobSummary(99, mock.JobSummary(alloc1.JobID))
  1976  	start := time.Now()
  1977  	time.AfterFunc(100*time.Millisecond, func() {
  1978  		assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc1, alloc2}))
  1979  	})
  1980  
  1981  	// Lookup the allocs in a blocking query
  1982  	req := &structs.NodeSpecificRequest{
  1983  		NodeID:   node.ID,
  1984  		SecretID: node.SecretID,
  1985  		QueryOptions: structs.QueryOptions{
  1986  			Region:        "global",
  1987  			MinQueryIndex: 50,
  1988  			MaxQueryTime:  time.Second,
  1989  		},
  1990  	}
  1991  	var resp2 structs.NodeClientAllocsResponse
  1992  	assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp2))
  1993  
  1994  	// Should block at least 100ms
  1995  	if time.Since(start) < 100*time.Millisecond {
  1996  		t.Fatalf("too fast")
  1997  	}
  1998  
  1999  	assert.EqualValues(100, resp2.Index)
  2000  	if assert.Len(resp2.Allocs, 2) {
  2001  		assert.EqualValues(100, resp2.Allocs[alloc1.ID])
  2002  	}
  2003  
  2004  	// Delete an allocation
  2005  	time.AfterFunc(100*time.Millisecond, func() {
  2006  		assert.Nil(state.DeleteEval(200, nil, []string{alloc2.ID}))
  2007  	})
  2008  
  2009  	req.QueryOptions.MinQueryIndex = 150
  2010  	var resp3 structs.NodeClientAllocsResponse
  2011  	assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", req, &resp3))
  2012  
  2013  	if time.Since(start) < 100*time.Millisecond {
  2014  		t.Fatalf("too fast")
  2015  	}
  2016  	assert.EqualValues(200, resp3.Index)
  2017  	if assert.Len(resp3.Allocs, 1) {
  2018  		assert.EqualValues(100, resp3.Allocs[alloc1.ID])
  2019  	}
  2020  }
  2021  
// A MigrateToken should not be created if an allocation shares the same node
// with its previous allocation (there is no cross-node migration to secure).
func TestClientEndpoint_GetClientAllocs_WithoutMigrateTokens(t *testing.T) {
	t.Parallel()
	assert := assert.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the register request
	node := mock.Node()
	reg := &structs.NodeRegisterRequest{
		Node:         node,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.GenericResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	node.CreateIndex = resp.Index
	node.ModifyIndex = resp.Index

	// Inject fake evaluations: alloc replaces prevAlloc on the SAME node,
	// the exact case where no migrate token should be minted.
	prevAlloc := mock.Alloc()
	prevAlloc.NodeID = node.ID
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	alloc.PreviousAllocation = prevAlloc.ID
	// NOTE(review): AllocClientStatusComplete is a *client* status constant
	// being assigned to DesiredStatus, which elsewhere takes
	// AllocDesiredStatus* values — confirm this mismatch is intentional.
	alloc.DesiredStatus = structs.AllocClientStatusComplete
	state := s1.fsm.State()
	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{prevAlloc, alloc})
	assert.Nil(err)

	// Lookup the allocs
	get := &structs.NodeSpecificRequest{
		NodeID:       node.ID,
		SecretID:     node.SecretID,
		QueryOptions: structs.QueryOptions{Region: "global"},
	}
	var resp2 structs.NodeClientAllocsResponse

	err = msgpackrpc.CallWithCodec(codec, "Node.GetClientAllocs", get, &resp2)
	assert.Nil(err)

	// Both allocations come back, but no migrate tokens were created.
	assert.Equal(uint64(100), resp2.Index)
	assert.Equal(2, len(resp2.Allocs))
	assert.Equal(uint64(100), resp2.Allocs[alloc.ID])
	assert.Equal(0, len(resp2.MigrateTokens))
}
  2076  
  2077  func TestClientEndpoint_GetAllocs_Blocking(t *testing.T) {
  2078  	t.Parallel()
  2079  
  2080  	s1, cleanupS1 := TestServer(t, nil)
  2081  	defer cleanupS1()
  2082  	codec := rpcClient(t, s1)
  2083  	testutil.WaitForLeader(t, s1.RPC)
  2084  
  2085  	// Create the register request
  2086  	node := mock.Node()
  2087  	reg := &structs.NodeRegisterRequest{
  2088  		Node:         node,
  2089  		WriteRequest: structs.WriteRequest{Region: "global"},
  2090  	}
  2091  
  2092  	// Fetch the response
  2093  	var resp structs.GenericResponse
  2094  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2095  		t.Fatalf("err: %v", err)
  2096  	}
  2097  	node.CreateIndex = resp.Index
  2098  	node.ModifyIndex = resp.Index
  2099  
  2100  	// Inject fake evaluations async
  2101  	alloc := mock.Alloc()
  2102  	alloc.NodeID = node.ID
  2103  	state := s1.fsm.State()
  2104  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2105  	start := time.Now()
  2106  	time.AfterFunc(100*time.Millisecond, func() {
  2107  		err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
  2108  		if err != nil {
  2109  			t.Fatalf("err: %v", err)
  2110  		}
  2111  	})
  2112  
  2113  	// Lookup the allocs in a blocking query
  2114  	req := &structs.NodeSpecificRequest{
  2115  		NodeID: node.ID,
  2116  		QueryOptions: structs.QueryOptions{
  2117  			Region:        "global",
  2118  			MinQueryIndex: 50,
  2119  			MaxQueryTime:  time.Second,
  2120  		},
  2121  	}
  2122  	var resp2 structs.NodeAllocsResponse
  2123  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp2); err != nil {
  2124  		t.Fatalf("err: %v", err)
  2125  	}
  2126  
  2127  	// Should block at least 100ms
  2128  	if time.Since(start) < 100*time.Millisecond {
  2129  		t.Fatalf("too fast")
  2130  	}
  2131  
  2132  	if resp2.Index != 100 {
  2133  		t.Fatalf("Bad index: %d %d", resp2.Index, 100)
  2134  	}
  2135  
  2136  	if len(resp2.Allocs) != 1 || resp2.Allocs[0].ID != alloc.ID {
  2137  		t.Fatalf("bad: %#v", resp2.Allocs)
  2138  	}
  2139  
  2140  	// Alloc updates fire watches
  2141  	time.AfterFunc(100*time.Millisecond, func() {
  2142  		allocUpdate := mock.Alloc()
  2143  		allocUpdate.NodeID = alloc.NodeID
  2144  		allocUpdate.ID = alloc.ID
  2145  		allocUpdate.ClientStatus = structs.AllocClientStatusRunning
  2146  		state.UpsertJobSummary(199, mock.JobSummary(allocUpdate.JobID))
  2147  		err := state.UpdateAllocsFromClient(structs.MsgTypeTestSetup, 200, []*structs.Allocation{allocUpdate})
  2148  		if err != nil {
  2149  			t.Fatalf("err: %v", err)
  2150  		}
  2151  	})
  2152  
  2153  	req.QueryOptions.MinQueryIndex = 150
  2154  	var resp3 structs.NodeAllocsResponse
  2155  	if err := msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", req, &resp3); err != nil {
  2156  		t.Fatalf("err: %v", err)
  2157  	}
  2158  
  2159  	if time.Since(start) < 100*time.Millisecond {
  2160  		t.Fatalf("too fast")
  2161  	}
  2162  	if resp3.Index != 200 {
  2163  		t.Fatalf("Bad index: %d %d", resp3.Index, 200)
  2164  	}
  2165  	if len(resp3.Allocs) != 1 || resp3.Allocs[0].ClientStatus != structs.AllocClientStatusRunning {
  2166  		t.Fatalf("bad: %#v", resp3.Allocs[0])
  2167  	}
  2168  }
  2169  
  2170  func TestClientEndpoint_UpdateAlloc(t *testing.T) {
  2171  	t.Parallel()
  2172  
  2173  	s1, cleanupS1 := TestServer(t, func(c *Config) {
  2174  		// Disabling scheduling in this test so that we can
  2175  		// ensure that the state store doesn't accumulate more evals
  2176  		// than what we expect the unit test to add
  2177  		c.NumSchedulers = 0
  2178  	})
  2179  
  2180  	defer cleanupS1()
  2181  	codec := rpcClient(t, s1)
  2182  	testutil.WaitForLeader(t, s1.RPC)
  2183  	require := require.New(t)
  2184  
  2185  	// Create the register request
  2186  	node := mock.Node()
  2187  	reg := &structs.NodeRegisterRequest{
  2188  		Node:         node,
  2189  		WriteRequest: structs.WriteRequest{Region: "global"},
  2190  	}
  2191  
  2192  	// Fetch the response
  2193  	var resp structs.GenericResponse
  2194  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2195  		t.Fatalf("err: %v", err)
  2196  	}
  2197  
  2198  	state := s1.fsm.State()
  2199  	// Inject mock job
  2200  	job := mock.Job()
  2201  	job.ID = "mytestjob"
  2202  	err := state.UpsertJob(structs.MsgTypeTestSetup, 101, job)
  2203  	require.Nil(err)
  2204  
  2205  	// Inject fake allocations
  2206  	alloc := mock.Alloc()
  2207  	alloc.JobID = job.ID
  2208  	alloc.NodeID = node.ID
  2209  	err = state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2210  	require.Nil(err)
  2211  	alloc.TaskGroup = job.TaskGroups[0].Name
  2212  
  2213  	alloc2 := mock.Alloc()
  2214  	alloc2.JobID = job.ID
  2215  	alloc2.NodeID = node.ID
  2216  	err = state.UpsertJobSummary(99, mock.JobSummary(alloc2.JobID))
  2217  	require.Nil(err)
  2218  	alloc2.TaskGroup = job.TaskGroups[0].Name
  2219  
  2220  	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc, alloc2})
  2221  	require.Nil(err)
  2222  
  2223  	// Attempt updates of more than one alloc for the same job
  2224  	clientAlloc1 := new(structs.Allocation)
  2225  	*clientAlloc1 = *alloc
  2226  	clientAlloc1.ClientStatus = structs.AllocClientStatusFailed
  2227  
  2228  	clientAlloc2 := new(structs.Allocation)
  2229  	*clientAlloc2 = *alloc2
  2230  	clientAlloc2.ClientStatus = structs.AllocClientStatusFailed
  2231  
  2232  	// Update the alloc
  2233  	update := &structs.AllocUpdateRequest{
  2234  		Alloc:        []*structs.Allocation{clientAlloc1, clientAlloc2},
  2235  		WriteRequest: structs.WriteRequest{Region: "global"},
  2236  	}
  2237  	var resp2 structs.NodeAllocsResponse
  2238  	start := time.Now()
  2239  	err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2)
  2240  	require.Nil(err)
  2241  	require.NotEqual(uint64(0), resp2.Index)
  2242  
  2243  	if diff := time.Since(start); diff < batchUpdateInterval {
  2244  		t.Fatalf("too fast: %v", diff)
  2245  	}
  2246  
  2247  	// Lookup the alloc
  2248  	ws := memdb.NewWatchSet()
  2249  	out, err := state.AllocByID(ws, alloc.ID)
  2250  	require.Nil(err)
  2251  	require.Equal(structs.AllocClientStatusFailed, out.ClientStatus)
  2252  	require.True(out.ModifyTime > 0)
  2253  
  2254  	// Assert that exactly one eval with TriggeredBy EvalTriggerRetryFailedAlloc exists
  2255  	evaluations, err := state.EvalsByJob(ws, job.Namespace, job.ID)
  2256  	require.Nil(err)
  2257  	require.True(len(evaluations) != 0)
  2258  	foundCount := 0
  2259  	for _, resultEval := range evaluations {
  2260  		if resultEval.TriggeredBy == structs.EvalTriggerRetryFailedAlloc && resultEval.WaitUntil.IsZero() {
  2261  			foundCount++
  2262  		}
  2263  	}
  2264  	require.Equal(1, foundCount, "Should create exactly one eval for failed allocs")
  2265  
  2266  }
  2267  
  2268  func TestClientEndpoint_BatchUpdate(t *testing.T) {
  2269  	t.Parallel()
  2270  
  2271  	s1, cleanupS1 := TestServer(t, nil)
  2272  	defer cleanupS1()
  2273  	codec := rpcClient(t, s1)
  2274  	testutil.WaitForLeader(t, s1.RPC)
  2275  
  2276  	// Create the register request
  2277  	node := mock.Node()
  2278  	reg := &structs.NodeRegisterRequest{
  2279  		Node:         node,
  2280  		WriteRequest: structs.WriteRequest{Region: "global"},
  2281  	}
  2282  
  2283  	// Fetch the response
  2284  	var resp structs.GenericResponse
  2285  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2286  		t.Fatalf("err: %v", err)
  2287  	}
  2288  
  2289  	// Inject fake evaluations
  2290  	alloc := mock.Alloc()
  2291  	alloc.NodeID = node.ID
  2292  	state := s1.fsm.State()
  2293  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2294  	err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc})
  2295  	if err != nil {
  2296  		t.Fatalf("err: %v", err)
  2297  	}
  2298  
  2299  	// Attempt update
  2300  	clientAlloc := new(structs.Allocation)
  2301  	*clientAlloc = *alloc
  2302  	clientAlloc.ClientStatus = structs.AllocClientStatusFailed
  2303  
  2304  	// Call to do the batch update
  2305  	bf := structs.NewBatchFuture()
  2306  	endpoint := s1.staticEndpoints.Node
  2307  	endpoint.batchUpdate(bf, []*structs.Allocation{clientAlloc}, nil)
  2308  	if err := bf.Wait(); err != nil {
  2309  		t.Fatalf("err: %v", err)
  2310  	}
  2311  	if bf.Index() == 0 {
  2312  		t.Fatalf("Bad index: %d", bf.Index())
  2313  	}
  2314  
  2315  	// Lookup the alloc
  2316  	ws := memdb.NewWatchSet()
  2317  	out, err := state.AllocByID(ws, alloc.ID)
  2318  	if err != nil {
  2319  		t.Fatalf("err: %v", err)
  2320  	}
  2321  	if out.ClientStatus != structs.AllocClientStatusFailed {
  2322  		t.Fatalf("Bad: %#v", out)
  2323  	}
  2324  }
  2325  
  2326  func TestClientEndpoint_UpdateAlloc_Vault(t *testing.T) {
  2327  	t.Parallel()
  2328  
  2329  	s1, cleanupS1 := TestServer(t, nil)
  2330  	defer cleanupS1()
  2331  	codec := rpcClient(t, s1)
  2332  	testutil.WaitForLeader(t, s1.RPC)
  2333  
  2334  	// Create the register request
  2335  	node := mock.Node()
  2336  	reg := &structs.NodeRegisterRequest{
  2337  		Node:         node,
  2338  		WriteRequest: structs.WriteRequest{Region: "global"},
  2339  	}
  2340  
  2341  	// Fetch the response
  2342  	var resp structs.GenericResponse
  2343  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2344  		t.Fatalf("err: %v", err)
  2345  	}
  2346  
  2347  	// Swap the servers Vault Client
  2348  	tvc := &TestVaultClient{}
  2349  	s1.vault = tvc
  2350  
  2351  	// Inject fake allocation and vault accessor
  2352  	alloc := mock.Alloc()
  2353  	alloc.NodeID = node.ID
  2354  	state := s1.fsm.State()
  2355  	state.UpsertJobSummary(99, mock.JobSummary(alloc.JobID))
  2356  	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 100, []*structs.Allocation{alloc}); err != nil {
  2357  		t.Fatalf("err: %v", err)
  2358  	}
  2359  
  2360  	va := mock.VaultAccessor()
  2361  	va.NodeID = node.ID
  2362  	va.AllocID = alloc.ID
  2363  	if err := state.UpsertVaultAccessor(101, []*structs.VaultAccessor{va}); err != nil {
  2364  		t.Fatalf("err: %v", err)
  2365  	}
  2366  
  2367  	// Inject mock job
  2368  	job := mock.Job()
  2369  	job.ID = alloc.JobID
  2370  	err := state.UpsertJob(structs.MsgTypeTestSetup, 101, job)
  2371  	if err != nil {
  2372  		t.Fatalf("err: %v", err)
  2373  	}
  2374  
  2375  	// Attempt update
  2376  	clientAlloc := new(structs.Allocation)
  2377  	*clientAlloc = *alloc
  2378  	clientAlloc.ClientStatus = structs.AllocClientStatusFailed
  2379  
  2380  	// Update the alloc
  2381  	update := &structs.AllocUpdateRequest{
  2382  		Alloc:        []*structs.Allocation{clientAlloc},
  2383  		WriteRequest: structs.WriteRequest{Region: "global"},
  2384  	}
  2385  	var resp2 structs.NodeAllocsResponse
  2386  	start := time.Now()
  2387  	if err := msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", update, &resp2); err != nil {
  2388  		t.Fatalf("err: %v", err)
  2389  	}
  2390  	if resp2.Index == 0 {
  2391  		t.Fatalf("Bad index: %d", resp2.Index)
  2392  	}
  2393  	if diff := time.Since(start); diff < batchUpdateInterval {
  2394  		t.Fatalf("too fast: %v", diff)
  2395  	}
  2396  
  2397  	// Lookup the alloc
  2398  	ws := memdb.NewWatchSet()
  2399  	out, err := state.AllocByID(ws, alloc.ID)
  2400  	if err != nil {
  2401  		t.Fatalf("err: %v", err)
  2402  	}
  2403  	if out.ClientStatus != structs.AllocClientStatusFailed {
  2404  		t.Fatalf("Bad: %#v", out)
  2405  	}
  2406  
  2407  	if l := len(tvc.RevokedTokens); l != 1 {
  2408  		t.Fatalf("Deregister revoked %d tokens; want 1", l)
  2409  	}
  2410  }
  2411  
  2412  func TestClientEndpoint_CreateNodeEvals(t *testing.T) {
  2413  	t.Parallel()
  2414  
  2415  	s1, cleanupS1 := TestServer(t, nil)
  2416  	defer cleanupS1()
  2417  	testutil.WaitForLeader(t, s1.RPC)
  2418  
  2419  	// Inject fake evaluations
  2420  	alloc := mock.Alloc()
  2421  	state := s1.fsm.State()
  2422  	state.UpsertJobSummary(1, mock.JobSummary(alloc.JobID))
  2423  	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc}); err != nil {
  2424  		t.Fatalf("err: %v", err)
  2425  	}
  2426  
  2427  	// Inject a fake system job.
  2428  	job := mock.SystemJob()
  2429  	if err := state.UpsertJob(structs.MsgTypeTestSetup, 3, job); err != nil {
  2430  		t.Fatalf("err: %v", err)
  2431  	}
  2432  
  2433  	// Create some evaluations
  2434  	ids, index, err := s1.staticEndpoints.Node.createNodeEvals(alloc.NodeID, 1)
  2435  	if err != nil {
  2436  		t.Fatalf("err: %v", err)
  2437  	}
  2438  	if index == 0 {
  2439  		t.Fatalf("bad: %d", index)
  2440  	}
  2441  	if len(ids) != 2 {
  2442  		t.Fatalf("bad: %s", ids)
  2443  	}
  2444  
  2445  	// Lookup the evaluations
  2446  	ws := memdb.NewWatchSet()
  2447  	evalByType := make(map[string]*structs.Evaluation, 2)
  2448  	for _, id := range ids {
  2449  		eval, err := state.EvalByID(ws, id)
  2450  		if err != nil {
  2451  			t.Fatalf("err: %v", err)
  2452  		}
  2453  		if eval == nil {
  2454  			t.Fatalf("expected eval")
  2455  		}
  2456  
  2457  		if old, ok := evalByType[eval.Type]; ok {
  2458  			t.Fatalf("multiple evals of the same type: %v and %v", old, eval)
  2459  		}
  2460  
  2461  		evalByType[eval.Type] = eval
  2462  	}
  2463  
  2464  	if len(evalByType) != 2 {
  2465  		t.Fatalf("Expected a service and system job; got %#v", evalByType)
  2466  	}
  2467  
  2468  	// Ensure the evals are correct.
  2469  	for schedType, eval := range evalByType {
  2470  		expPriority := alloc.Job.Priority
  2471  		expJobID := alloc.JobID
  2472  		if schedType == "system" {
  2473  			expPriority = job.Priority
  2474  			expJobID = job.ID
  2475  		}
  2476  
  2477  		t.Logf("checking eval: %v", pretty.Sprint(eval))
  2478  		require.Equal(t, index, eval.CreateIndex)
  2479  		require.Equal(t, structs.EvalTriggerNodeUpdate, eval.TriggeredBy)
  2480  		require.Equal(t, alloc.NodeID, eval.NodeID)
  2481  		require.Equal(t, uint64(1), eval.NodeModifyIndex)
  2482  		switch eval.Status {
  2483  		case structs.EvalStatusPending, structs.EvalStatusComplete:
  2484  			// success
  2485  		default:
  2486  			t.Fatalf("expected pending or complete, found %v", eval.Status)
  2487  		}
  2488  		require.Equal(t, expPriority, eval.Priority)
  2489  		require.Equal(t, expJobID, eval.JobID)
  2490  		require.NotZero(t, eval.CreateTime)
  2491  		require.NotZero(t, eval.ModifyTime)
  2492  	}
  2493  }
  2494  
// TestClientEndpoint_Evaluate verifies that Node.Evaluate creates exactly
// one pending evaluation for the job running on the node, with the eval's
// fields derived from the node and its allocation.
func TestClientEndpoint_Evaluate(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, func(c *Config) {
		c.NumSchedulers = 0 // Prevent automatic dequeue
	})
	defer cleanupS1()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Inject fake evaluations: a node plus one allocation on it.
	alloc := mock.Alloc()
	node := mock.Node()
	node.ID = alloc.NodeID
	state := s1.fsm.State()
	err := state.UpsertNode(structs.MsgTypeTestSetup, 1, node)
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID))
	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc})
	if err != nil {
		t.Fatalf("err: %v", err)
	}

	// Re-evaluate
	req := &structs.NodeEvaluateRequest{
		NodeID:       alloc.NodeID,
		WriteRequest: structs.WriteRequest{Region: "global"},
	}

	// Fetch the response
	var resp structs.NodeUpdateResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}
	if resp.Index == 0 {
		t.Fatalf("bad index: %d", resp.Index)
	}

	// Create some evaluations: exactly one eval is expected since the node
	// runs a single job and no system jobs exist.
	ids := resp.EvalIDs
	if len(ids) != 1 {
		t.Fatalf("bad: %s", ids)
	}

	// Lookup the evaluation
	ws := memdb.NewWatchSet()
	eval, err := state.EvalByID(ws, ids[0])
	if err != nil {
		t.Fatalf("err: %v", err)
	}
	if eval == nil {
		t.Fatalf("expected eval")
	}
	if eval.CreateIndex != resp.Index {
		t.Fatalf("index mis-match")
	}

	// Field-by-field validation: the eval inherits priority/type/job from
	// the alloc's job and records the node that triggered it.
	if eval.Priority != alloc.Job.Priority {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.Type != alloc.Job.Type {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.TriggeredBy != structs.EvalTriggerNodeUpdate {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.JobID != alloc.JobID {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.NodeID != alloc.NodeID {
		t.Fatalf("bad: %#v", eval)
	}
	// NodeModifyIndex should match the index the node was upserted at (1).
	if eval.NodeModifyIndex != 1 {
		t.Fatalf("bad: %#v", eval)
	}
	// With schedulers disabled the eval must remain pending.
	if eval.Status != structs.EvalStatusPending {
		t.Fatalf("bad: %#v", eval)
	}
	if eval.CreateTime == 0 {
		t.Fatalf("CreateTime is unset: %#v", eval)
	}
	if eval.ModifyTime == 0 {
		t.Fatalf("ModifyTime is unset: %#v", eval)
	}
}
  2582  
  2583  func TestClientEndpoint_Evaluate_ACL(t *testing.T) {
  2584  	t.Parallel()
  2585  
  2586  	s1, root, cleanupS1 := TestACLServer(t, nil)
  2587  	defer cleanupS1()
  2588  	codec := rpcClient(t, s1)
  2589  	testutil.WaitForLeader(t, s1.RPC)
  2590  	assert := assert.New(t)
  2591  
  2592  	// Create the node with an alloc
  2593  	alloc := mock.Alloc()
  2594  	node := mock.Node()
  2595  	node.ID = alloc.NodeID
  2596  	state := s1.fsm.State()
  2597  
  2598  	assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
  2599  	assert.Nil(state.UpsertJobSummary(2, mock.JobSummary(alloc.JobID)), "UpsertJobSummary")
  2600  	assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}), "UpsertAllocs")
  2601  
  2602  	// Create the policy and tokens
  2603  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyWrite))
  2604  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyRead))
  2605  
  2606  	// Re-evaluate without a token and expect failure
  2607  	req := &structs.NodeEvaluateRequest{
  2608  		NodeID:       alloc.NodeID,
  2609  		WriteRequest: structs.WriteRequest{Region: "global"},
  2610  	}
  2611  	{
  2612  		var resp structs.NodeUpdateResponse
  2613  		err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
  2614  		assert.NotNil(err, "RPC")
  2615  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2616  	}
  2617  
  2618  	// Try with a valid token
  2619  	req.AuthToken = validToken.SecretID
  2620  	{
  2621  		var resp structs.NodeUpdateResponse
  2622  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
  2623  	}
  2624  
  2625  	// Try with a invalid token
  2626  	req.AuthToken = invalidToken.SecretID
  2627  	{
  2628  		var resp structs.NodeUpdateResponse
  2629  		err := msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp)
  2630  		assert.NotNil(err, "RPC")
  2631  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2632  	}
  2633  
  2634  	// Try with a root token
  2635  	req.AuthToken = root.SecretID
  2636  	{
  2637  		var resp structs.NodeUpdateResponse
  2638  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.Evaluate", req, &resp), "RPC")
  2639  	}
  2640  }
  2641  
  2642  func TestClientEndpoint_ListNodes(t *testing.T) {
  2643  	t.Parallel()
  2644  
  2645  	s1, cleanupS1 := TestServer(t, nil)
  2646  	defer cleanupS1()
  2647  	codec := rpcClient(t, s1)
  2648  	testutil.WaitForLeader(t, s1.RPC)
  2649  
  2650  	// Create the register request
  2651  	node := mock.Node()
  2652  	node.HostVolumes = map[string]*structs.ClientHostVolumeConfig{
  2653  		"foo": {
  2654  			Name:     "foo",
  2655  			Path:     "/",
  2656  			ReadOnly: true,
  2657  		},
  2658  	}
  2659  	reg := &structs.NodeRegisterRequest{
  2660  		Node:         node,
  2661  		WriteRequest: structs.WriteRequest{Region: "global"},
  2662  	}
  2663  
  2664  	// Fetch the response
  2665  	var resp structs.GenericResponse
  2666  	if err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp); err != nil {
  2667  		t.Fatalf("err: %v", err)
  2668  	}
  2669  	node.CreateIndex = resp.Index
  2670  	node.ModifyIndex = resp.Index
  2671  
  2672  	// Lookup the node
  2673  	get := &structs.NodeListRequest{
  2674  		QueryOptions: structs.QueryOptions{Region: "global"},
  2675  	}
  2676  	var resp2 structs.NodeListResponse
  2677  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp2); err != nil {
  2678  		t.Fatalf("err: %v", err)
  2679  	}
  2680  	if resp2.Index != resp.Index {
  2681  		t.Fatalf("Bad index: %d %d", resp2.Index, resp.Index)
  2682  	}
  2683  
  2684  	require.Len(t, resp2.Nodes, 1)
  2685  	require.Equal(t, node.ID, resp2.Nodes[0].ID)
  2686  
  2687  	// #7344 - Assert HostVolumes are included in stub
  2688  	require.Equal(t, node.HostVolumes, resp2.Nodes[0].HostVolumes)
  2689  
  2690  	// #9055 - Assert Resources are *not* included by default
  2691  	require.Nil(t, resp2.Nodes[0].NodeResources)
  2692  	require.Nil(t, resp2.Nodes[0].ReservedResources)
  2693  
  2694  	// Lookup the node with prefix
  2695  	get = &structs.NodeListRequest{
  2696  		QueryOptions: structs.QueryOptions{Region: "global", Prefix: node.ID[:4]},
  2697  	}
  2698  	var resp3 structs.NodeListResponse
  2699  	if err := msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp3); err != nil {
  2700  		t.Fatalf("err: %v", err)
  2701  	}
  2702  	if resp3.Index != resp.Index {
  2703  		t.Fatalf("Bad index: %d %d", resp3.Index, resp2.Index)
  2704  	}
  2705  
  2706  	if len(resp3.Nodes) != 1 {
  2707  		t.Fatalf("bad: %#v", resp3.Nodes)
  2708  	}
  2709  	if resp3.Nodes[0].ID != node.ID {
  2710  		t.Fatalf("bad: %#v", resp3.Nodes[0])
  2711  	}
  2712  }
  2713  
  2714  func TestClientEndpoint_ListNodes_Fields(t *testing.T) {
  2715  	t.Parallel()
  2716  
  2717  	s1, cleanupS1 := TestServer(t, nil)
  2718  	defer cleanupS1()
  2719  	codec := rpcClient(t, s1)
  2720  	testutil.WaitForLeader(t, s1.RPC)
  2721  
  2722  	// Create the register request
  2723  	node := mock.Node()
  2724  	reg := &structs.NodeRegisterRequest{
  2725  		Node:         node,
  2726  		WriteRequest: structs.WriteRequest{Region: "global"},
  2727  	}
  2728  
  2729  	// Fetch the response
  2730  	var resp structs.GenericResponse
  2731  	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &resp))
  2732  	node.CreateIndex = resp.Index
  2733  	node.ModifyIndex = resp.Index
  2734  
  2735  	// Lookup the node with fields
  2736  	get := &structs.NodeListRequest{
  2737  		QueryOptions: structs.QueryOptions{Region: "global"},
  2738  		Fields: &structs.NodeStubFields{
  2739  			Resources: true,
  2740  		},
  2741  	}
  2742  	var resp2 structs.NodeListResponse
  2743  	require.NoError(t, msgpackrpc.CallWithCodec(codec, "Node.List", get, &resp2))
  2744  	require.Equal(t, resp.Index, resp2.Index)
  2745  	require.Len(t, resp2.Nodes, 1)
  2746  	require.Equal(t, node.ID, resp2.Nodes[0].ID)
  2747  	require.NotNil(t, resp2.Nodes[0].NodeResources)
  2748  	require.NotNil(t, resp2.Nodes[0].ReservedResources)
  2749  }
  2750  
  2751  func TestClientEndpoint_ListNodes_ACL(t *testing.T) {
  2752  	t.Parallel()
  2753  
  2754  	s1, root, cleanupS1 := TestACLServer(t, nil)
  2755  	defer cleanupS1()
  2756  	codec := rpcClient(t, s1)
  2757  	testutil.WaitForLeader(t, s1.RPC)
  2758  	assert := assert.New(t)
  2759  
  2760  	// Create the node
  2761  	node := mock.Node()
  2762  	state := s1.fsm.State()
  2763  	assert.Nil(state.UpsertNode(structs.MsgTypeTestSetup, 1, node), "UpsertNode")
  2764  
  2765  	// Create the namespace policy and tokens
  2766  	validToken := mock.CreatePolicyAndToken(t, state, 1001, "test-valid", mock.NodePolicy(acl.PolicyRead))
  2767  	invalidToken := mock.CreatePolicyAndToken(t, state, 1003, "test-invalid", mock.NodePolicy(acl.PolicyDeny))
  2768  
  2769  	// Lookup the node without a token and expect failure
  2770  	req := &structs.NodeListRequest{
  2771  		QueryOptions: structs.QueryOptions{Region: "global"},
  2772  	}
  2773  	{
  2774  		var resp structs.NodeListResponse
  2775  		err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
  2776  		assert.NotNil(err, "RPC")
  2777  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2778  	}
  2779  
  2780  	// Try with a valid token
  2781  	req.AuthToken = validToken.SecretID
  2782  	{
  2783  		var resp structs.NodeListResponse
  2784  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
  2785  		assert.Equal(node.ID, resp.Nodes[0].ID)
  2786  	}
  2787  
  2788  	// Try with a invalid token
  2789  	req.AuthToken = invalidToken.SecretID
  2790  	{
  2791  		var resp structs.NodeListResponse
  2792  		err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp)
  2793  		assert.NotNil(err, "RPC")
  2794  		assert.Equal(err.Error(), structs.ErrPermissionDenied.Error())
  2795  	}
  2796  
  2797  	// Try with a root token
  2798  	req.AuthToken = root.SecretID
  2799  	{
  2800  		var resp structs.NodeListResponse
  2801  		assert.Nil(msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp), "RPC")
  2802  		assert.Equal(node.ID, resp.Nodes[0].ID)
  2803  	}
  2804  }
  2805  
// TestClientEndpoint_ListNodes_Blocking verifies blocking-query behavior of
// Node.List: a query with MinQueryIndex must block until a node upsert, drain
// update, status update, or delete raises the index, and each response must
// reflect the mutation that unblocked it. The ~100ms delays below are what
// the elapsed-time assertions key off of.
func TestClientEndpoint_ListNodes_Blocking(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Disable drainer to prevent drain from completing during test
	s1.nodeDrainer.SetEnabled(false, nil)

	// Create the node
	node := mock.Node()

	// Node upsert triggers watches
	errCh := make(chan error, 1)
	timer := time.AfterFunc(100*time.Millisecond, func() {
		errCh <- state.UpsertNode(structs.MsgTypeTestSetup, 2, node)
	})
	defer timer.Stop()

	req := &structs.NodeListRequest{
		QueryOptions: structs.QueryOptions{
			Region:        "global",
			MinQueryIndex: 1,
		},
	}
	start := time.Now()
	var resp structs.NodeListResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	// The RPC must have blocked at least until the deferred upsert fired.
	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp)
	}
	if resp.Index != 2 {
		t.Fatalf("Bad index: %d %d", resp.Index, 2)
	}
	if len(resp.Nodes) != 1 || resp.Nodes[0].ID != node.ID {
		t.Fatalf("bad: %#v", resp.Nodes)
	}

	// Node drain updates trigger watches.
	time.AfterFunc(100*time.Millisecond, func() {
		s := &structs.DrainStrategy{
			DrainSpec: structs.DrainSpec{
				Deadline: 10 * time.Second,
			},
		}
		errCh <- state.UpdateNodeDrain(structs.MsgTypeTestSetup, 3, node.ID, s, false, 0, nil)
	})

	req.MinQueryIndex = 2
	var resp2 structs.NodeListResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp2); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp2)
	}
	if resp2.Index != 3 {
		t.Fatalf("Bad index: %d %d", resp2.Index, 3)
	}
	if len(resp2.Nodes) != 1 || !resp2.Nodes[0].Drain {
		t.Fatalf("bad: %#v", resp2.Nodes)
	}

	// Node status update triggers watches. The jump to index 40 (queried
	// from MinQueryIndex 38) shows blocking works for non-contiguous
	// indexes, not just the next one.
	time.AfterFunc(100*time.Millisecond, func() {
		errCh <- state.UpdateNodeStatus(structs.MsgTypeTestSetup, 40, node.ID, structs.NodeStatusDown, 0, nil)
	})

	req.MinQueryIndex = 38
	var resp3 structs.NodeListResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp3); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp3)
	}
	if resp3.Index != 40 {
		t.Fatalf("Bad index: %d %d", resp3.Index, 40)
	}
	if len(resp3.Nodes) != 1 || resp3.Nodes[0].Status != structs.NodeStatusDown {
		t.Fatalf("bad: %#v", resp3.Nodes)
	}

	// Node delete triggers watches.
	time.AfterFunc(100*time.Millisecond, func() {
		errCh <- state.DeleteNode(structs.MsgTypeTestSetup, 50, []string{node.ID})
	})

	req.MinQueryIndex = 45
	var resp4 structs.NodeListResponse
	start = time.Now()
	if err := msgpackrpc.CallWithCodec(codec, "Node.List", req, &resp4); err != nil {
		t.Fatalf("err: %v", err)
	}

	if err := <-errCh; err != nil {
		t.Fatalf("error from timer: %v", err)
	}

	if elapsed := time.Since(start); elapsed < 100*time.Millisecond {
		t.Fatalf("should block (returned in %s) %#v", elapsed, resp4)
	}
	if resp4.Index != 50 {
		t.Fatalf("Bad index: %d %d", resp4.Index, 50)
	}
	// After deletion the node list should be empty.
	if len(resp4.Nodes) != 0 {
		t.Fatalf("bad: %#v", resp4.Nodes)
	}
}
  2937  
// TestClientEndpoint_DeriveVaultToken_Bad walks Node.DeriveVaultToken through
// its failure modes, in order: a mismatched node SecretID, an alloc not
// placed on the requesting node, a job with no Vault stanza, and a terminal
// alloc. Each failure is reported via resp.Error, not as an RPC error.
func TestClientEndpoint_DeriveVaultToken_Bad(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Create the node
	node := mock.Node()
	if err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create an alloc (deliberately NOT placed on node yet)
	alloc := mock.Alloc()
	task := alloc.Job.TaskGroups[0].Tasks[0]
	tasks := []string{task.Name}
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Use a random SecretID so the first call fails authentication.
	req := &structs.DeriveVaultTokenRequest{
		NodeID:   node.ID,
		SecretID: uuid.Generate(),
		AllocID:  alloc.ID,
		Tasks:    tasks,
		QueryOptions: structs.QueryOptions{
			Region: "global",
		},
	}

	var resp structs.DeriveVaultTokenResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}

	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "SecretID mismatch") {
		t.Fatalf("Expected SecretID mismatch: %v", resp.Error)
	}

	// Put the correct SecretID
	req.SecretID = node.SecretID

	// Now we should get an error about the allocation not running on the node
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "not running on Node") {
		t.Fatalf("Expected not running on node error: %v", resp.Error)
	}

	// Update to be running on the node
	alloc.NodeID = node.ID
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 4, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now we should get an error about the job not needing any Vault secrets
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "does not require") {
		t.Fatalf("Expected no policies error: %v", resp.Error)
	}

	// Update to be terminal
	alloc.DesiredStatus = structs.AllocDesiredStatusStop
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 5, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Now we should get an error about the allocation being terminal
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error == nil || !strings.Contains(resp.Error.Error(), "terminal") {
		t.Fatalf("Expected terminal allocation error: %v", resp.Error)
	}
}
  3019  
// TestClientEndpoint_DeriveVaultToken exercises the happy path of
// Node.DeriveVaultToken: a task with Vault policies gets a wrapped token from
// the (mocked) Vault client, and a matching VaultAccessor record is persisted
// in the state store.
func TestClientEndpoint_DeriveVaultToken(t *testing.T) {
	t.Parallel()

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Enable vault and allow authenticated
	tr := true
	s1.config.VaultConfig.Enabled = &tr
	s1.config.VaultConfig.AllowUnauthenticated = &tr

	// Replace the Vault Client on the server with a programmable test mock
	tvc := &TestVaultClient{}
	s1.vault = tvc

	// Create the node
	node := mock.Node()
	if err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Create an alloc an allocation that has vault policies required
	alloc := mock.Alloc()
	alloc.NodeID = node.ID
	task := alloc.Job.TaskGroups[0].Tasks[0]
	tasks := []string{task.Name}
	task.Vault = &structs.Vault{Policies: []string{"a", "b"}}
	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil {
		t.Fatalf("err: %v", err)
	}

	// Return a secret for the task; the wrapped accessor/TTL are asserted
	// against the persisted VaultAccessor below.
	token := uuid.Generate()
	accessor := uuid.Generate()
	ttl := 10
	secret := &vapi.Secret{
		WrapInfo: &vapi.SecretWrapInfo{
			Token:           token,
			WrappedAccessor: accessor,
			TTL:             ttl,
		},
	}
	tvc.SetCreateTokenSecret(alloc.ID, task.Name, secret)

	req := &structs.DeriveVaultTokenRequest{
		NodeID:   node.ID,
		SecretID: node.SecretID,
		AllocID:  alloc.ID,
		Tasks:    tasks,
		QueryOptions: structs.QueryOptions{
			Region: "global",
		},
	}

	var resp structs.DeriveVaultTokenResponse
	if err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp); err != nil {
		t.Fatalf("bad: %v", err)
	}
	if resp.Error != nil {
		t.Fatalf("bad: %v", resp.Error)
	}

	// Check the state store and ensure that we created a VaultAccessor
	ws := memdb.NewWatchSet()
	va, err := state.VaultAccessor(ws, accessor)
	if err != nil {
		t.Fatalf("bad: %v", err)
	}
	if va == nil {
		t.Fatalf("bad: %v", va)
	}

	if va.CreateIndex == 0 {
		t.Fatalf("bad: %v", va)
	}

	// Zero the raft index so the remaining fields can be compared with a
	// deterministic expected value.
	va.CreateIndex = 0
	expected := &structs.VaultAccessor{
		AllocID:     alloc.ID,
		Task:        task.Name,
		NodeID:      alloc.NodeID,
		Accessor:    accessor,
		CreationTTL: ttl,
	}

	if !reflect.DeepEqual(expected, va) {
		t.Fatalf("Got %#v; want %#v", va, expected)
	}
}
  3112  
  3113  func TestClientEndpoint_DeriveVaultToken_VaultError(t *testing.T) {
  3114  	t.Parallel()
  3115  
  3116  	s1, cleanupS1 := TestServer(t, nil)
  3117  	defer cleanupS1()
  3118  	state := s1.fsm.State()
  3119  	codec := rpcClient(t, s1)
  3120  	testutil.WaitForLeader(t, s1.RPC)
  3121  
  3122  	// Enable vault and allow authenticated
  3123  	tr := true
  3124  	s1.config.VaultConfig.Enabled = &tr
  3125  	s1.config.VaultConfig.AllowUnauthenticated = &tr
  3126  
  3127  	// Replace the Vault Client on the server
  3128  	tvc := &TestVaultClient{}
  3129  	s1.vault = tvc
  3130  
  3131  	// Create the node
  3132  	node := mock.Node()
  3133  	if err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node); err != nil {
  3134  		t.Fatalf("err: %v", err)
  3135  	}
  3136  
  3137  	// Create an alloc an allocation that has vault policies required
  3138  	alloc := mock.Alloc()
  3139  	alloc.NodeID = node.ID
  3140  	task := alloc.Job.TaskGroups[0].Tasks[0]
  3141  	tasks := []string{task.Name}
  3142  	task.Vault = &structs.Vault{Policies: []string{"a", "b"}}
  3143  	if err := state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc}); err != nil {
  3144  		t.Fatalf("err: %v", err)
  3145  	}
  3146  
  3147  	// Return an error when creating the token
  3148  	tvc.SetCreateTokenError(alloc.ID, task.Name,
  3149  		structs.NewRecoverableError(fmt.Errorf("recover"), true))
  3150  
  3151  	req := &structs.DeriveVaultTokenRequest{
  3152  		NodeID:   node.ID,
  3153  		SecretID: node.SecretID,
  3154  		AllocID:  alloc.ID,
  3155  		Tasks:    tasks,
  3156  		QueryOptions: structs.QueryOptions{
  3157  			Region: "global",
  3158  		},
  3159  	}
  3160  
  3161  	var resp structs.DeriveVaultTokenResponse
  3162  	err := msgpackrpc.CallWithCodec(codec, "Node.DeriveVaultToken", req, &resp)
  3163  	if err != nil {
  3164  		t.Fatalf("bad: %v", err)
  3165  	}
  3166  	if resp.Error == nil || !resp.Error.IsRecoverable() {
  3167  		t.Fatalf("bad: %+v", resp.Error)
  3168  	}
  3169  }
  3170  
  3171  func TestClientEndpoint_taskUsesConnect(t *testing.T) {
  3172  	t.Parallel()
  3173  
  3174  	try := func(t *testing.T, task *structs.Task, exp bool) {
  3175  		result := taskUsesConnect(task)
  3176  		require.Equal(t, exp, result)
  3177  	}
  3178  
  3179  	t.Run("task uses connect", func(t *testing.T) {
  3180  		try(t, &structs.Task{
  3181  			// see nomad.newConnectSidecarTask for how this works
  3182  			Name: "connect-proxy-myservice",
  3183  			Kind: "connect-proxy:myservice",
  3184  		}, true)
  3185  	})
  3186  
  3187  	t.Run("task does not use connect", func(t *testing.T) {
  3188  		try(t, &structs.Task{
  3189  			Name: "mytask",
  3190  			Kind: "incorrect:mytask",
  3191  		}, false)
  3192  	})
  3193  
  3194  	t.Run("task does not exist", func(t *testing.T) {
  3195  		try(t, nil, false)
  3196  	})
  3197  }
  3198  
  3199  func TestClientEndpoint_tasksNotUsingConnect(t *testing.T) {
  3200  	t.Parallel()
  3201  
  3202  	taskGroup := &structs.TaskGroup{
  3203  		Name: "testgroup",
  3204  		Tasks: []*structs.Task{{
  3205  			Name: "connect-proxy-service1",
  3206  			Kind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service1"),
  3207  		}, {
  3208  			Name: "incorrect-task3",
  3209  			Kind: "incorrect:task3",
  3210  		}, {
  3211  			Name: "connect-proxy-service4",
  3212  			Kind: structs.NewTaskKind(structs.ConnectProxyPrefix, "service4"),
  3213  		}, {
  3214  			Name: "incorrect-task5",
  3215  			Kind: "incorrect:task5",
  3216  		}, {
  3217  			Name: "task6",
  3218  			Kind: structs.NewTaskKind(structs.ConnectNativePrefix, "service6"),
  3219  		}},
  3220  	}
  3221  
  3222  	requestingTasks := []string{
  3223  		"connect-proxy-service1", // yes
  3224  		"task2",                  // does not exist
  3225  		"task3",                  // no
  3226  		"connect-proxy-service4", // yes
  3227  		"task5",                  // no
  3228  		"task6",                  // yes, native
  3229  	}
  3230  
  3231  	notConnect, usingConnect := connectTasks(taskGroup, requestingTasks)
  3232  
  3233  	notConnectExp := []string{"task2", "task3", "task5"}
  3234  	usingConnectExp := []connectTask{
  3235  		{TaskName: "connect-proxy-service1", TaskKind: "connect-proxy:service1"},
  3236  		{TaskName: "connect-proxy-service4", TaskKind: "connect-proxy:service4"},
  3237  		{TaskName: "task6", TaskKind: "connect-native:service6"},
  3238  	}
  3239  
  3240  	require.Equal(t, notConnectExp, notConnect)
  3241  	require.Equal(t, usingConnectExp, usingConnect)
  3242  }
  3243  
// mutateConnectJob runs the jobConnectHook mutator over job in place (this
// appends sidecar proxy tasks for Connect services), failing the test on any
// warnings or error.
func mutateConnectJob(t *testing.T, job *structs.Job) {
	var jch jobConnectHook
	_, warnings, err := jch.Mutate(job)
	require.Empty(t, warnings)
	require.NoError(t, err)
}
  3250  
// TestClientEndpoint_DeriveSIToken exercises the happy path of
// Node.DeriveSIToken: a Connect sidecar task gets a Consul Service Identity
// token, and a matching SITokenAccessor record is persisted per node.
func TestClientEndpoint_DeriveSIToken(t *testing.T) {
	t.Parallel()
	r := require.New(t)

	s1, cleanupS1 := TestServer(t, nil) // already sets consul mocks
	defer cleanupS1()

	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Set allow unauthenticated (no operator token required)
	s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true)

	// Create the node
	node := mock.Node()
	err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node)
	r.NoError(err)

	// Create an alloc with a typical connect service (sidecar) defined
	alloc := mock.ConnectAlloc()
	alloc.NodeID = node.ID
	mutateConnectJob(t, alloc.Job) // appends sidecar task
	sidecarTask := alloc.Job.TaskGroups[0].Tasks[1]

	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc})
	r.NoError(err)

	request := &structs.DeriveSITokenRequest{
		NodeID:       node.ID,
		SecretID:     node.SecretID,
		AllocID:      alloc.ID,
		Tasks:        []string{sidecarTask.Name},
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	// The RPC must succeed and carry no application-level error.
	var response structs.DeriveSITokenResponse
	err = msgpackrpc.CallWithCodec(codec, "Node.DeriveSIToken", request, &response)
	r.NoError(err)
	r.Nil(response.Error)

	// Check the state store and ensure we created a Consul SI Token Accessor
	ws := memdb.NewWatchSet()
	accessors, err := state.SITokenAccessorsByNode(ws, node.ID)
	r.NoError(err)
	r.Equal(1, len(accessors))                                  // only asked for one
	r.Equal("connect-proxy-testconnect", accessors[0].TaskName) // set by the mock
	r.Equal(node.ID, accessors[0].NodeID)                       // should match
	r.Equal(alloc.ID, accessors[0].AllocID)                     // should match
	r.True(helper.IsUUID(accessors[0].AccessorID))              // should be set
	r.Greater(accessors[0].CreateIndex, uint64(3))              // more than 3rd
}
  3303  
// TestClientEndpoint_DeriveSIToken_ConsulError asserts that when the Consul
// ACLs API fails with a recoverable error, Node.DeriveSIToken reports it via
// the response's Error field (still recoverable) rather than an RPC error.
func TestClientEndpoint_DeriveSIToken_ConsulError(t *testing.T) {
	t.Parallel()
	r := require.New(t)

	s1, cleanupS1 := TestServer(t, nil)
	defer cleanupS1()
	state := s1.fsm.State()
	codec := rpcClient(t, s1)
	testutil.WaitForLeader(t, s1.RPC)

	// Set allow unauthenticated (no operator token required)
	s1.config.ConsulConfig.AllowUnauthenticated = helper.BoolToPtr(true)

	// Create the node
	node := mock.Node()
	err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node)
	r.NoError(err)

	// Create an alloc with a typical connect service (sidecar) defined
	alloc := mock.ConnectAlloc()
	alloc.NodeID = node.ID
	mutateConnectJob(t, alloc.Job) // appends sidecar task
	sidecarTask := alloc.Job.TaskGroups[0].Tasks[1]

	// rejigger the server to use a broken mock consul that always returns
	// a recoverable error for token operations
	mockACLsAPI := consul.NewMockACLsAPI(s1.logger)
	mockACLsAPI.SetError(structs.NewRecoverableError(errors.New("consul recoverable error"), true))
	m := NewConsulACLsAPI(mockACLsAPI, s1.logger, nil)
	s1.consulACLs = m

	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 3, []*structs.Allocation{alloc})
	r.NoError(err)

	request := &structs.DeriveSITokenRequest{
		NodeID:       node.ID,
		SecretID:     node.SecretID,
		AllocID:      alloc.ID,
		Tasks:        []string{sidecarTask.Name},
		QueryOptions: structs.QueryOptions{Region: "global"},
	}

	var response structs.DeriveSITokenResponse
	err = msgpackrpc.CallWithCodec(codec, "Node.DeriveSIToken", request, &response)
	r.NoError(err)
	r.NotNil(response.Error)               // error should be set
	r.True(response.Error.IsRecoverable()) // and is recoverable
}
  3351  
  3352  func TestClientEndpoint_EmitEvents(t *testing.T) {
  3353  	t.Parallel()
  3354  	require := require.New(t)
  3355  
  3356  	s1, cleanupS1 := TestServer(t, nil)
  3357  	defer cleanupS1()
  3358  	state := s1.fsm.State()
  3359  	codec := rpcClient(t, s1)
  3360  	testutil.WaitForLeader(t, s1.RPC)
  3361  
  3362  	// create a node that we can register our event to
  3363  	node := mock.Node()
  3364  	err := state.UpsertNode(structs.MsgTypeTestSetup, 2, node)
  3365  	require.Nil(err)
  3366  
  3367  	nodeEvent := &structs.NodeEvent{
  3368  		Message:   "Registration failed",
  3369  		Subsystem: "Server",
  3370  		Timestamp: time.Now(),
  3371  	}
  3372  
  3373  	nodeEvents := map[string][]*structs.NodeEvent{node.ID: {nodeEvent}}
  3374  	req := structs.EmitNodeEventsRequest{
  3375  		NodeEvents:   nodeEvents,
  3376  		WriteRequest: structs.WriteRequest{Region: "global"},
  3377  	}
  3378  
  3379  	var resp structs.GenericResponse
  3380  	err = msgpackrpc.CallWithCodec(codec, "Node.EmitEvents", &req, &resp)
  3381  	require.Nil(err)
  3382  	require.NotEqual(uint64(0), resp.Index)
  3383  
  3384  	// Check for the node in the FSM
  3385  	ws := memdb.NewWatchSet()
  3386  	out, err := state.NodeByID(ws, node.ID)
  3387  	require.Nil(err)
  3388  	require.False(len(out.Events) < 2)
  3389  }