github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/client/alloc_endpoint_test.go

     1  package client
     2  
     3  import (
     4  	"encoding/json"
     5  	"fmt"
     6  	"io"
     7  	"net"
     8  	"runtime"
     9  	"strings"
    10  	"testing"
    11  	"time"
    12  
    13  	"github.com/hashicorp/go-msgpack/codec"
    14  	"github.com/hashicorp/nomad/acl"
    15  	"github.com/hashicorp/nomad/client/config"
    16  	cstructs "github.com/hashicorp/nomad/client/structs"
    17  	"github.com/hashicorp/nomad/helper/pluginutils/catalog"
    18  	"github.com/hashicorp/nomad/helper/uuid"
    19  	"github.com/hashicorp/nomad/nomad"
    20  	"github.com/hashicorp/nomad/nomad/mock"
    21  	"github.com/hashicorp/nomad/nomad/structs"
    22  	nstructs "github.com/hashicorp/nomad/nomad/structs"
    23  	nconfig "github.com/hashicorp/nomad/nomad/structs/config"
    24  	"github.com/hashicorp/nomad/plugins/drivers"
    25  	"github.com/hashicorp/nomad/testutil"
    26  	"github.com/stretchr/testify/require"
    27  	"golang.org/x/sys/unix"
    28  )
    29  
    30  func TestAllocations_Restart(t *testing.T) {
    31  	t.Parallel()
    32  	require := require.New(t)
    33  	client, cleanup := TestClient(t, nil)
    34  	defer cleanup()
    35  
    36  	a := mock.Alloc()
    37  	a.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
    38  	a.Job.TaskGroups[0].RestartPolicy = &nstructs.RestartPolicy{
    39  		Attempts: 0,
    40  		Mode:     nstructs.RestartPolicyModeFail,
    41  	}
    42  	a.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
    43  		"run_for": "10s",
    44  	}
    45  	require.Nil(client.addAlloc(a, ""))
    46  
    47  	// Try with bad alloc
    48  	req := &nstructs.AllocRestartRequest{}
    49  	var resp nstructs.GenericResponse
    50  	err := client.ClientRPC("Allocations.Restart", &req, &resp)
    51  	require.Error(err)
    52  
    53  	// Try with good alloc
    54  	req.AllocID = a.ID
    55  
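        	// Restart may race with the allocation actually starting; keep
        	// retrying while the error indicates the task is not running yet.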
    56  	testutil.WaitForResult(func() (bool, error) {
    57  		var resp2 nstructs.GenericResponse
    58  		err := client.ClientRPC("Allocations.Restart", &req, &resp2)
    59  		if err != nil && strings.Contains(err.Error(), "not running") {
    60  			return false, err
    61  		}
    62  
    63  		return true, nil
    64  	}, func(err error) {
    65  		t.Fatalf("err: %v", err)
    66  	})
    67  }
    68  
    69  func TestAllocations_Restart_ACL(t *testing.T) {
    70  	t.Parallel()
    71  	require := require.New(t)
    72  
    73  	server, addr, root, cleanupS := testACLServer(t, nil)
    74  	defer cleanupS()
    75  
    76  	client, cleanupC := TestClient(t, func(c *config.Config) {
    77  		c.Servers = []string{addr}
    78  		c.ACLEnabled = true
    79  	})
    80  	defer cleanupC()
    81  
    82  	job := mock.BatchJob()
    83  	job.TaskGroups[0].Count = 1
    84  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
    85  		"run_for": "20s",
    86  	}
    87  
    88  	// Wait for client to be running job
    89  	alloc := testutil.WaitForRunningWithToken(t, server.RPC, job, root.SecretID)[0]
    90  
    91  	// Try request without a token and expect failure
    92  	{
    93  		req := &nstructs.AllocRestartRequest{}
    94  		req.AllocID = alloc.ID
    95  		var resp nstructs.GenericResponse
    96  		err := client.ClientRPC("Allocations.Restart", &req, &resp)
    97  		require.NotNil(err)
    98  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
    99  	}
   100  
   101  	// Try request with an invalid token and expect failure
   102  	{
   103  		token := mock.CreatePolicyAndToken(t, server.State(), 1005, "invalid", mock.NamespacePolicy(nstructs.DefaultNamespace, "", []string{}))
   104  		req := &nstructs.AllocRestartRequest{}
   105  		req.AllocID = alloc.ID
   106  		req.AuthToken = token.SecretID
   107  
   108  		var resp nstructs.GenericResponse
   109  		err := client.ClientRPC("Allocations.Restart", &req, &resp)
   110  
   111  		require.NotNil(err)
   112  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
   113  	}
   114  
   115  	// Try request with a valid token
   116  	{
   117  		policyHCL := mock.NamespacePolicy(nstructs.DefaultNamespace, "", []string{acl.NamespaceCapabilityAllocLifecycle})
   118  		token := mock.CreatePolicyAndToken(t, server.State(), 1007, "valid", policyHCL)
   119  		require.NotNil(token)
   120  		req := &nstructs.AllocRestartRequest{}
   121  		req.AllocID = alloc.ID
   122  		req.AuthToken = token.SecretID
   123  		req.Namespace = nstructs.DefaultNamespace
   124  		var resp nstructs.GenericResponse
   125  		err := client.ClientRPC("Allocations.Restart", &req, &resp)
   126  		require.NoError(err)
   127  		//require.True(nstructs.IsErrUnknownAllocation(err), "Expected unknown alloc, found: %v", err)
   128  	}
   129  
   130  	// Try request with a management token
   131  	{
   132  		req := &nstructs.AllocRestartRequest{}
   133  		req.AllocID = alloc.ID
   134  		req.AuthToken = root.SecretID
   135  		var resp nstructs.GenericResponse
   136  		err := client.ClientRPC("Allocations.Restart", &req, &resp)
   137  		// Depending on how quickly the alloc restarts there may be no
   138  		// error *or* a task not running error; either is fine.
   139  		if err != nil {
   140  			require.Contains(err.Error(), "Task not running", err)
   141  		}
   142  	}
   143  }
   144  
   145  func TestAllocations_GarbageCollectAll(t *testing.T) {
   146  	t.Parallel()
   147  	require := require.New(t)
   148  	client, cleanup := TestClient(t, nil)
   149  	defer cleanup()
   150  
   151  	req := &nstructs.NodeSpecificRequest{}
   152  	var resp nstructs.GenericResponse
   153  	require.Nil(client.ClientRPC("Allocations.GarbageCollectAll", &req, &resp))
   154  }
   155  
   156  func TestAllocations_GarbageCollectAll_ACL(t *testing.T) {
   157  	t.Parallel()
   158  	require := require.New(t)
   159  
   160  	server, addr, root, cleanupS := testACLServer(t, nil)
   161  	defer cleanupS()
   162  
   163  	client, cleanupC := TestClient(t, func(c *config.Config) {
   164  		c.Servers = []string{addr}
   165  		c.ACLEnabled = true
   166  	})
   167  	defer cleanupC()
   168  
   169  	// Try request without a token and expect failure
   170  	{
   171  		req := &nstructs.NodeSpecificRequest{}
   172  		var resp nstructs.GenericResponse
   173  		err := client.ClientRPC("Allocations.GarbageCollectAll", &req, &resp)
   174  		require.NotNil(err)
   175  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
   176  	}
   177  
   178  	// Try request with an invalid token and expect failure
   179  	{
   180  		token := mock.CreatePolicyAndToken(t, server.State(), 1005, "invalid", mock.NodePolicy(acl.PolicyDeny))
   181  		req := &nstructs.NodeSpecificRequest{}
   182  		req.AuthToken = token.SecretID
   183  
   184  		var resp nstructs.GenericResponse
   185  		err := client.ClientRPC("Allocations.GarbageCollectAll", &req, &resp)
   186  
   187  		require.NotNil(err)
   188  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
   189  	}
   190  
   191  	// Try request with a valid token
   192  	{
   193  		token := mock.CreatePolicyAndToken(t, server.State(), 1007, "valid", mock.NodePolicy(acl.PolicyWrite))
   194  		req := &nstructs.NodeSpecificRequest{}
   195  		req.AuthToken = token.SecretID
   196  		var resp nstructs.GenericResponse
   197  		require.Nil(client.ClientRPC("Allocations.GarbageCollectAll", &req, &resp))
   198  	}
   199  
   200  	// Try request with a management token
   201  	{
   202  		req := &nstructs.NodeSpecificRequest{}
   203  		req.AuthToken = root.SecretID
   204  		var resp nstructs.GenericResponse
   205  		require.Nil(client.ClientRPC("Allocations.GarbageCollectAll", &req, &resp))
   206  	}
   207  }
   208  
   209  func TestAllocations_GarbageCollect(t *testing.T) {
   210  	t.Parallel()
   211  	require := require.New(t)
   212  	client, cleanup := TestClient(t, func(c *config.Config) {
   213  		c.GCDiskUsageThreshold = 100.0
   214  	})
   215  	defer cleanup()
   216  
   217  	a := mock.Alloc()
   218  	a.Job.TaskGroups[0].Tasks[0].Driver = "mock_driver"
   219  	rp := &nstructs.RestartPolicy{
   220  		Attempts: 0,
   221  		Mode:     nstructs.RestartPolicyModeFail,
   222  	}
   223  	a.Job.TaskGroups[0].RestartPolicy = rp
   224  	a.Job.TaskGroups[0].Tasks[0].RestartPolicy = rp
   225  	a.Job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   226  		"run_for": "10ms",
   227  	}
   228  	require.Nil(client.addAlloc(a, ""))
   229  
   230  	// Try with bad alloc
   231  	req := &nstructs.AllocSpecificRequest{}
   232  	var resp nstructs.GenericResponse
   233  	err := client.ClientRPC("Allocations.GarbageCollect", &req, &resp)
   234  	require.NotNil(err)
   235  
   236  	// Try with good alloc
   237  	req.AllocID = a.ID
   238  	testutil.WaitForResult(func() (bool, error) {
   239  		// Check whether the alloc runner has already been removed or destroyed
   240  		if ar, ok := client.allocs[a.ID]; !ok || ar.IsDestroyed() {
   241  			return true, nil
   242  		}
   243  
   244  		var resp2 nstructs.GenericResponse
   245  		err := client.ClientRPC("Allocations.GarbageCollect", &req, &resp2)
   246  		return err == nil, err
   247  	}, func(err error) {
   248  		t.Fatalf("err: %v", err)
   249  	})
   250  }
   251  
   252  func TestAllocations_GarbageCollect_ACL(t *testing.T) {
   253  	t.Parallel()
   254  	require := require.New(t)
   255  
   256  	server, addr, root, cleanupS := testACLServer(t, nil)
   257  	defer cleanupS()
   258  
   259  	client, cleanupC := TestClient(t, func(c *config.Config) {
   260  		c.Servers = []string{addr}
   261  		c.ACLEnabled = true
   262  	})
   263  	defer cleanupC()
   264  
   265  	job := mock.BatchJob()
   266  	job.TaskGroups[0].Count = 1
   267  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   268  		"run_for": "20s",
   269  	}
   270  
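        	// noSuchAllocErr is only used as the assertion failure message below;
        	// require.Error simply checks that some error occurred.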
   271  	noSuchAllocErr := fmt.Errorf("No such allocation on client or allocation not eligible for GC")
   272  
   273  	// Wait for client to be running job
   274  	alloc := testutil.WaitForRunningWithToken(t, server.RPC, job, root.SecretID)[0]
   275  
   276  	// Try request without a token and expect failure
   277  	{
   278  		req := &nstructs.AllocSpecificRequest{}
   279  		req.AllocID = alloc.ID
   280  		var resp nstructs.GenericResponse
   281  		err := client.ClientRPC("Allocations.GarbageCollect", &req, &resp)
   282  		require.NotNil(err)
   283  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
   284  	}
   285  
   286  	// Try request with an invalid token and expect failure
   287  	{
   288  		token := mock.CreatePolicyAndToken(t, server.State(), 1005, "invalid", mock.NodePolicy(acl.PolicyDeny))
   289  		req := &nstructs.AllocSpecificRequest{}
   290  		req.AllocID = alloc.ID
   291  		req.AuthToken = token.SecretID
   292  
   293  		var resp nstructs.GenericResponse
   294  		err := client.ClientRPC("Allocations.GarbageCollect", &req, &resp)
   295  
   296  		require.NotNil(err)
   297  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
   298  	}
   299  
   300  	// Try request with a valid token
   301  	{
   302  		token := mock.CreatePolicyAndToken(t, server.State(), 1005, "test-valid",
   303  			mock.NamespacePolicy(nstructs.DefaultNamespace, "", []string{acl.NamespaceCapabilitySubmitJob}))
   304  		req := &nstructs.AllocSpecificRequest{}
   305  		req.AllocID = alloc.ID
   306  		req.AuthToken = token.SecretID
   307  		req.Namespace = nstructs.DefaultNamespace
   308  
   309  		var resp nstructs.GenericResponse
   310  		err := client.ClientRPC("Allocations.GarbageCollect", &req, &resp)
   311  		require.Error(err, noSuchAllocErr)
   312  	}
   313  
   314  	// Try request with a management token
   315  	{
   316  		req := &nstructs.AllocSpecificRequest{}
   317  		req.AuthToken = root.SecretID
   318  
   319  		var resp nstructs.GenericResponse
   320  		err := client.ClientRPC("Allocations.GarbageCollect", &req, &resp)
   321  		require.Error(err, noSuchAllocErr)
   322  	}
   323  }
   324  
   325  func TestAllocations_Signal(t *testing.T) {
   326  	t.Parallel()
   327  
   328  	client, cleanup := TestClient(t, nil)
   329  	defer cleanup()
   330  
   331  	a := mock.Alloc()
   332  	require.Nil(t, client.addAlloc(a, ""))
   333  
   334  	// Try with bad alloc
   335  	req := &nstructs.AllocSignalRequest{}
   336  	var resp nstructs.GenericResponse
   337  	err := client.ClientRPC("Allocations.Signal", &req, &resp)
   338  	require.NotNil(t, err)
   339  	require.True(t, nstructs.IsErrUnknownAllocation(err))
   340  
   341  	// Try with good alloc
   342  	req.AllocID = a.ID
   343  
   344  	var resp2 nstructs.GenericResponse
   345  	err = client.ClientRPC("Allocations.Signal", &req, &resp2)
   346  
   347  	require.Error(t, err, "Expected error, got: %s, resp: %#+v", err, resp2)
   348  	require.Contains(t, err.Error(), "Failed to signal task: web, err: Task not running")
   349  }
   350  
   351  func TestAllocations_Signal_ACL(t *testing.T) {
   352  	t.Parallel()
   353  	require := require.New(t)
   354  
   355  	server, addr, root, cleanupS := testACLServer(t, nil)
   356  	defer cleanupS()
   357  
   358  	client, cleanupC := TestClient(t, func(c *config.Config) {
   359  		c.Servers = []string{addr}
   360  		c.ACLEnabled = true
   361  	})
   362  	defer cleanupC()
   363  
   364  	job := mock.BatchJob()
   365  	job.TaskGroups[0].Count = 1
   366  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   367  		"run_for": "20s",
   368  	}
   369  
   370  	// Wait for client to be running job
   371  	alloc := testutil.WaitForRunningWithToken(t, server.RPC, job, root.SecretID)[0]
   372  
   373  	// Try request without a token and expect failure
   374  	{
   375  		req := &nstructs.AllocSignalRequest{}
   376  		req.AllocID = alloc.ID
   377  		var resp nstructs.GenericResponse
   378  		err := client.ClientRPC("Allocations.Signal", &req, &resp)
   379  		require.NotNil(err)
   380  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
   381  	}
   382  
   383  	// Try request with an invalid token and expect failure
   384  	{
   385  		token := mock.CreatePolicyAndToken(t, server.State(), 1005, "invalid", mock.NodePolicy(acl.PolicyDeny))
   386  		req := &nstructs.AllocSignalRequest{}
   387  		req.AllocID = alloc.ID
   388  		req.AuthToken = token.SecretID
   389  
   390  		var resp nstructs.GenericResponse
   391  		err := client.ClientRPC("Allocations.Signal", &req, &resp)
   392  
   393  		require.NotNil(err)
   394  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
   395  	}
   396  
   397  	// Try request with a valid token
   398  	{
   399  		token := mock.CreatePolicyAndToken(t, server.State(), 1005, "test-valid",
   400  			mock.NamespacePolicy(nstructs.DefaultNamespace, "", []string{acl.NamespaceCapabilityAllocLifecycle}))
   401  		req := &nstructs.AllocSignalRequest{}
   402  		req.AllocID = alloc.ID
   403  		req.AuthToken = token.SecretID
   404  		req.Namespace = nstructs.DefaultNamespace
   405  
   406  		var resp nstructs.GenericResponse
   407  		err := client.ClientRPC("Allocations.Signal", &req, &resp)
   408  		require.NoError(err)
   409  	}
   410  
   411  	// Try request with a management token
   412  	{
   413  		req := &nstructs.AllocSignalRequest{}
   414  		req.AllocID = alloc.ID
   415  		req.AuthToken = root.SecretID
   416  
   417  		var resp nstructs.GenericResponse
   418  		err := client.ClientRPC("Allocations.Signal", &req, &resp)
   419  		require.NoError(err)
   420  	}
   421  }
   422  
   423  func TestAllocations_Stats(t *testing.T) {
   424  	t.Parallel()
   425  	require := require.New(t)
   426  	client, cleanup := TestClient(t, nil)
   427  	defer cleanup()
   428  
   429  	a := mock.Alloc()
   430  	require.Nil(client.addAlloc(a, ""))
   431  
   432  	// Try with bad alloc
   433  	req := &cstructs.AllocStatsRequest{}
   434  	var resp cstructs.AllocStatsResponse
   435  	err := client.ClientRPC("Allocations.Stats", &req, &resp)
   436  	require.NotNil(err)
   437  
   438  	// Try with good alloc
   439  	req.AllocID = a.ID
   440  	testutil.WaitForResult(func() (bool, error) {
   441  		var resp2 cstructs.AllocStatsResponse
   442  		err := client.ClientRPC("Allocations.Stats", &req, &resp2)
   443  		if err != nil {
   444  			return false, err
   445  		}
   446  		if resp2.Stats == nil {
   447  			return false, fmt.Errorf("invalid stats object")
   448  		}
   449  
   450  		return true, nil
   451  	}, func(err error) {
   452  		t.Fatalf("err: %v", err)
   453  	})
   454  }
   455  
   456  func TestAllocations_Stats_ACL(t *testing.T) {
   457  	t.Parallel()
   458  	require := require.New(t)
   459  
   460  	server, addr, root, cleanupS := testACLServer(t, nil)
   461  	defer cleanupS()
   462  
   463  	client, cleanupC := TestClient(t, func(c *config.Config) {
   464  		c.Servers = []string{addr}
   465  		c.ACLEnabled = true
   466  	})
   467  	defer cleanupC()
   468  
   469  	job := mock.BatchJob()
   470  	job.TaskGroups[0].Count = 1
   471  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   472  		"run_for": "20s",
   473  	}
   474  
   475  	// Wait for client to be running job
   476  	alloc := testutil.WaitForRunningWithToken(t, server.RPC, job, root.SecretID)[0]
   477  
   478  	// Try request without a token and expect failure
   479  	{
   480  		req := &cstructs.AllocStatsRequest{}
   481  		req.AllocID = alloc.ID
   482  		var resp cstructs.AllocStatsResponse
   483  		err := client.ClientRPC("Allocations.Stats", &req, &resp)
   484  		require.NotNil(err)
   485  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
   486  	}
   487  
   488  	// Try request with an invalid token and expect failure
   489  	{
   490  		token := mock.CreatePolicyAndToken(t, server.State(), 1005, "invalid", mock.NodePolicy(acl.PolicyDeny))
   491  		req := &cstructs.AllocStatsRequest{}
   492  		req.AllocID = alloc.ID
   493  		req.AuthToken = token.SecretID
   494  
   495  		var resp cstructs.AllocStatsResponse
   496  		err := client.ClientRPC("Allocations.Stats", &req, &resp)
   497  
   498  		require.NotNil(err)
   499  		require.EqualError(err, nstructs.ErrPermissionDenied.Error())
   500  	}
   501  
   502  	// Try request with a valid token
   503  	{
   504  		token := mock.CreatePolicyAndToken(t, server.State(), 1005, "test-valid",
   505  			mock.NamespacePolicy(nstructs.DefaultNamespace, "", []string{acl.NamespaceCapabilityReadJob}))
   506  		req := &cstructs.AllocStatsRequest{}
   507  		req.AllocID = alloc.ID
   508  		req.AuthToken = token.SecretID
   509  		req.Namespace = nstructs.DefaultNamespace
   510  
   511  		var resp cstructs.AllocStatsResponse
   512  		err := client.ClientRPC("Allocations.Stats", &req, &resp)
   513  		require.NoError(err)
   514  	}
   515  
   516  	// Try request with a management token
   517  	{
   518  		req := &cstructs.AllocStatsRequest{}
   519  		req.AllocID = alloc.ID
   520  		req.AuthToken = root.SecretID
   521  
   522  		var resp cstructs.AllocStatsResponse
   523  		err := client.ClientRPC("Allocations.Stats", &req, &resp)
   524  		require.NoError(err)
   525  	}
   526  }
   527  
   528  func TestAlloc_ExecStreaming(t *testing.T) {
   529  	t.Parallel()
   530  	require := require.New(t)
   531  
   532  	// Start a server and client
   533  	s, cleanupS := nomad.TestServer(t, nil)
   534  	defer cleanupS()
   535  	testutil.WaitForLeader(t, s.RPC)
   536  
   537  	c, cleanupC := TestClient(t, func(c *config.Config) {
   538  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   539  	})
   540  	defer cleanupC()
   541  
   542  	expectedStdout := "Hello from the other side\n"
   543  	expectedStderr := "Hello from the other side\n"
   544  	job := mock.BatchJob()
   545  	job.TaskGroups[0].Count = 1
   546  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   547  		"run_for": "20s",
   548  		"exec_command": map[string]interface{}{
   549  			"run_for":       "1ms",
   550  			"stdout_string": expectedStdout,
   551  			"stderr_string": expectedStderr,
   552  			"exit_code":     3,
   553  		},
   554  	}
   555  
   556  	// Wait for client to be running job
   557  	testutil.WaitForRunning(t, s.RPC, job)
   558  
   559  	// Get the allocation ID
   560  	args := nstructs.AllocListRequest{}
   561  	args.Region = "global"
   562  	resp := nstructs.AllocListResponse{}
   563  	require.NoError(s.RPC("Alloc.List", &args, &resp))
   564  	require.Len(resp.Allocations, 1)
   565  	allocID := resp.Allocations[0].ID
   566  
   567  	// Make the request
   568  	req := &cstructs.AllocExecRequest{
   569  		AllocID:      allocID,
   570  		Task:         job.TaskGroups[0].Tasks[0].Name,
   571  		Tty:          true,
   572  		Cmd:          []string{"placeholder command"},
   573  		QueryOptions: nstructs.QueryOptions{Region: "global"},
   574  	}
   575  
   576  	// Get the handler
   577  	handler, err := c.StreamingRpcHandler("Allocations.Exec")
   578  	require.Nil(err)
   579  
   580  	// Create a pipe
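        	// p2 is handed to the streaming RPC handler as the server side of the
        	// connection; p1 is used by the test to encode the request and decode
        	// the response frames.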
   581  	p1, p2 := net.Pipe()
   582  	defer p1.Close()
   583  	defer p2.Close()
   584  
   585  	errCh := make(chan error)
   586  	frames := make(chan *drivers.ExecTaskStreamingResponseMsg)
   587  
   588  	// Start the handler
   589  	go handler(p2)
   590  	go decodeFrames(t, p1, frames, errCh)
   591  
   592  	// Send the request
   593  	encoder := codec.NewEncoder(p1, nstructs.MsgpackHandle)
   594  	require.Nil(encoder.Encode(req))
   595  
   596  	timeout := time.After(3 * time.Second)
   597  
   598  	exitCode := -1
   599  	receivedStdout := ""
   600  	receivedStderr := ""
   601  
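        	// Accumulate stdout/stderr frames until the expected output and exit
        	// code have all been observed, or the timeout fires.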
   602  OUTER:
   603  	for {
   604  		select {
   605  		case <-timeout:
   606  			// Timed out: report what has been received so far.
   607  			require.Equal(expectedStdout, receivedStdout, "didn't receive expected stdout")
   608  			require.Equal(expectedStderr, receivedStderr, "didn't receive expected stderr")
   609  			require.Equal(3, exitCode, "failed to get exit code")
   610  			require.FailNow("timed out")
   611  		case err := <-errCh:
   612  			require.NoError(err)
   613  		case f := <-frames:
   614  			switch {
   615  			case f.Stdout != nil && len(f.Stdout.Data) != 0:
   616  				receivedStdout += string(f.Stdout.Data)
   617  			case f.Stderr != nil && len(f.Stderr.Data) != 0:
   618  				receivedStderr += string(f.Stderr.Data)
   619  			case f.Exited && f.Result != nil:
   620  				exitCode = int(f.Result.ExitCode)
   621  			default:
   622  				t.Logf("received irrelevant frame: %v", f)
   623  			}
   624  
   625  			if expectedStdout == receivedStdout && expectedStderr == receivedStderr && exitCode == 3 {
   626  				break OUTER
   627  			}
   628  		}
   629  	}
   630  }
   631  
   632  func TestAlloc_ExecStreaming_NoAllocation(t *testing.T) {
   633  	t.Parallel()
   634  	require := require.New(t)
   635  
   636  	// Start a server and client
   637  	s, cleanupS := nomad.TestServer(t, nil)
   638  	defer cleanupS()
   639  	testutil.WaitForLeader(t, s.RPC)
   640  
   641  	c, cleanupC := TestClient(t, func(c *config.Config) {
   642  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   643  	})
   644  	defer cleanupC()
   645  
   646  	// Make the request
   647  	req := &cstructs.AllocExecRequest{
   648  		AllocID:      uuid.Generate(),
   649  		Task:         "testtask",
   650  		Tty:          true,
   651  		Cmd:          []string{"placeholder command"},
   652  		QueryOptions: nstructs.QueryOptions{Region: "global"},
   653  	}
   654  
   655  	// Get the handler
   656  	handler, err := c.StreamingRpcHandler("Allocations.Exec")
   657  	require.Nil(err)
   658  
   659  	// Create a pipe
   660  	p1, p2 := net.Pipe()
   661  	defer p1.Close()
   662  	defer p2.Close()
   663  
   664  	errCh := make(chan error)
   665  	frames := make(chan *drivers.ExecTaskStreamingResponseMsg)
   666  
   667  	// Start the handler
   668  	go handler(p2)
   669  	go decodeFrames(t, p1, frames, errCh)
   670  
   671  	// Send the request
   672  	encoder := codec.NewEncoder(p1, nstructs.MsgpackHandle)
   673  	require.Nil(encoder.Encode(req))
   674  
   675  	timeout := time.After(3 * time.Second)
   676  
   677  	select {
   678  	case <-timeout:
   679  		require.FailNow("timed out")
   680  	case err := <-errCh:
   681  		require.True(nstructs.IsErrUnknownAllocation(err), "expected no allocation error but found: %v", err)
   682  	case f := <-frames:
   683  		require.Fail("received unexpected frame", "frame: %#v", f)
   684  	}
   685  }
   686  
   687  func TestAlloc_ExecStreaming_DisableRemoteExec(t *testing.T) {
   688  	t.Parallel()
   689  	require := require.New(t)
   690  
   691  	// Start a server and client
   692  	s, cleanupS := nomad.TestServer(t, nil)
   693  	defer cleanupS()
   694  	testutil.WaitForLeader(t, s.RPC)
   695  
   696  	c, cleanupC := TestClient(t, func(c *config.Config) {
   697  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   698  		c.DisableRemoteExec = true
   699  	})
   700  	defer cleanupC()
   701  
   702  	// Make the request
   703  	req := &cstructs.AllocExecRequest{
   704  		AllocID:      uuid.Generate(),
   705  		Task:         "testtask",
   706  		Tty:          true,
   707  		Cmd:          []string{"placeholder command"},
   708  		QueryOptions: nstructs.QueryOptions{Region: "global"},
   709  	}
   710  
   711  	// Get the handler
   712  	handler, err := c.StreamingRpcHandler("Allocations.Exec")
   713  	require.Nil(err)
   714  
   715  	// Create a pipe
   716  	p1, p2 := net.Pipe()
   717  	defer p1.Close()
   718  	defer p2.Close()
   719  
   720  	errCh := make(chan error)
   721  	frames := make(chan *drivers.ExecTaskStreamingResponseMsg)
   722  
   723  	// Start the handler
   724  	go handler(p2)
   725  	go decodeFrames(t, p1, frames, errCh)
   726  
   727  	// Send the request
   728  	encoder := codec.NewEncoder(p1, nstructs.MsgpackHandle)
   729  	require.Nil(encoder.Encode(req))
   730  
   731  	timeout := time.After(3 * time.Second)
   732  
   733  	select {
   734  	case <-timeout:
   735  		require.FailNow("timed out")
   736  	case err := <-errCh:
   737  		require.True(nstructs.IsErrPermissionDenied(err), "expected permission denied error but found: %v", err)
   738  	case f := <-frames:
   739  		require.Fail("received unexpected frame", "frame: %#v", f)
   740  	}
   741  }
   742  
   743  func TestAlloc_ExecStreaming_ACL_Basic(t *testing.T) {
   744  	t.Parallel()
   745  
   746  	// Start a server and client
   747  	s, root, cleanupS := nomad.TestACLServer(t, nil)
   748  	defer cleanupS()
   749  	testutil.WaitForLeader(t, s.RPC)
   750  
   751  	client, cleanupC := TestClient(t, func(c *config.Config) {
   752  		c.ACLEnabled = true
   753  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   754  	})
   755  	defer cleanupC()
   756  
   757  	// Create a bad token
   758  	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
   759  	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)
   760  
   761  	policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "",
   762  		[]string{acl.NamespaceCapabilityAllocExec, acl.NamespaceCapabilityReadFS})
   763  	tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)
   764  
   765  	job := mock.BatchJob()
   766  	job.TaskGroups[0].Count = 1
   767  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   768  		"run_for": "20s",
   769  	}
   770  
   771  	// Wait for client to be running job
   772  	alloc := testutil.WaitForRunningWithToken(t, s.RPC, job, root.SecretID)[0]
   773  
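        	// The exec request below targets task "testtask", which does not exist
        	// in the mock job, so sufficiently privileged tokens are expected to
        	// fail with "task not found" rather than a permission error.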
   774  	cases := []struct {
   775  		Name          string
   776  		Token         string
   777  		ExpectedError string
   778  	}{
   779  		{
   780  			Name:          "bad token",
   781  			Token:         tokenBad.SecretID,
   782  			ExpectedError: structs.ErrPermissionDenied.Error(),
   783  		},
   784  		{
   785  			Name:          "good token",
   786  			Token:         tokenGood.SecretID,
   787  			ExpectedError: "task not found",
   788  		},
   789  		{
   790  			Name:          "root token",
   791  			Token:         root.SecretID,
   792  			ExpectedError: "task not found",
   793  		},
   794  	}
   795  
   796  	for _, c := range cases {
   797  		t.Run(c.Name, func(t *testing.T) {
   798  
   799  			// Make the request
   800  			req := &cstructs.AllocExecRequest{
   801  				AllocID: alloc.ID,
   802  				Task:    "testtask",
   803  				Tty:     true,
   804  				Cmd:     []string{"placeholder command"},
   805  				QueryOptions: nstructs.QueryOptions{
   806  					Region:    "global",
   807  					AuthToken: c.Token,
   808  					Namespace: nstructs.DefaultNamespace,
   809  				},
   810  			}
   811  
   812  			// Get the handler
   813  			handler, err := client.StreamingRpcHandler("Allocations.Exec")
   814  			require.Nil(t, err)
   815  
   816  			// Create a pipe
   817  			p1, p2 := net.Pipe()
   818  			defer p1.Close()
   819  			defer p2.Close()
   820  
   821  			errCh := make(chan error)
   822  			frames := make(chan *drivers.ExecTaskStreamingResponseMsg)
   823  
   824  			// Start the handler
   825  			go handler(p2)
   826  			go decodeFrames(t, p1, frames, errCh)
   827  
   828  			// Send the request
   829  			encoder := codec.NewEncoder(p1, nstructs.MsgpackHandle)
   830  			require.Nil(t, encoder.Encode(req))
   831  
   832  			select {
   833  			case <-time.After(3 * time.Second):
   834  				require.FailNow(t, "timed out")
   835  			case err := <-errCh:
   836  				require.Contains(t, err.Error(), c.ExpectedError)
   837  			case f := <-frames:
   838  				require.Fail(t, "received unexpected frame", "frame: %#v", f)
   839  			}
   840  		})
   841  	}
   842  }
   843  
   844  // TestAlloc_ExecStreaming_ACL_WithIsolation_Image asserts that a token only needs
   845  // the alloc-exec ACL capability when image isolation is used.
   846  func TestAlloc_ExecStreaming_ACL_WithIsolation_Image(t *testing.T) {
   847  	t.Parallel()
   848  	isolation := drivers.FSIsolationImage
   849  
   850  	// Start a server and client
   851  	s, root, cleanupS := nomad.TestACLServer(t, nil)
   852  	defer cleanupS()
   853  	testutil.WaitForLeader(t, s.RPC)
   854  
   855  	client, cleanupC := TestClient(t, func(c *config.Config) {
   856  		c.ACLEnabled = true
   857  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   858  
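        		// Configure the mock driver to report image filesystem isolation;
        		// with image isolation the alloc-exec capability alone should be
        		// enough to exec (see the cases below).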
   859  		pluginConfig := []*nconfig.PluginConfig{
   860  			{
   861  				Name: "mock_driver",
   862  				Config: map[string]interface{}{
   863  					"fs_isolation": string(isolation),
   864  				},
   865  			},
   866  		}
   867  
   868  		c.PluginLoader = catalog.TestPluginLoaderWithOptions(t, "", map[string]string{}, pluginConfig)
   869  	})
   870  	defer cleanupC()
   871  
   872  	// Create a bad token
   873  	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
   874  	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)
   875  
   876  	policyAllocExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
   877  		[]string{acl.NamespaceCapabilityAllocExec})
   878  	tokenAllocExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-exec", policyAllocExec)
   879  
   880  	policyAllocNodeExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
   881  		[]string{acl.NamespaceCapabilityAllocExec, acl.NamespaceCapabilityAllocNodeExec})
   882  	tokenAllocNodeExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-node-exec", policyAllocNodeExec)
   883  
   884  	job := mock.BatchJob()
   885  	job.TaskGroups[0].Count = 1
   886  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   887  		"run_for": "20s",
   888  		"exec_command": map[string]interface{}{
   889  			"run_for":       "1ms",
   890  			"stdout_string": "some output",
   891  		},
   892  	}
   893  
   894  	// Wait for client to be running job
   895  	testutil.WaitForRunningWithToken(t, s.RPC, job, root.SecretID)
   896  
   897  	// Get the allocation ID
   898  	args := nstructs.AllocListRequest{}
   899  	args.Region = "global"
   900  	args.AuthToken = root.SecretID
   901  	args.Namespace = nstructs.DefaultNamespace
   902  	resp := nstructs.AllocListResponse{}
   903  	require.NoError(t, s.RPC("Alloc.List", &args, &resp))
   904  	require.Len(t, resp.Allocations, 1)
   905  	allocID := resp.Allocations[0].ID
   906  
   907  	cases := []struct {
   908  		Name          string
   909  		Token         string
   910  		ExpectedError string
   911  	}{
   912  		{
   913  			Name:          "bad token",
   914  			Token:         tokenBad.SecretID,
   915  			ExpectedError: structs.ErrPermissionDenied.Error(),
   916  		},
   917  		{
   918  			Name:          "alloc-exec token",
   919  			Token:         tokenAllocExec.SecretID,
   920  			ExpectedError: "",
   921  		},
   922  		{
   923  			Name:          "alloc-node-exec token",
   924  			Token:         tokenAllocNodeExec.SecretID,
   925  			ExpectedError: "",
   926  		},
   927  		{
   928  			Name:          "root token",
   929  			Token:         root.SecretID,
   930  			ExpectedError: "",
   931  		},
   932  	}
   933  
   934  	for _, c := range cases {
   935  		t.Run(c.Name, func(t *testing.T) {
   936  
   937  			// Make the request
   938  			req := &cstructs.AllocExecRequest{
   939  				AllocID: allocID,
   940  				Task:    job.TaskGroups[0].Tasks[0].Name,
   941  				Tty:     true,
   942  				Cmd:     []string{"placeholder command"},
   943  				QueryOptions: nstructs.QueryOptions{
   944  					Region:    "global",
   945  					AuthToken: c.Token,
   946  					Namespace: nstructs.DefaultNamespace,
   947  				},
   948  			}
   949  
   950  			// Get the handler
   951  			handler, err := client.StreamingRpcHandler("Allocations.Exec")
   952  			require.Nil(t, err)
   953  
   954  			// Create a pipe
   955  			p1, p2 := net.Pipe()
   956  			defer p1.Close()
   957  			defer p2.Close()
   958  
   959  			errCh := make(chan error)
   960  			frames := make(chan *drivers.ExecTaskStreamingResponseMsg)
   961  
   962  			// Start the handler
   963  			go handler(p2)
   964  			go decodeFrames(t, p1, frames, errCh)
   965  
   966  			// Send the request
   967  			encoder := codec.NewEncoder(p1, nstructs.MsgpackHandle)
   968  			require.Nil(t, encoder.Encode(req))
   969  
   970  			select {
   971  			case <-time.After(3 * time.Second):
   972  			case err := <-errCh:
   973  				if c.ExpectedError == "" {
   974  					require.NoError(t, err)
   975  				} else {
   976  					require.Contains(t, err.Error(), c.ExpectedError)
   977  				}
   978  			case f := <-frames:
   979  				// receiving a frame is fine as long as no error is expected
   980  				if c.ExpectedError != "" {
   981  					require.Fail(t, "unexpected frame", "frame: %#v", f)
   982  				}
   983  			}
   984  		})
   985  	}
   986  }
   987  
   988  // TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot asserts that a token only needs
   989  // the alloc-exec ACL capability when chroot isolation is used.
   990  func TestAlloc_ExecStreaming_ACL_WithIsolation_Chroot(t *testing.T) {
   991  	t.Parallel()
   992  
   993  	if runtime.GOOS != "linux" || unix.Geteuid() != 0 {
   994  		t.Skip("chroot isolation requires linux root")
   995  	}
   996  
   997  	isolation := drivers.FSIsolationChroot
   998  
   999  	// Start a server and client
  1000  	s, root, cleanupS := nomad.TestACLServer(t, nil)
  1001  	defer cleanupS()
  1002  	testutil.WaitForLeader(t, s.RPC)
  1003  
  1004  	client, cleanup := TestClient(t, func(c *config.Config) {
  1005  		c.ACLEnabled = true
  1006  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
  1007  
  1008  		pluginConfig := []*nconfig.PluginConfig{
  1009  			{
  1010  				Name: "mock_driver",
  1011  				Config: map[string]interface{}{
  1012  					"fs_isolation": string(isolation),
  1013  				},
  1014  			},
  1015  		}
  1016  
  1017  		c.PluginLoader = catalog.TestPluginLoaderWithOptions(t, "", map[string]string{}, pluginConfig)
  1018  	})
  1019  	defer cleanup()
  1020  
  1021  	// Create a bad token
  1022  	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
  1023  	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)
  1024  
  1025  	policyAllocExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
  1026  		[]string{acl.NamespaceCapabilityAllocExec})
  1027  	tokenAllocExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-exec", policyAllocExec)
  1028  
  1029  	policyAllocNodeExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
  1030  		[]string{acl.NamespaceCapabilityAllocExec, acl.NamespaceCapabilityAllocNodeExec})
  1031  	tokenAllocNodeExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-node-exec", policyAllocNodeExec)
  1032  
  1033  	job := mock.BatchJob()
  1034  	job.TaskGroups[0].Count = 1
  1035  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
  1036  		"run_for": "20s",
  1037  		"exec_command": map[string]interface{}{
  1038  			"run_for":       "1ms",
  1039  			"stdout_string": "some output",
  1040  		},
  1041  	}
  1042  
  1043  	// Wait for client to be running job
  1044  	testutil.WaitForRunningWithToken(t, s.RPC, job, root.SecretID)
  1045  
  1046  	// Get the allocation ID
  1047  	args := nstructs.AllocListRequest{}
  1048  	args.Region = "global"
  1049  	args.AuthToken = root.SecretID
  1050  	args.Namespace = nstructs.DefaultNamespace
  1051  	resp := nstructs.AllocListResponse{}
  1052  	require.NoError(t, s.RPC("Alloc.List", &args, &resp))
  1053  	require.Len(t, resp.Allocations, 1)
  1054  	allocID := resp.Allocations[0].ID
  1055  
  1056  	cases := []struct {
  1057  		Name          string
  1058  		Token         string
  1059  		ExpectedError string
  1060  	}{
  1061  		{
  1062  			Name:          "bad token",
  1063  			Token:         tokenBad.SecretID,
  1064  			ExpectedError: structs.ErrPermissionDenied.Error(),
  1065  		},
  1066  		{
  1067  			Name:          "alloc-exec token",
  1068  			Token:         tokenAllocExec.SecretID,
  1069  			ExpectedError: "",
  1070  		},
  1071  		{
  1072  			Name:          "alloc-node-exec token",
  1073  			Token:         tokenAllocNodeExec.SecretID,
  1074  			ExpectedError: "",
  1075  		},
  1076  		{
  1077  			Name:          "root token",
  1078  			Token:         root.SecretID,
  1079  			ExpectedError: "",
  1080  		},
  1081  	}
  1082  
  1083  	for _, c := range cases {
  1084  		t.Run(c.Name, func(t *testing.T) {
  1085  
  1086  			// Make the request
  1087  			req := &cstructs.AllocExecRequest{
  1088  				AllocID: allocID,
  1089  				Task:    job.TaskGroups[0].Tasks[0].Name,
  1090  				Tty:     true,
  1091  				Cmd:     []string{"placeholder command"},
  1092  				QueryOptions: nstructs.QueryOptions{
  1093  					Region:    "global",
  1094  					AuthToken: c.Token,
  1095  					Namespace: nstructs.DefaultNamespace,
  1096  				},
  1097  			}
  1098  
  1099  			// Get the handler
  1100  			handler, err := client.StreamingRpcHandler("Allocations.Exec")
  1101  			require.Nil(t, err)
  1102  
  1103  			// Create a pipe
  1104  			p1, p2 := net.Pipe()
  1105  			defer p1.Close()
  1106  			defer p2.Close()
  1107  
  1108  			errCh := make(chan error)
  1109  			frames := make(chan *drivers.ExecTaskStreamingResponseMsg)
  1110  
  1111  			// Start the handler
  1112  			go handler(p2)
  1113  			go decodeFrames(t, p1, frames, errCh)
  1114  
  1115  			// Send the request
  1116  			encoder := codec.NewEncoder(p1, nstructs.MsgpackHandle)
  1117  			require.Nil(t, encoder.Encode(req))
  1118  
  1119  			select {
  1120  			case <-time.After(3 * time.Second):
  1121  			case err := <-errCh:
  1122  				if c.ExpectedError == "" {
  1123  					require.NoError(t, err)
  1124  				} else {
  1125  					require.Contains(t, err.Error(), c.ExpectedError)
  1126  				}
  1127  			case f := <-frames:
  1128  				// receiving a frame is fine as long as no error is expected
  1129  				if c.ExpectedError != "" {
  1130  					require.Fail(t, "unexpected frame", "frame: %#v", f)
  1131  				}
  1132  			}
  1133  		})
  1134  	}
  1135  }
  1136  
  1137  // TestAlloc_ExecStreaming_ACL_WithIsolation_None asserts that a token also needs
  1138  // the alloc-node-exec ACL capability when no isolation is used.
  1139  func TestAlloc_ExecStreaming_ACL_WithIsolation_None(t *testing.T) {
  1140  	t.Parallel()
  1141  	isolation := drivers.FSIsolationNone
  1142  
  1143  	// Start a server and client
  1144  	s, root, cleanupS := nomad.TestACLServer(t, nil)
  1145  	defer cleanupS()
  1146  	testutil.WaitForLeader(t, s.RPC)
  1147  
  1148  	client, cleanup := TestClient(t, func(c *config.Config) {
  1149  		c.ACLEnabled = true
  1150  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
  1151  
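        		// Configure the mock driver to report no filesystem isolation; in
        		// this mode exec additionally requires the alloc-node-exec capability
        		// (see the cases below).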
  1152  		pluginConfig := []*nconfig.PluginConfig{
  1153  			{
  1154  				Name: "mock_driver",
  1155  				Config: map[string]interface{}{
  1156  					"fs_isolation": string(isolation),
  1157  				},
  1158  			},
  1159  		}
  1160  
  1161  		c.PluginLoader = catalog.TestPluginLoaderWithOptions(t, "", map[string]string{}, pluginConfig)
  1162  	})
  1163  	defer cleanup()
  1164  
  1165  	// Create a bad token
  1166  	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
  1167  	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)
  1168  
  1169  	policyAllocExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
  1170  		[]string{acl.NamespaceCapabilityAllocExec})
  1171  	tokenAllocExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-exec", policyAllocExec)
  1172  
  1173  	policyAllocNodeExec := mock.NamespacePolicy(structs.DefaultNamespace, "",
  1174  		[]string{acl.NamespaceCapabilityAllocExec, acl.NamespaceCapabilityAllocNodeExec})
  1175  	tokenAllocNodeExec := mock.CreatePolicyAndToken(t, s.State(), 1009, "alloc-node-exec", policyAllocNodeExec)
  1176  
  1177  	job := mock.BatchJob()
  1178  	job.TaskGroups[0].Count = 1
  1179  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
  1180  		"run_for": "20s",
  1181  		"exec_command": map[string]interface{}{
  1182  			"run_for":       "1ms",
  1183  			"stdout_string": "some output",
  1184  		},
  1185  	}
  1186  
  1187  	// Wait for client to be running job
  1188  	testutil.WaitForRunningWithToken(t, s.RPC, job, root.SecretID)
  1189  
  1190  	// Get the allocation ID
  1191  	args := nstructs.AllocListRequest{}
  1192  	args.Region = "global"
  1193  	args.AuthToken = root.SecretID
  1194  	args.Namespace = nstructs.DefaultNamespace
  1195  	resp := nstructs.AllocListResponse{}
  1196  	require.NoError(t, s.RPC("Alloc.List", &args, &resp))
  1197  	require.Len(t, resp.Allocations, 1)
  1198  	allocID := resp.Allocations[0].ID
  1199  
  1200  	cases := []struct {
  1201  		Name          string
  1202  		Token         string
  1203  		ExpectedError string
  1204  	}{
  1205  		{
  1206  			Name:          "bad token",
  1207  			Token:         tokenBad.SecretID,
  1208  			ExpectedError: structs.ErrPermissionDenied.Error(),
  1209  		},
  1210  		{
  1211  			Name:          "alloc-exec token",
  1212  			Token:         tokenAllocExec.SecretID,
  1213  			ExpectedError: structs.ErrPermissionDenied.Error(),
  1214  		},
  1215  		{
  1216  			Name:          "alloc-node-exec token",
  1217  			Token:         tokenAllocNodeExec.SecretID,
  1218  			ExpectedError: "",
  1219  		},
  1220  		{
  1221  			Name:          "root token",
  1222  			Token:         root.SecretID,
  1223  			ExpectedError: "",
  1224  		},
  1225  	}
  1226  
  1227  	for _, c := range cases {
  1228  		t.Run(c.Name, func(t *testing.T) {
  1229  
  1230  			// Make the request
  1231  			req := &cstructs.AllocExecRequest{
  1232  				AllocID: allocID,
  1233  				Task:    job.TaskGroups[0].Tasks[0].Name,
  1234  				Tty:     true,
  1235  				Cmd:     []string{"placeholder command"},
  1236  				QueryOptions: nstructs.QueryOptions{
  1237  					Region:    "global",
  1238  					AuthToken: c.Token,
  1239  					Namespace: nstructs.DefaultNamespace,
  1240  				},
  1241  			}
  1242  
  1243  			// Get the handler
  1244  			handler, err := client.StreamingRpcHandler("Allocations.Exec")
  1245  			require.Nil(t, err)
  1246  
  1247  			// Create a pipe
  1248  			p1, p2 := net.Pipe()
  1249  			defer p1.Close()
  1250  			defer p2.Close()
  1251  
  1252  			errCh := make(chan error)
  1253  			frames := make(chan *drivers.ExecTaskStreamingResponseMsg)
  1254  
  1255  			// Start the handler
  1256  			go handler(p2)
  1257  			go decodeFrames(t, p1, frames, errCh)
  1258  
  1259  			// Send the request
  1260  			encoder := codec.NewEncoder(p1, nstructs.MsgpackHandle)
  1261  			require.Nil(t, encoder.Encode(req))
  1262  
  1263  			select {
  1264  			case <-time.After(3 * time.Second):
  1265  			case err := <-errCh:
  1266  				if c.ExpectedError == "" {
  1267  					require.NoError(t, err)
  1268  				} else {
  1269  					require.Contains(t, err.Error(), c.ExpectedError)
  1270  				}
  1271  			case f := <-frames:
  1272  				// receiving a frame is fine as long as no error is expected
  1273  				if c.ExpectedError != "" {
  1274  					require.Fail(t, "unexpected frame", "frame: %#v", f)
  1275  				}
  1276  			}
  1277  		})
  1278  	}
  1279  }
  1280  
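        // decodeFrames reads msgpack-encoded StreamErrWrapper messages from p1 until
        // the connection is closed, forwarding stream errors to errCh and decoded
        // ExecTaskStreamingResponseMsg frames to the frames channel.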
  1281  func decodeFrames(t *testing.T, p1 net.Conn, frames chan<- *drivers.ExecTaskStreamingResponseMsg, errCh chan<- error) {
  1282  	// Start the decoder
  1283  	decoder := codec.NewDecoder(p1, nstructs.MsgpackHandle)
  1284  
  1285  	for {
  1286  		var msg cstructs.StreamErrWrapper
  1287  		if err := decoder.Decode(&msg); err != nil {
  1288  			if err == io.EOF || strings.Contains(err.Error(), "closed") {
  1289  				return
  1290  			}
  1291  			t.Logf("received error decoding: %#v", err)
  1292  
  1293  			errCh <- fmt.Errorf("error decoding: %v", err)
  1294  			return
  1295  		}
  1296  
  1297  		if msg.Error != nil {
  1298  			errCh <- msg.Error
  1299  			continue
  1300  		}
  1301  
  1302  		var frame drivers.ExecTaskStreamingResponseMsg
  1303  		if err := json.Unmarshal(msg.Payload, &frame); err != nil {
  1304  			errCh <- err
  1305  			return
  1306  		}
  1307  		t.Logf("received message: %#v", msg)
  1308  		frames <- &frame
  1309  	}
  1310  }