github.com/ferranbt/nomad@v0.9.3-0.20190607002617-85c449b7667c/client/fs_endpoint_test.go

     1  package client
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"io"
     7  	"io/ioutil"
     8  	"math"
     9  	"net"
    10  	"os"
    11  	"path/filepath"
    12  	"reflect"
    13  	"runtime"
    14  	"strings"
    15  	"sync"
    16  	"testing"
    17  	"time"
    18  
    19  	"github.com/hashicorp/nomad/acl"
    20  	"github.com/hashicorp/nomad/client/allocdir"
    21  	"github.com/hashicorp/nomad/client/config"
    22  	sframer "github.com/hashicorp/nomad/client/lib/streamframer"
    23  	cstructs "github.com/hashicorp/nomad/client/structs"
    24  	"github.com/hashicorp/nomad/helper/testlog"
    25  	"github.com/hashicorp/nomad/helper/uuid"
    26  	"github.com/hashicorp/nomad/nomad"
    27  	"github.com/hashicorp/nomad/nomad/mock"
    28  	"github.com/hashicorp/nomad/nomad/structs"
    29  	"github.com/hashicorp/nomad/testutil"
    30  	"github.com/stretchr/testify/require"
    31  	"github.com/ugorji/go/codec"
    32  )
    33  
    34  // tempAllocDir returns a new alloc dir that is rooted in a temp dir. The caller
    35  // should destroy the temp dir.
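         // Typical use in these tests:
         //
         //	ad := tempAllocDir(t)
         //	defer os.RemoveAll(ad.AllocDir)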
    36  func tempAllocDir(t testing.TB) *allocdir.AllocDir {
    37  	dir, err := ioutil.TempDir("", "nomadtest")
    38  	if err != nil {
    39  		t.Fatalf("TempDir() failed: %v", err)
    40  	}
    41  
    42  	if err := os.Chmod(dir, 0777); err != nil {
    43  		t.Fatalf("failed to chmod dir: %v", err)
    44  	}
    45  
    46  	return allocdir.NewAllocDir(testlog.HCLogger(t), dir)
    47  }
    48  
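         // nopWriteCloser wraps an io.Writer so it satisfies io.WriteCloser with a
         // no-op Close.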
    49  type nopWriteCloser struct {
    50  	io.Writer
    51  }
    52  
    53  func (n nopWriteCloser) Close() error {
    54  	return nil
    55  }
    56  
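         // TestFS_Stat_NoAlloc asserts that a FileSystem.Stat request for an unknown
         // allocation ID returns an unknown-allocation error.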
    57  func TestFS_Stat_NoAlloc(t *testing.T) {
    58  	t.Parallel()
    59  	require := require.New(t)
    60  
    61  	// Start a client
    62  	c, cleanup := TestClient(t, nil)
    63  	defer cleanup()
    64  
    65  	// Make the request with bad allocation id
    66  	req := &cstructs.FsStatRequest{
    67  		AllocID:      uuid.Generate(),
    68  		Path:         "foo",
    69  		QueryOptions: structs.QueryOptions{Region: "global"},
    70  	}
    71  
    72  	var resp cstructs.FsStatResponse
    73  	err := c.ClientRPC("FileSystem.Stat", req, &resp)
    74  	require.NotNil(err)
    75  	require.True(structs.IsErrUnknownAllocation(err))
    76  }
    77  
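         // TestFS_Stat asserts that stat'ing the root of a running allocation's
         // directory succeeds and reports a directory.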
    78  func TestFS_Stat(t *testing.T) {
    79  	t.Parallel()
    80  	require := require.New(t)
    81  
    82  	// Start a server and client
    83  	s := nomad.TestServer(t, nil)
    84  	defer s.Shutdown()
    85  	testutil.WaitForLeader(t, s.RPC)
    86  
    87  	c, cleanup := TestClient(t, func(c *config.Config) {
    88  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
    89  	})
    90  	defer cleanup()
    91  
    92  	// Create and add an alloc
    93  	job := mock.BatchJob()
    94  	job.TaskGroups[0].Count = 1
    95  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
    96  		"run_for": "500ms",
    97  	}
    98  	// Wait for alloc to be running
    99  	alloc := testutil.WaitForRunning(t, s.RPC, job)[0]
   100  
    101  	// Make the request
   102  	req := &cstructs.FsStatRequest{
   103  		AllocID:      alloc.ID,
   104  		Path:         "/",
   105  		QueryOptions: structs.QueryOptions{Region: "global"},
   106  	}
   107  
   108  	var resp cstructs.FsStatResponse
   109  	err := c.ClientRPC("FileSystem.Stat", req, &resp)
   110  	require.Nil(err)
   111  	require.NotNil(resp.Info)
   112  	require.True(resp.Info.IsDir)
   113  }
   114  
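         // TestFS_Stat_ACL asserts that FileSystem.Stat enforces namespace ACLs: a
         // token without read-fs is denied, while authorized and root tokens pass the
         // ACL check and then fail on the unknown allocation ID.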
   115  func TestFS_Stat_ACL(t *testing.T) {
   116  	t.Parallel()
   117  	require := require.New(t)
   118  
   119  	// Start a server
   120  	s, root := nomad.TestACLServer(t, nil)
   121  	defer s.Shutdown()
   122  	testutil.WaitForLeader(t, s.RPC)
   123  
   124  	client, cleanup := TestClient(t, func(c *config.Config) {
   125  		c.ACLEnabled = true
   126  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   127  	})
   128  	defer cleanup()
   129  
   130  	// Create a bad token
   131  	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
   132  	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)
   133  
   134  	policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "",
   135  		[]string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS})
   136  	tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)
   137  
   138  	cases := []struct {
   139  		Name          string
   140  		Token         string
   141  		ExpectedError string
   142  	}{
   143  		{
   144  			Name:          "bad token",
   145  			Token:         tokenBad.SecretID,
   146  			ExpectedError: structs.ErrPermissionDenied.Error(),
   147  		},
   148  		{
   149  			Name:          "good token",
   150  			Token:         tokenGood.SecretID,
   151  			ExpectedError: structs.ErrUnknownAllocationPrefix,
   152  		},
   153  		{
   154  			Name:          "root token",
   155  			Token:         root.SecretID,
   156  			ExpectedError: structs.ErrUnknownAllocationPrefix,
   157  		},
   158  	}
   159  
   160  	for _, c := range cases {
   161  		t.Run(c.Name, func(t *testing.T) {
   162  			// Make the request with bad allocation id
   163  			req := &cstructs.FsStatRequest{
   164  				AllocID: uuid.Generate(),
   165  				Path:    "/",
   166  				QueryOptions: structs.QueryOptions{
   167  					Region:    "global",
   168  					AuthToken: c.Token,
   169  					Namespace: structs.DefaultNamespace,
   170  				},
   171  			}
   172  
   173  			var resp cstructs.FsStatResponse
   174  			err := client.ClientRPC("FileSystem.Stat", req, &resp)
   175  			require.NotNil(err)
   176  			require.Contains(err.Error(), c.ExpectedError)
   177  		})
   178  	}
   179  }
   180  
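         // TestFS_List_NoAlloc is the FileSystem.List analog of TestFS_Stat_NoAlloc.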
   181  func TestFS_List_NoAlloc(t *testing.T) {
   182  	t.Parallel()
   183  	require := require.New(t)
   184  
   185  	// Start a client
   186  	c, cleanup := TestClient(t, nil)
   187  	defer cleanup()
   188  
   189  	// Make the request with bad allocation id
   190  	req := &cstructs.FsListRequest{
   191  		AllocID:      uuid.Generate(),
   192  		Path:         "foo",
   193  		QueryOptions: structs.QueryOptions{Region: "global"},
   194  	}
   195  
   196  	var resp cstructs.FsListResponse
   197  	err := c.ClientRPC("FileSystem.List", req, &resp)
   198  	require.NotNil(err)
   199  	require.True(structs.IsErrUnknownAllocation(err))
   200  }
   201  
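         // TestFS_List asserts that listing the root of a running allocation's
         // directory returns entries and that the first entry is a directory.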
   202  func TestFS_List(t *testing.T) {
   203  	t.Parallel()
   204  	require := require.New(t)
   205  
   206  	// Start a server and client
   207  	s := nomad.TestServer(t, nil)
   208  	defer s.Shutdown()
   209  	testutil.WaitForLeader(t, s.RPC)
   210  
   211  	c, cleanup := TestClient(t, func(c *config.Config) {
   212  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   213  	})
   214  	defer cleanup()
   215  
   216  	// Create and add an alloc
   217  	job := mock.BatchJob()
   218  	job.TaskGroups[0].Count = 1
   219  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   220  		"run_for": "500ms",
   221  	}
   222  	// Wait for alloc to be running
   223  	alloc := testutil.WaitForRunning(t, s.RPC, job)[0]
   224  
   225  	// Make the request
   226  	req := &cstructs.FsListRequest{
   227  		AllocID:      alloc.ID,
   228  		Path:         "/",
   229  		QueryOptions: structs.QueryOptions{Region: "global"},
   230  	}
   231  
   232  	var resp cstructs.FsListResponse
   233  	err := c.ClientRPC("FileSystem.List", req, &resp)
   234  	require.Nil(err)
   235  	require.NotEmpty(resp.Files)
   236  	require.True(resp.Files[0].IsDir)
   237  }
   238  
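         // TestFS_List_ACL asserts that FileSystem.List enforces namespace ACLs
         // (same cases as TestFS_Stat_ACL).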
   239  func TestFS_List_ACL(t *testing.T) {
   240  	t.Parallel()
   241  	require := require.New(t)
   242  
   243  	// Start a server
   244  	s, root := nomad.TestACLServer(t, nil)
   245  	defer s.Shutdown()
   246  	testutil.WaitForLeader(t, s.RPC)
   247  
   248  	client, cleanup := TestClient(t, func(c *config.Config) {
   249  		c.ACLEnabled = true
   250  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   251  	})
   252  	defer cleanup()
   253  
   254  	// Create a bad token
   255  	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityDeny})
   256  	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)
   257  
   258  	policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "",
   259  		[]string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS})
   260  	tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)
   261  
   262  	cases := []struct {
   263  		Name          string
   264  		Token         string
   265  		ExpectedError string
   266  	}{
   267  		{
   268  			Name:          "bad token",
   269  			Token:         tokenBad.SecretID,
   270  			ExpectedError: structs.ErrPermissionDenied.Error(),
   271  		},
   272  		{
   273  			Name:          "good token",
   274  			Token:         tokenGood.SecretID,
   275  			ExpectedError: structs.ErrUnknownAllocationPrefix,
   276  		},
   277  		{
   278  			Name:          "root token",
   279  			Token:         root.SecretID,
   280  			ExpectedError: structs.ErrUnknownAllocationPrefix,
   281  		},
   282  	}
   283  
   284  	for _, c := range cases {
   285  		t.Run(c.Name, func(t *testing.T) {
   286  			// Make the request with bad allocation id
   287  			req := &cstructs.FsListRequest{
   288  				AllocID: uuid.Generate(),
   289  				Path:    "/",
   290  				QueryOptions: structs.QueryOptions{
   291  					Region:    "global",
   292  					AuthToken: c.Token,
   293  					Namespace: structs.DefaultNamespace,
   294  				},
   295  			}
   296  
   297  			var resp cstructs.FsListResponse
   298  			err := client.ClientRPC("FileSystem.List", req, &resp)
   299  			require.NotNil(err)
   300  			require.Contains(err.Error(), c.ExpectedError)
   301  		})
   302  	}
   303  }
   304  
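         // TestFS_Stream_NoAlloc asserts that streaming a file for an unknown
         // allocation ID returns an unknown-allocation error. It also shows the
         // harness shared by the streaming tests below: hook the streaming RPC
         // handler to one end of a net.Pipe, msgpack-encode the request on the other
         // end, and decode StreamErrWrapper frames until the expected result arrives
         // or the test times out.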
   305  func TestFS_Stream_NoAlloc(t *testing.T) {
   306  	t.Parallel()
   307  	require := require.New(t)
   308  
   309  	// Start a client
   310  	c, cleanup := TestClient(t, nil)
   311  	defer cleanup()
   312  
   313  	// Make the request with bad allocation id
   314  	req := &cstructs.FsStreamRequest{
   315  		AllocID:      uuid.Generate(),
   316  		Path:         "foo",
   317  		Origin:       "start",
   318  		QueryOptions: structs.QueryOptions{Region: "global"},
   319  	}
   320  
   321  	// Get the handler
   322  	handler, err := c.StreamingRpcHandler("FileSystem.Stream")
   323  	require.Nil(err)
   324  
   325  	// Create a pipe
   326  	p1, p2 := net.Pipe()
   327  	defer p1.Close()
   328  	defer p2.Close()
   329  
   330  	errCh := make(chan error)
   331  	streamMsg := make(chan *cstructs.StreamErrWrapper)
   332  
   333  	// Start the handler
   334  	go handler(p2)
   335  
   336  	// Start the decoder
   337  	go func() {
   338  		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
   339  		for {
   340  			var msg cstructs.StreamErrWrapper
   341  			if err := decoder.Decode(&msg); err != nil {
   342  				if err == io.EOF || strings.Contains(err.Error(), "closed") {
   343  					return
   344  				}
   345  				errCh <- fmt.Errorf("error decoding: %v", err)
   346  			}
   347  
   348  			streamMsg <- &msg
   349  		}
   350  	}()
   351  
   352  	// Send the request
   353  	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
   354  	require.Nil(encoder.Encode(req))
   355  
   356  	timeout := time.After(3 * time.Second)
   357  
   358  OUTER:
   359  	for {
   360  		select {
   361  		case <-timeout:
   362  			t.Fatal("timeout")
   363  		case err := <-errCh:
   364  			t.Fatal(err)
   365  		case msg := <-streamMsg:
   366  			t.Logf("Got msg %+v", msg)
   367  			if msg.Error == nil {
   368  				continue
   369  			}
   370  
   371  			if structs.IsErrUnknownAllocation(msg.Error) {
   372  				break OUTER
   373  			} else {
    374  				t.Fatalf("bad error: %v", msg.Error)
   375  			}
   376  		}
   377  	}
   378  }
   379  
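         // TestFS_Stream_ACL asserts that FileSystem.Stream enforces namespace ACLs.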
   380  func TestFS_Stream_ACL(t *testing.T) {
   381  	t.Parallel()
   382  	require := require.New(t)
   383  
   384  	// Start a server
   385  	s, root := nomad.TestACLServer(t, nil)
   386  	defer s.Shutdown()
   387  	testutil.WaitForLeader(t, s.RPC)
   388  
   389  	client, cleanup := TestClient(t, func(c *config.Config) {
   390  		c.ACLEnabled = true
   391  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   392  	})
   393  	defer cleanup()
   394  
   395  	// Create a bad token
   396  	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS})
   397  	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)
   398  
   399  	policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "",
   400  		[]string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS})
   401  	tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)
   402  
   403  	cases := []struct {
   404  		Name          string
   405  		Token         string
   406  		ExpectedError string
   407  	}{
   408  		{
   409  			Name:          "bad token",
   410  			Token:         tokenBad.SecretID,
   411  			ExpectedError: structs.ErrPermissionDenied.Error(),
   412  		},
   413  		{
   414  			Name:          "good token",
   415  			Token:         tokenGood.SecretID,
   416  			ExpectedError: structs.ErrUnknownAllocationPrefix,
   417  		},
   418  		{
   419  			Name:          "root token",
   420  			Token:         root.SecretID,
   421  			ExpectedError: structs.ErrUnknownAllocationPrefix,
   422  		},
   423  	}
   424  
   425  	for _, c := range cases {
   426  		t.Run(c.Name, func(t *testing.T) {
   427  			// Make the request with bad allocation id
   428  			req := &cstructs.FsStreamRequest{
   429  				AllocID: uuid.Generate(),
   430  				Path:    "foo",
   431  				Origin:  "start",
   432  				QueryOptions: structs.QueryOptions{
   433  					Namespace: structs.DefaultNamespace,
   434  					Region:    "global",
   435  					AuthToken: c.Token,
   436  				},
   437  			}
   438  
   439  			// Get the handler
   440  			handler, err := client.StreamingRpcHandler("FileSystem.Stream")
   441  			require.Nil(err)
   442  
   443  			// Create a pipe
   444  			p1, p2 := net.Pipe()
   445  			defer p1.Close()
   446  			defer p2.Close()
   447  
   448  			errCh := make(chan error)
   449  			streamMsg := make(chan *cstructs.StreamErrWrapper)
   450  
   451  			// Start the handler
   452  			go handler(p2)
   453  
   454  			// Start the decoder
   455  			go func() {
   456  				decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
   457  				for {
   458  					var msg cstructs.StreamErrWrapper
   459  					if err := decoder.Decode(&msg); err != nil {
   460  						if err == io.EOF || strings.Contains(err.Error(), "closed") {
   461  							return
   462  						}
   463  						errCh <- fmt.Errorf("error decoding: %v", err)
   464  					}
   465  
   466  					streamMsg <- &msg
   467  				}
   468  			}()
   469  
   470  			// Send the request
   471  			encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
   472  			require.Nil(encoder.Encode(req))
   473  
   474  			timeout := time.After(5 * time.Second)
   475  
   476  		OUTER:
   477  			for {
   478  				select {
   479  				case <-timeout:
   480  					t.Fatal("timeout")
   481  				case err := <-errCh:
   482  					t.Fatal(err)
   483  				case msg := <-streamMsg:
   484  					if msg.Error == nil {
   485  						continue
   486  					}
   487  
   488  					if strings.Contains(msg.Error.Error(), c.ExpectedError) {
   489  						break OUTER
   490  					} else {
   491  						t.Fatalf("Bad error: %v", msg.Error)
   492  					}
   493  				}
   494  			}
   495  		})
   496  	}
   497  }
   498  
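         // TestFS_Stream asserts that streaming a file from a running allocation
         // returns its contents and that the server closes its end of the connection
         // when the RPC finishes.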
   499  func TestFS_Stream(t *testing.T) {
   500  	t.Parallel()
   501  	require := require.New(t)
   502  
   503  	// Start a server and client
   504  	s := nomad.TestServer(t, nil)
   505  	defer s.Shutdown()
   506  	testutil.WaitForLeader(t, s.RPC)
   507  
   508  	c, cleanup := TestClient(t, func(c *config.Config) {
   509  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   510  	})
   511  	defer cleanup()
   512  
   513  	expected := "Hello from the other side"
   514  	job := mock.BatchJob()
   515  	job.TaskGroups[0].Count = 1
   516  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   517  		"run_for":       "2s",
   518  		"stdout_string": expected,
   519  	}
   520  
   521  	// Wait for alloc to be running
   522  	alloc := testutil.WaitForRunning(t, s.RPC, job)[0]
   523  
   524  	// Make the request
   525  	req := &cstructs.FsStreamRequest{
   526  		AllocID:      alloc.ID,
   527  		Path:         "alloc/logs/web.stdout.0",
   528  		PlainText:    true,
   529  		QueryOptions: structs.QueryOptions{Region: "global"},
   530  	}
   531  
   532  	// Get the handler
   533  	handler, err := c.StreamingRpcHandler("FileSystem.Stream")
   534  	require.Nil(err)
   535  
   536  	// Create a pipe
   537  	p1, p2 := net.Pipe()
   538  	defer p1.Close()
   539  	defer p2.Close()
   540  
   541  	// Wrap the pipe so we can check it is closed
   542  	pipeChecker := &ReadWriteCloseChecker{ReadWriteCloser: p2}
   543  
   544  	errCh := make(chan error)
   545  	streamMsg := make(chan *cstructs.StreamErrWrapper)
   546  
   547  	// Start the handler
   548  	go handler(pipeChecker)
   549  
   550  	// Start the decoder
   551  	go func() {
   552  		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
   553  		for {
   554  			var msg cstructs.StreamErrWrapper
   555  			if err := decoder.Decode(&msg); err != nil {
   556  				if err == io.EOF || strings.Contains(err.Error(), "closed") {
   557  					return
   558  				}
   559  				errCh <- fmt.Errorf("error decoding: %v", err)
   560  			}
   561  
   562  			streamMsg <- &msg
   563  		}
   564  	}()
   565  
   566  	// Send the request
   567  	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
   568  	require.Nil(encoder.Encode(req))
   569  
   570  	timeout := time.After(3 * time.Second)
   571  	received := ""
   572  OUTER:
   573  	for {
   574  		select {
   575  		case <-timeout:
   576  			t.Fatal("timeout")
   577  		case err := <-errCh:
   578  			t.Fatal(err)
   579  		case msg := <-streamMsg:
   580  			if msg.Error != nil {
   581  				t.Fatalf("Got error: %v", msg.Error.Error())
   582  			}
   583  
   584  			// Add the payload
   585  			received += string(msg.Payload)
   586  			if received == expected {
   587  				break OUTER
   588  			}
   589  		}
   590  	}
   591  
   592  	testutil.WaitForResult(func() (bool, error) {
   593  		pipeChecker.l.Lock()
   594  		defer pipeChecker.l.Unlock()
   595  
   596  		return pipeChecker.Closed, nil
   597  	}, func(err error) {
   598  		t.Fatal("Pipe not closed")
   599  	})
   600  }
   601  
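         // ReadWriteCloseChecker wraps an io.ReadWriteCloser and records whether
         // Close was called, so tests can assert the server side shut down the stream.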
   602  type ReadWriteCloseChecker struct {
   603  	io.ReadWriteCloser
   604  	l      sync.Mutex
   605  	Closed bool
   606  }
   607  
   608  func (r *ReadWriteCloseChecker) Close() error {
   609  	r.l.Lock()
   610  	r.Closed = true
   611  	r.l.Unlock()
   612  	return r.ReadWriteCloser.Close()
   613  }
   614  
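         // TestFS_Stream_Follow asserts that streaming with Follow=true keeps
         // delivering data as the file grows.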
   615  func TestFS_Stream_Follow(t *testing.T) {
   616  	t.Parallel()
   617  	require := require.New(t)
   618  
   619  	// Start a server and client
   620  	s := nomad.TestServer(t, nil)
   621  	defer s.Shutdown()
   622  	testutil.WaitForLeader(t, s.RPC)
   623  
   624  	c, cleanup := TestClient(t, func(c *config.Config) {
   625  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   626  	})
   627  	defer cleanup()
   628  
   629  	expectedBase := "Hello from the other side"
   630  	repeat := 10
   631  
   632  	job := mock.BatchJob()
   633  	job.TaskGroups[0].Count = 1
   634  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   635  		"run_for":                "20s",
   636  		"stdout_string":          expectedBase,
   637  		"stdout_repeat":          repeat,
   638  		"stdout_repeat_duration": "200ms",
   639  	}
   640  
   641  	// Wait for alloc to be running
   642  	alloc := testutil.WaitForRunning(t, s.RPC, job)[0]
   643  
   644  	// Make the request
   645  	req := &cstructs.FsStreamRequest{
   646  		AllocID:      alloc.ID,
   647  		Path:         "alloc/logs/web.stdout.0",
   648  		PlainText:    true,
   649  		Follow:       true,
   650  		QueryOptions: structs.QueryOptions{Region: "global"},
   651  	}
   652  
   653  	// Get the handler
   654  	handler, err := c.StreamingRpcHandler("FileSystem.Stream")
   655  	require.Nil(err)
   656  
   657  	// Create a pipe
   658  	p1, p2 := net.Pipe()
   659  	defer p1.Close()
   660  	defer p2.Close()
   661  
   662  	errCh := make(chan error)
   663  	streamMsg := make(chan *cstructs.StreamErrWrapper)
   664  
   665  	// Start the handler
   666  	go handler(p2)
   667  
   668  	// Start the decoder
   669  	go func() {
   670  		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
   671  		for {
   672  			var msg cstructs.StreamErrWrapper
   673  			if err := decoder.Decode(&msg); err != nil {
   674  				if err == io.EOF || strings.Contains(err.Error(), "closed") {
   675  					return
   676  				}
   677  				errCh <- fmt.Errorf("error decoding: %v", err)
   678  			}
   679  
   680  			streamMsg <- &msg
   681  		}
   682  	}()
   683  
   684  	// Send the request
   685  	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
   686  	require.Nil(encoder.Encode(req))
   687  
   688  	timeout := time.After(20 * time.Second)
   689  	expected := strings.Repeat(expectedBase, repeat+1)
   690  	received := ""
   691  OUTER:
   692  	for {
   693  		select {
   694  		case <-timeout:
   695  			t.Fatal("timeout")
   696  		case err := <-errCh:
   697  			t.Fatal(err)
   698  		case msg := <-streamMsg:
   699  			if msg.Error != nil {
   700  				t.Fatalf("Got error: %v", msg.Error.Error())
   701  			}
   702  
   703  			// Add the payload
   704  			received += string(msg.Payload)
   705  			if received == expected {
   706  				break OUTER
   707  			}
   708  		}
   709  	}
   710  }
   711  
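         // TestFS_Stream_Limit asserts that Limit truncates the streamed data to the
         // requested number of bytes.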
   712  func TestFS_Stream_Limit(t *testing.T) {
   713  	t.Parallel()
   714  	require := require.New(t)
   715  
   716  	// Start a server and client
   717  	s := nomad.TestServer(t, nil)
   718  	defer s.Shutdown()
   719  	testutil.WaitForLeader(t, s.RPC)
   720  
   721  	c, cleanup := TestClient(t, func(c *config.Config) {
   722  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   723  	})
   724  	defer cleanup()
   725  
   726  	var limit int64 = 5
   727  	full := "Hello from the other side"
   728  	expected := full[:limit]
   729  	job := mock.BatchJob()
   730  	job.TaskGroups[0].Count = 1
   731  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   732  		"run_for":       "2s",
   733  		"stdout_string": full,
   734  	}
   735  
   736  	// Wait for alloc to be running
   737  	alloc := testutil.WaitForRunning(t, s.RPC, job)[0]
   738  
   739  	// Make the request
   740  	req := &cstructs.FsStreamRequest{
   741  		AllocID:      alloc.ID,
   742  		Path:         "alloc/logs/web.stdout.0",
   743  		PlainText:    true,
   744  		Limit:        limit,
   745  		QueryOptions: structs.QueryOptions{Region: "global"},
   746  	}
   747  
   748  	// Get the handler
   749  	handler, err := c.StreamingRpcHandler("FileSystem.Stream")
   750  	require.Nil(err)
   751  
   752  	// Create a pipe
   753  	p1, p2 := net.Pipe()
   754  	defer p1.Close()
   755  	defer p2.Close()
   756  
   757  	errCh := make(chan error)
   758  	streamMsg := make(chan *cstructs.StreamErrWrapper)
   759  
   760  	// Start the handler
   761  	go handler(p2)
   762  
   763  	// Start the decoder
   764  	go func() {
   765  		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
   766  		for {
   767  			var msg cstructs.StreamErrWrapper
   768  			if err := decoder.Decode(&msg); err != nil {
   769  				if err == io.EOF || strings.Contains(err.Error(), "closed") {
   770  					return
   771  				}
   772  				errCh <- fmt.Errorf("error decoding: %v", err)
   773  			}
   774  
   775  			streamMsg <- &msg
   776  		}
   777  	}()
   778  
   779  	// Send the request
   780  	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
   781  	require.Nil(encoder.Encode(req))
   782  
   783  	timeout := time.After(3 * time.Second)
   784  	received := ""
   785  OUTER:
   786  	for {
   787  		select {
   788  		case <-timeout:
   789  			t.Fatal("timeout")
   790  		case err := <-errCh:
   791  			t.Fatal(err)
   792  		case msg := <-streamMsg:
   793  			if msg.Error != nil {
   794  				t.Fatalf("Got error: %v", msg.Error.Error())
   795  			}
   796  
   797  			// Add the payload
   798  			received += string(msg.Payload)
   799  			if received == expected {
   800  				break OUTER
   801  			}
   802  		}
   803  	}
   804  }
   805  
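         // TestFS_Logs_NoAlloc is the FileSystem.Logs analog of TestFS_Stream_NoAlloc.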
   806  func TestFS_Logs_NoAlloc(t *testing.T) {
   807  	t.Parallel()
   808  	require := require.New(t)
   809  
   810  	// Start a client
   811  	c, cleanup := TestClient(t, nil)
   812  	defer cleanup()
   813  
   814  	// Make the request with bad allocation id
   815  	req := &cstructs.FsLogsRequest{
   816  		AllocID:      uuid.Generate(),
   817  		Task:         "foo",
   818  		LogType:      "stdout",
   819  		Origin:       "start",
   820  		QueryOptions: structs.QueryOptions{Region: "global"},
   821  	}
   822  
   823  	// Get the handler
   824  	handler, err := c.StreamingRpcHandler("FileSystem.Logs")
   825  	require.Nil(err)
   826  
   827  	// Create a pipe
   828  	p1, p2 := net.Pipe()
   829  	defer p1.Close()
   830  	defer p2.Close()
   831  
   832  	errCh := make(chan error)
   833  	streamMsg := make(chan *cstructs.StreamErrWrapper)
   834  
   835  	// Start the handler
   836  	go handler(p2)
   837  
   838  	// Start the decoder
   839  	go func() {
   840  		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
   841  		for {
   842  			var msg cstructs.StreamErrWrapper
   843  			if err := decoder.Decode(&msg); err != nil {
   844  				if err == io.EOF || strings.Contains(err.Error(), "closed") {
   845  					return
   846  				}
   847  				errCh <- fmt.Errorf("error decoding: %v", err)
   848  			}
   849  
   850  			streamMsg <- &msg
   851  		}
   852  	}()
   853  
   854  	// Send the request
   855  	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
   856  	require.Nil(encoder.Encode(req))
   857  
   858  	timeout := time.After(3 * time.Second)
   859  
   860  OUTER:
   861  	for {
   862  		select {
   863  		case <-timeout:
   864  			t.Fatal("timeout")
   865  		case err := <-errCh:
   866  			t.Fatal(err)
   867  		case msg := <-streamMsg:
   868  			t.Logf("Got msg %+v", msg)
   869  			if msg.Error == nil {
   870  				continue
   871  			}
   872  
   873  			if structs.IsErrUnknownAllocation(msg.Error) {
   874  				break OUTER
   875  			} else {
    876  				t.Fatalf("bad error: %v", msg.Error)
   877  			}
   878  		}
   879  	}
   880  }
   881  
   882  // TestFS_Logs_TaskPending asserts that trying to stream logs for tasks which
   883  // have not started returns a 404 error.
   884  func TestFS_Logs_TaskPending(t *testing.T) {
   885  	t.Parallel()
   886  	require := require.New(t)
   887  
   888  	// Start a server and client
   889  	s := nomad.TestServer(t, nil)
   890  	defer s.Shutdown()
   891  	testutil.WaitForLeader(t, s.RPC)
   892  
   893  	c, cleanup := TestClient(t, func(c *config.Config) {
   894  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
   895  	})
   896  	defer cleanup()
   897  
   898  	job := mock.BatchJob()
   899  	job.TaskGroups[0].Count = 1
   900  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
   901  		"start_block_for": "10s",
   902  	}
   903  
   904  	// Register job
   905  	args := &structs.JobRegisterRequest{}
   906  	args.Job = job
   907  	args.WriteRequest.Region = "global"
   908  	var jobResp structs.JobRegisterResponse
   909  	require.NoError(s.RPC("Job.Register", args, &jobResp))
   910  
   911  	// Get the allocation ID
   912  	var allocID string
   913  	testutil.WaitForResult(func() (bool, error) {
   914  		args := structs.AllocListRequest{}
   915  		args.Region = "global"
   916  		resp := structs.AllocListResponse{}
   917  		if err := s.RPC("Alloc.List", &args, &resp); err != nil {
   918  			return false, err
   919  		}
   920  
   921  		if len(resp.Allocations) != 1 {
   922  			return false, fmt.Errorf("expected 1 alloc, found %d", len(resp.Allocations))
   923  		}
   924  
   925  		allocID = resp.Allocations[0].ID
   926  
    927  		// wait for the alloc runner to be created; otherwise we get an unknown-allocation error
   928  		if _, err := c.getAllocRunner(allocID); err != nil {
   929  			return false, fmt.Errorf("alloc runner was not created yet for %v", allocID)
   930  		}
   931  
   932  		return true, nil
   933  	}, func(err error) {
   934  		t.Fatalf("error getting alloc id: %v", err)
   935  	})
   936  
   937  	// Make the request
   938  	req := &cstructs.FsLogsRequest{
   939  		AllocID:      allocID,
   940  		Task:         job.TaskGroups[0].Tasks[0].Name,
   941  		LogType:      "stdout",
   942  		Origin:       "start",
   943  		PlainText:    true,
   944  		QueryOptions: structs.QueryOptions{Region: "global"},
   945  	}
   946  
   947  	// Get the handler
   948  	handler, err := c.StreamingRpcHandler("FileSystem.Logs")
   949  	require.Nil(err)
   950  
   951  	// Create a pipe
   952  	p1, p2 := net.Pipe()
   953  	defer p1.Close()
   954  	defer p2.Close()
   955  
   956  	errCh := make(chan error)
   957  	streamMsg := make(chan *cstructs.StreamErrWrapper)
   958  
   959  	// Start the handler
   960  	go handler(p2)
   961  
   962  	// Start the decoder
   963  	go func() {
   964  		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
   965  		for {
   966  			var msg cstructs.StreamErrWrapper
   967  			if err := decoder.Decode(&msg); err != nil {
   968  				if err == io.EOF || strings.Contains(err.Error(), "closed") {
   969  					return
   970  				}
   971  				errCh <- fmt.Errorf("error decoding: %v", err)
   972  			}
   973  
   974  			streamMsg <- &msg
   975  		}
   976  	}()
   977  
   978  	// Send the request
   979  	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
   980  	require.Nil(encoder.Encode(req))
   981  
   982  	for {
   983  		select {
   984  		case <-time.After(3 * time.Second):
   985  			t.Fatal("timeout")
   986  		case err := <-errCh:
   987  			t.Fatalf("unexpected stream error: %v", err)
   988  		case msg := <-streamMsg:
   989  			require.NotNil(msg.Error)
   990  			require.NotNil(msg.Error.Code)
   991  			require.EqualValues(404, *msg.Error.Code)
   992  			require.Contains(msg.Error.Message, "not started")
   993  			return
   994  		}
   995  	}
   996  }
   997  
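         // TestFS_Logs_ACL asserts that FileSystem.Logs enforces namespace ACLs.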
   998  func TestFS_Logs_ACL(t *testing.T) {
   999  	t.Parallel()
  1000  	require := require.New(t)
  1001  
  1002  	// Start a server
  1003  	s, root := nomad.TestACLServer(t, nil)
  1004  	defer s.Shutdown()
  1005  	testutil.WaitForLeader(t, s.RPC)
  1006  
  1007  	client, cleanup := TestClient(t, func(c *config.Config) {
  1008  		c.ACLEnabled = true
  1009  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
  1010  	})
  1011  	defer cleanup()
  1012  
  1013  	// Create a bad token
  1014  	policyBad := mock.NamespacePolicy("other", "", []string{acl.NamespaceCapabilityReadFS})
  1015  	tokenBad := mock.CreatePolicyAndToken(t, s.State(), 1005, "invalid", policyBad)
  1016  
  1017  	policyGood := mock.NamespacePolicy(structs.DefaultNamespace, "",
  1018  		[]string{acl.NamespaceCapabilityReadLogs, acl.NamespaceCapabilityReadFS})
  1019  	tokenGood := mock.CreatePolicyAndToken(t, s.State(), 1009, "valid2", policyGood)
  1020  
  1021  	cases := []struct {
  1022  		Name          string
  1023  		Token         string
  1024  		ExpectedError string
  1025  	}{
  1026  		{
  1027  			Name:          "bad token",
  1028  			Token:         tokenBad.SecretID,
  1029  			ExpectedError: structs.ErrPermissionDenied.Error(),
  1030  		},
  1031  		{
  1032  			Name:          "good token",
  1033  			Token:         tokenGood.SecretID,
  1034  			ExpectedError: structs.ErrUnknownAllocationPrefix,
  1035  		},
  1036  		{
  1037  			Name:          "root token",
  1038  			Token:         root.SecretID,
  1039  			ExpectedError: structs.ErrUnknownAllocationPrefix,
  1040  		},
  1041  	}
  1042  
  1043  	for _, c := range cases {
  1044  		t.Run(c.Name, func(t *testing.T) {
  1045  			// Make the request with bad allocation id
  1046  			req := &cstructs.FsLogsRequest{
  1047  				AllocID: uuid.Generate(),
  1048  				Task:    "foo",
  1049  				LogType: "stdout",
  1050  				Origin:  "start",
  1051  				QueryOptions: structs.QueryOptions{
  1052  					Namespace: structs.DefaultNamespace,
  1053  					Region:    "global",
  1054  					AuthToken: c.Token,
  1055  				},
  1056  			}
  1057  
  1058  			// Get the handler
  1059  			handler, err := client.StreamingRpcHandler("FileSystem.Logs")
  1060  			require.Nil(err)
  1061  
  1062  			// Create a pipe
  1063  			p1, p2 := net.Pipe()
  1064  			defer p1.Close()
  1065  			defer p2.Close()
  1066  
  1067  			errCh := make(chan error)
  1068  			streamMsg := make(chan *cstructs.StreamErrWrapper)
  1069  
  1070  			// Start the handler
  1071  			go handler(p2)
  1072  
  1073  			// Start the decoder
  1074  			go func() {
  1075  				decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
  1076  				for {
  1077  					var msg cstructs.StreamErrWrapper
  1078  					if err := decoder.Decode(&msg); err != nil {
  1079  						if err == io.EOF || strings.Contains(err.Error(), "closed") {
  1080  							return
  1081  						}
  1082  						errCh <- fmt.Errorf("error decoding: %v", err)
  1083  					}
  1084  
  1085  					streamMsg <- &msg
  1086  				}
  1087  			}()
  1088  
  1089  			// Send the request
  1090  			encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
  1091  			require.Nil(encoder.Encode(req))
  1092  
  1093  			timeout := time.After(5 * time.Second)
  1094  
  1095  		OUTER:
  1096  			for {
  1097  				select {
  1098  				case <-timeout:
  1099  					t.Fatal("timeout")
  1100  				case err := <-errCh:
  1101  					t.Fatal(err)
  1102  				case msg := <-streamMsg:
  1103  					if msg.Error == nil {
  1104  						continue
  1105  					}
  1106  
  1107  					if strings.Contains(msg.Error.Error(), c.ExpectedError) {
  1108  						break OUTER
  1109  					} else {
  1110  						t.Fatalf("Bad error: %v", msg.Error)
  1111  					}
  1112  				}
  1113  			}
  1114  		})
  1115  	}
  1116  }
  1117  
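         // TestFS_Logs asserts that streaming a task's stdout log returns the
         // expected contents.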
  1118  func TestFS_Logs(t *testing.T) {
  1119  	t.Parallel()
  1120  	require := require.New(t)
  1121  
  1122  	// Start a server and client
  1123  	s := nomad.TestServer(t, nil)
  1124  	defer s.Shutdown()
  1125  	testutil.WaitForLeader(t, s.RPC)
  1126  
  1127  	c, cleanup := TestClient(t, func(c *config.Config) {
  1128  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
  1129  	})
  1130  	defer cleanup()
  1131  
  1132  	expected := "Hello from the other side\n"
  1133  	job := mock.BatchJob()
  1134  	job.TaskGroups[0].Count = 1
  1135  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
  1136  		"run_for":       "2s",
  1137  		"stdout_string": expected,
  1138  	}
  1139  
   1140  	// Wait for the client to be running the job
  1141  	testutil.WaitForRunning(t, s.RPC, job)
  1142  
  1143  	// Get the allocation ID
  1144  	args := structs.AllocListRequest{}
  1145  	args.Region = "global"
  1146  	resp := structs.AllocListResponse{}
  1147  	require.NoError(s.RPC("Alloc.List", &args, &resp))
  1148  	require.Len(resp.Allocations, 1)
  1149  	allocID := resp.Allocations[0].ID
  1150  
  1151  	// Make the request
  1152  	req := &cstructs.FsLogsRequest{
  1153  		AllocID:      allocID,
  1154  		Task:         job.TaskGroups[0].Tasks[0].Name,
  1155  		LogType:      "stdout",
  1156  		Origin:       "start",
  1157  		PlainText:    true,
  1158  		QueryOptions: structs.QueryOptions{Region: "global"},
  1159  	}
  1160  
  1161  	// Get the handler
  1162  	handler, err := c.StreamingRpcHandler("FileSystem.Logs")
  1163  	require.Nil(err)
  1164  
  1165  	// Create a pipe
  1166  	p1, p2 := net.Pipe()
  1167  	defer p1.Close()
  1168  	defer p2.Close()
  1169  
  1170  	errCh := make(chan error)
  1171  	streamMsg := make(chan *cstructs.StreamErrWrapper)
  1172  
  1173  	// Start the handler
  1174  	go handler(p2)
  1175  
  1176  	// Start the decoder
  1177  	go func() {
  1178  		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
  1179  		for {
  1180  			var msg cstructs.StreamErrWrapper
  1181  			if err := decoder.Decode(&msg); err != nil {
  1182  				if err == io.EOF || strings.Contains(err.Error(), "closed") {
  1183  					return
  1184  				}
  1185  				errCh <- fmt.Errorf("error decoding: %v", err)
  1186  			}
  1187  
  1188  			streamMsg <- &msg
  1189  		}
  1190  	}()
  1191  
  1192  	// Send the request
  1193  	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
  1194  	require.Nil(encoder.Encode(req))
  1195  
  1196  	timeout := time.After(3 * time.Second)
  1197  	received := ""
  1198  OUTER:
  1199  	for {
  1200  		select {
  1201  		case <-timeout:
  1202  			t.Fatal("timeout")
  1203  		case err := <-errCh:
  1204  			t.Fatal(err)
  1205  		case msg := <-streamMsg:
  1206  			if msg.Error != nil {
  1207  				t.Fatalf("Got error: %v", msg.Error.Error())
  1208  			}
  1209  
  1210  			// Add the payload
  1211  			received += string(msg.Payload)
  1212  			if received == expected {
  1213  				break OUTER
  1214  			}
  1215  		}
  1216  	}
  1217  }
  1218  
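         // TestFS_Logs_Follow asserts that log streaming with Follow=true keeps
         // delivering new log data as it is written.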
  1219  func TestFS_Logs_Follow(t *testing.T) {
  1220  	t.Parallel()
  1221  	require := require.New(t)
  1222  
  1223  	// Start a server and client
  1224  	s := nomad.TestServer(t, nil)
  1225  	defer s.Shutdown()
  1226  	testutil.WaitForLeader(t, s.RPC)
  1227  
  1228  	c, cleanup := TestClient(t, func(c *config.Config) {
  1229  		c.Servers = []string{s.GetConfig().RPCAddr.String()}
  1230  	})
  1231  	defer cleanup()
  1232  
  1233  	expectedBase := "Hello from the other side\n"
  1234  	repeat := 10
  1235  
  1236  	job := mock.BatchJob()
  1237  	job.TaskGroups[0].Count = 1
  1238  	job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{
  1239  		"run_for":                "20s",
  1240  		"stdout_string":          expectedBase,
  1241  		"stdout_repeat":          repeat,
  1242  		"stdout_repeat_duration": "200ms",
  1243  	}
  1244  
   1245  	// Wait for the client to be running the job
  1246  	alloc := testutil.WaitForRunning(t, s.RPC, job)[0]
  1247  
  1248  	// Make the request
  1249  	req := &cstructs.FsLogsRequest{
  1250  		AllocID:      alloc.ID,
  1251  		Task:         job.TaskGroups[0].Tasks[0].Name,
  1252  		LogType:      "stdout",
  1253  		Origin:       "start",
  1254  		PlainText:    true,
  1255  		Follow:       true,
  1256  		QueryOptions: structs.QueryOptions{Region: "global"},
  1257  	}
  1258  
  1259  	// Get the handler
  1260  	handler, err := c.StreamingRpcHandler("FileSystem.Logs")
  1261  	require.NoError(err)
  1262  
  1263  	// Create a pipe
  1264  	p1, p2 := net.Pipe()
  1265  	defer p1.Close()
  1266  	defer p2.Close()
  1267  
  1268  	errCh := make(chan error)
  1269  	streamMsg := make(chan *cstructs.StreamErrWrapper)
  1270  
  1271  	// Start the handler
  1272  	go handler(p2)
  1273  
  1274  	// Start the decoder
  1275  	go func() {
  1276  		decoder := codec.NewDecoder(p1, structs.MsgpackHandle)
  1277  		for {
  1278  			var msg cstructs.StreamErrWrapper
  1279  			if err := decoder.Decode(&msg); err != nil {
  1280  				if err == io.EOF || strings.Contains(err.Error(), "closed") {
  1281  					return
  1282  				}
  1283  				errCh <- fmt.Errorf("error decoding: %v", err)
  1284  			}
  1285  
  1286  			streamMsg <- &msg
  1287  		}
  1288  	}()
  1289  
  1290  	// Send the request
  1291  	encoder := codec.NewEncoder(p1, structs.MsgpackHandle)
  1292  	require.Nil(encoder.Encode(req))
  1293  
  1294  	timeout := time.After(20 * time.Second)
  1295  	expected := strings.Repeat(expectedBase, repeat+1)
  1296  	received := ""
  1297  OUTER:
  1298  	for {
  1299  		select {
  1300  		case <-timeout:
  1301  			t.Fatal("timeout")
  1302  		case err := <-errCh:
  1303  			t.Fatal(err)
  1304  		case msg := <-streamMsg:
  1305  			if msg.Error != nil {
  1306  				t.Fatalf("Got error: %v", msg.Error.Error())
  1307  			}
  1308  
  1309  			// Add the payload
  1310  			received += string(msg.Payload)
  1311  			if received == expected {
  1312  				break OUTER
  1313  			}
  1314  		}
  1315  	}
  1316  }
  1317  
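         // TestFS_findClosest exercises findClosest's selection of log file, index,
         // and offset for a desired index/offset, covering error, edge, middle, and
         // end cases.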
  1318  func TestFS_findClosest(t *testing.T) {
  1319  	task := "foo"
  1320  	entries := []*cstructs.AllocFileInfo{
  1321  		{
  1322  			Name: "foo.stdout.0",
  1323  			Size: 100,
  1324  		},
  1325  		{
  1326  			Name: "foo.stdout.1",
  1327  			Size: 100,
  1328  		},
  1329  		{
  1330  			Name: "foo.stdout.2",
  1331  			Size: 100,
  1332  		},
  1333  		{
  1334  			Name: "foo.stdout.3",
  1335  			Size: 100,
  1336  		},
  1337  		{
  1338  			Name: "foo.stderr.0",
  1339  			Size: 100,
  1340  		},
  1341  		{
  1342  			Name: "foo.stderr.1",
  1343  			Size: 100,
  1344  		},
  1345  		{
  1346  			Name: "foo.stderr.2",
  1347  			Size: 100,
  1348  		},
  1349  	}
  1350  
  1351  	cases := []struct {
  1352  		Entries        []*cstructs.AllocFileInfo
  1353  		DesiredIdx     int64
  1354  		DesiredOffset  int64
  1355  		Task           string
  1356  		LogType        string
  1357  		ExpectedFile   string
  1358  		ExpectedIdx    int64
  1359  		ExpectedOffset int64
  1360  		Error          bool
  1361  	}{
  1362  		// Test error cases
  1363  		{
  1364  			Entries:    nil,
  1365  			DesiredIdx: 0,
  1366  			Task:       task,
  1367  			LogType:    "stdout",
  1368  			Error:      true,
  1369  		},
  1370  		{
  1371  			Entries:    entries[0:3],
  1372  			DesiredIdx: 0,
  1373  			Task:       task,
  1374  			LogType:    "stderr",
  1375  			Error:      true,
  1376  		},
  1377  
  1378  		// Test beginning cases
  1379  		{
  1380  			Entries:      entries,
  1381  			DesiredIdx:   0,
  1382  			Task:         task,
  1383  			LogType:      "stdout",
  1384  			ExpectedFile: entries[0].Name,
  1385  			ExpectedIdx:  0,
  1386  		},
  1387  		{
  1388  			// Desired offset should be ignored at edges
  1389  			Entries:        entries,
  1390  			DesiredIdx:     0,
  1391  			DesiredOffset:  -100,
  1392  			Task:           task,
  1393  			LogType:        "stdout",
  1394  			ExpectedFile:   entries[0].Name,
  1395  			ExpectedIdx:    0,
  1396  			ExpectedOffset: 0,
  1397  		},
  1398  		{
  1399  			// Desired offset should be ignored at edges
  1400  			Entries:        entries,
  1401  			DesiredIdx:     1,
  1402  			DesiredOffset:  -1000,
  1403  			Task:           task,
  1404  			LogType:        "stdout",
  1405  			ExpectedFile:   entries[0].Name,
  1406  			ExpectedIdx:    0,
  1407  			ExpectedOffset: 0,
  1408  		},
  1409  		{
  1410  			Entries:      entries,
  1411  			DesiredIdx:   0,
  1412  			Task:         task,
  1413  			LogType:      "stderr",
  1414  			ExpectedFile: entries[4].Name,
  1415  			ExpectedIdx:  0,
  1416  		},
  1417  		{
  1418  			Entries:      entries,
  1419  			DesiredIdx:   0,
  1420  			Task:         task,
  1421  			LogType:      "stdout",
  1422  			ExpectedFile: entries[0].Name,
  1423  			ExpectedIdx:  0,
  1424  		},
  1425  
  1426  		// Test middle cases
  1427  		{
  1428  			Entries:      entries,
  1429  			DesiredIdx:   1,
  1430  			Task:         task,
  1431  			LogType:      "stdout",
  1432  			ExpectedFile: entries[1].Name,
  1433  			ExpectedIdx:  1,
  1434  		},
  1435  		{
  1436  			Entries:        entries,
  1437  			DesiredIdx:     1,
  1438  			DesiredOffset:  10,
  1439  			Task:           task,
  1440  			LogType:        "stdout",
  1441  			ExpectedFile:   entries[1].Name,
  1442  			ExpectedIdx:    1,
  1443  			ExpectedOffset: 10,
  1444  		},
  1445  		{
  1446  			Entries:        entries,
  1447  			DesiredIdx:     1,
  1448  			DesiredOffset:  110,
  1449  			Task:           task,
  1450  			LogType:        "stdout",
  1451  			ExpectedFile:   entries[2].Name,
  1452  			ExpectedIdx:    2,
  1453  			ExpectedOffset: 10,
  1454  		},
  1455  		{
  1456  			Entries:      entries,
  1457  			DesiredIdx:   1,
  1458  			Task:         task,
  1459  			LogType:      "stderr",
  1460  			ExpectedFile: entries[5].Name,
  1461  			ExpectedIdx:  1,
  1462  		},
  1463  		// Test end cases
  1464  		{
  1465  			Entries:      entries,
  1466  			DesiredIdx:   math.MaxInt64,
  1467  			Task:         task,
  1468  			LogType:      "stdout",
  1469  			ExpectedFile: entries[3].Name,
  1470  			ExpectedIdx:  3,
  1471  		},
  1472  		{
  1473  			Entries:        entries,
  1474  			DesiredIdx:     math.MaxInt64,
  1475  			DesiredOffset:  math.MaxInt64,
  1476  			Task:           task,
  1477  			LogType:        "stdout",
  1478  			ExpectedFile:   entries[3].Name,
  1479  			ExpectedIdx:    3,
  1480  			ExpectedOffset: 100,
  1481  		},
  1482  		{
  1483  			Entries:        entries,
  1484  			DesiredIdx:     math.MaxInt64,
  1485  			DesiredOffset:  -10,
  1486  			Task:           task,
  1487  			LogType:        "stdout",
  1488  			ExpectedFile:   entries[3].Name,
  1489  			ExpectedIdx:    3,
  1490  			ExpectedOffset: 90,
  1491  		},
  1492  		{
  1493  			Entries:      entries,
  1494  			DesiredIdx:   math.MaxInt64,
  1495  			Task:         task,
  1496  			LogType:      "stderr",
  1497  			ExpectedFile: entries[6].Name,
  1498  			ExpectedIdx:  2,
  1499  		},
  1500  	}
  1501  
  1502  	for i, c := range cases {
  1503  		entry, idx, offset, err := findClosest(c.Entries, c.DesiredIdx, c.DesiredOffset, c.Task, c.LogType)
  1504  		if err != nil {
  1505  			if !c.Error {
  1506  				t.Fatalf("case %d: Unexpected error: %v", i, err)
  1507  			}
  1508  			continue
  1509  		}
  1510  
  1511  		if entry.Name != c.ExpectedFile {
  1512  			t.Fatalf("case %d: Got file %q; want %q", i, entry.Name, c.ExpectedFile)
  1513  		}
  1514  		if idx != c.ExpectedIdx {
  1515  			t.Fatalf("case %d: Got index %d; want %d", i, idx, c.ExpectedIdx)
  1516  		}
  1517  		if offset != c.ExpectedOffset {
  1518  			t.Fatalf("case %d: Got offset %d; want %d", i, offset, c.ExpectedOffset)
  1519  		}
  1520  	}
  1521  }
  1522  
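         // TestFS_streamFile_NoFile asserts that streaming a nonexistent file returns
         // a file-not-found error.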
  1523  func TestFS_streamFile_NoFile(t *testing.T) {
  1524  	t.Parallel()
  1525  	require := require.New(t)
  1526  	c, cleanup := TestClient(t, nil)
  1527  	defer cleanup()
  1528  
  1529  	ad := tempAllocDir(t)
  1530  	defer os.RemoveAll(ad.AllocDir)
  1531  
  1532  	frames := make(chan *sframer.StreamFrame, 32)
  1533  	framer := sframer.NewStreamFramer(frames, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
  1534  	framer.Run()
  1535  	defer framer.Destroy()
  1536  
  1537  	err := c.endpoints.FileSystem.streamFile(
  1538  		context.Background(), 0, "foo", 0, ad, framer, nil)
  1539  	require.NotNil(err)
  1540  	if runtime.GOOS == "windows" {
  1541  		require.Contains(err.Error(), "cannot find the file")
  1542  	} else {
  1543  		require.Contains(err.Error(), "no such file")
  1544  	}
  1545  }
  1546  
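         // TestFS_streamFile_Modify asserts that streamFile picks up data appended to
         // the file after streaming has started.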
  1547  func TestFS_streamFile_Modify(t *testing.T) {
  1548  	t.Parallel()
  1549  
  1550  	c, cleanup := TestClient(t, nil)
  1551  	defer cleanup()
  1552  
  1553  	// Get a temp alloc dir
  1554  	ad := tempAllocDir(t)
  1555  	defer os.RemoveAll(ad.AllocDir)
  1556  
  1557  	// Create a file in the temp dir
  1558  	streamFile := "stream_file"
  1559  	f, err := os.Create(filepath.Join(ad.AllocDir, streamFile))
  1560  	if err != nil {
  1561  		t.Fatalf("Failed to create file: %v", err)
  1562  	}
  1563  	defer f.Close()
  1564  
  1565  	data := []byte("helloworld")
  1566  
  1567  	// Start the reader
  1568  	resultCh := make(chan struct{})
  1569  	frames := make(chan *sframer.StreamFrame, 4)
  1570  	go func() {
  1571  		var collected []byte
  1572  		for {
  1573  			frame := <-frames
  1574  			if frame.IsHeartbeat() {
  1575  				continue
  1576  			}
  1577  
  1578  			collected = append(collected, frame.Data...)
  1579  			if reflect.DeepEqual(data, collected) {
  1580  				resultCh <- struct{}{}
  1581  				return
  1582  			}
  1583  		}
  1584  	}()
  1585  
  1586  	// Write a few bytes
  1587  	if _, err := f.Write(data[:3]); err != nil {
  1588  		t.Fatalf("write failed: %v", err)
  1589  	}
  1590  
  1591  	framer := sframer.NewStreamFramer(frames, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
  1592  	framer.Run()
  1593  	defer framer.Destroy()
  1594  
  1595  	// Start streaming
  1596  	go func() {
  1597  		if err := c.endpoints.FileSystem.streamFile(
  1598  			context.Background(), 0, streamFile, 0, ad, framer, nil); err != nil {
   1599  			t.Errorf("stream() failed: %v", err) // Errorf: Fatal must not be called from a non-test goroutine
  1600  		}
  1601  	}()
  1602  
  1603  	// Sleep a little before writing more. This lets us check if the watch
  1604  	// is working.
  1605  	time.Sleep(1 * time.Duration(testutil.TestMultiplier()) * time.Second)
  1606  	if _, err := f.Write(data[3:]); err != nil {
  1607  		t.Fatalf("write failed: %v", err)
  1608  	}
  1609  
  1610  	select {
  1611  	case <-resultCh:
  1612  	case <-time.After(10 * time.Duration(testutil.TestMultiplier()) * streamBatchWindow):
  1613  		t.Fatalf("failed to send new data")
  1614  	}
  1615  }
  1616  
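         // TestFS_streamFile_Truncate asserts that streamFile emits a truncate event
         // when the file is truncated and continues streaming data written afterwards.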
  1617  func TestFS_streamFile_Truncate(t *testing.T) {
  1618  	t.Parallel()
  1619  	c, cleanup := TestClient(t, nil)
  1620  	defer cleanup()
  1621  
  1622  	// Get a temp alloc dir
  1623  	ad := tempAllocDir(t)
  1624  	defer os.RemoveAll(ad.AllocDir)
  1625  
  1626  	// Create a file in the temp dir
  1627  	data := []byte("helloworld")
  1628  	streamFile := "stream_file"
  1629  	streamFilePath := filepath.Join(ad.AllocDir, streamFile)
  1630  	f, err := os.Create(streamFilePath)
  1631  	if err != nil {
  1632  		t.Fatalf("Failed to create file: %v", err)
  1633  	}
  1634  	defer f.Close()
  1635  
  1636  	// Start the reader
  1637  	truncateCh := make(chan struct{})
  1638  	truncateClosed := false
  1639  	dataPostTruncCh := make(chan struct{})
  1640  	frames := make(chan *sframer.StreamFrame, 4)
  1641  	go func() {
  1642  		var collected []byte
  1643  		for {
  1644  			frame := <-frames
  1645  			if frame.IsHeartbeat() {
  1646  				continue
  1647  			}
  1648  
  1649  			if frame.FileEvent == truncateEvent && !truncateClosed {
  1650  				close(truncateCh)
  1651  				truncateClosed = true
  1652  			}
  1653  
  1654  			collected = append(collected, frame.Data...)
  1655  			if reflect.DeepEqual(data, collected) {
  1656  				close(dataPostTruncCh)
  1657  				return
  1658  			}
  1659  		}
  1660  	}()
  1661  
  1662  	// Write a few bytes
  1663  	if _, err := f.Write(data[:3]); err != nil {
  1664  		t.Fatalf("write failed: %v", err)
  1665  	}
  1666  
  1667  	framer := sframer.NewStreamFramer(frames, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
  1668  	framer.Run()
  1669  	defer framer.Destroy()
  1670  
  1671  	// Start streaming
  1672  	go func() {
  1673  		if err := c.endpoints.FileSystem.streamFile(
  1674  			context.Background(), 0, streamFile, 0, ad, framer, nil); err != nil {
   1675  			t.Errorf("stream() failed: %v", err) // Errorf: Fatal must not be called from a non-test goroutine
  1676  		}
  1677  	}()
  1678  
  1679  	// Sleep a little before truncating. This lets us check if the watch
  1680  	// is working.
  1681  	time.Sleep(1 * time.Duration(testutil.TestMultiplier()) * time.Second)
  1682  	if err := f.Truncate(0); err != nil {
  1683  		t.Fatalf("truncate failed: %v", err)
  1684  	}
  1685  	if err := f.Sync(); err != nil {
  1686  		t.Fatalf("sync failed: %v", err)
  1687  	}
  1688  	if err := f.Close(); err != nil {
  1689  		t.Fatalf("failed to close file: %v", err)
  1690  	}
  1691  
  1692  	f2, err := os.OpenFile(streamFilePath, os.O_RDWR, 0)
  1693  	if err != nil {
  1694  		t.Fatalf("failed to reopen file: %v", err)
  1695  	}
  1696  	defer f2.Close()
  1697  	if _, err := f2.Write(data[3:5]); err != nil {
  1698  		t.Fatalf("write failed: %v", err)
  1699  	}
  1700  
  1701  	select {
  1702  	case <-truncateCh:
  1703  	case <-time.After(10 * time.Duration(testutil.TestMultiplier()) * streamBatchWindow):
  1704  		t.Fatalf("did not receive truncate")
  1705  	}
  1706  
  1707  	// Sleep a little before writing more. This lets us check if the watch
  1708  	// is working.
  1709  	time.Sleep(1 * time.Duration(testutil.TestMultiplier()) * time.Second)
  1710  	if _, err := f2.Write(data[5:]); err != nil {
  1711  		t.Fatalf("write failed: %v", err)
  1712  	}
  1713  
  1714  	select {
  1715  	case <-dataPostTruncCh:
  1716  	case <-time.After(10 * time.Duration(testutil.TestMultiplier()) * streamBatchWindow):
  1717  		t.Fatalf("did not receive post truncate data")
  1718  	}
  1719  }
  1720  
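         // TestFS_streamImpl_Delete asserts that streamFile emits a delete event when
         // the streamed file is removed.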
  1721  func TestFS_streamImpl_Delete(t *testing.T) {
  1722  	if runtime.GOOS == "windows" {
  1723  		t.Skip("Windows does not allow us to delete a file while it is open")
  1724  	}
  1725  	t.Parallel()
  1726  
  1727  	c, cleanup := TestClient(t, nil)
  1728  	defer cleanup()
  1729  
  1730  	// Get a temp alloc dir
  1731  	ad := tempAllocDir(t)
  1732  	defer os.RemoveAll(ad.AllocDir)
  1733  
  1734  	// Create a file in the temp dir
  1735  	data := []byte("helloworld")
  1736  	streamFile := "stream_file"
  1737  	streamFilePath := filepath.Join(ad.AllocDir, streamFile)
  1738  	f, err := os.Create(streamFilePath)
  1739  	if err != nil {
  1740  		t.Fatalf("Failed to create file: %v", err)
  1741  	}
  1742  	defer f.Close()
  1743  
  1744  	// Start the reader
  1745  	deleteCh := make(chan struct{})
  1746  	frames := make(chan *sframer.StreamFrame, 4)
  1747  	go func() {
  1748  		for {
  1749  			frame, ok := <-frames
  1750  			if !ok {
  1751  				return
  1752  			}
  1753  
  1754  			if frame.IsHeartbeat() {
  1755  				continue
  1756  			}
  1757  
  1758  			if frame.FileEvent == deleteEvent {
  1759  				close(deleteCh)
  1760  				return
  1761  			}
  1762  		}
  1763  	}()
  1764  
  1765  	// Write a few bytes
  1766  	if _, err := f.Write(data[:3]); err != nil {
  1767  		t.Fatalf("write failed: %v", err)
  1768  	}
  1769  
  1770  	framer := sframer.NewStreamFramer(frames, streamHeartbeatRate, streamBatchWindow, streamFrameSize)
  1771  	framer.Run()
  1772  	defer framer.Destroy()
  1773  
  1774  	// Start streaming
  1775  	go func() {
  1776  		if err := c.endpoints.FileSystem.streamFile(
  1777  			context.Background(), 0, streamFile, 0, ad, framer, nil); err != nil {
   1778  			t.Errorf("stream() failed: %v", err) // Errorf: Fatal must not be called from a non-test goroutine
  1779  		}
  1780  	}()
  1781  
  1782  	// Sleep a little before deleting. This lets us check if the watch
  1783  	// is working.
  1784  	time.Sleep(1 * time.Duration(testutil.TestMultiplier()) * time.Second)
  1785  	if err := os.Remove(streamFilePath); err != nil {
  1786  		t.Fatalf("delete failed: %v", err)
  1787  	}
  1788  
  1789  	select {
  1790  	case <-deleteCh:
  1791  	case <-time.After(10 * time.Duration(testutil.TestMultiplier()) * streamBatchWindow):
  1792  		t.Fatalf("did not receive delete")
  1793  	}
  1794  }
  1795  
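         // TestFS_logsImpl_NoFollow asserts that logsImpl without follow streams the
         // existing rotated log files in order and then stops.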
  1796  func TestFS_logsImpl_NoFollow(t *testing.T) {
  1797  	t.Parallel()
  1798  
  1799  	c, cleanup := TestClient(t, nil)
  1800  	defer cleanup()
  1801  
  1802  	// Get a temp alloc dir and create the log dir
  1803  	ad := tempAllocDir(t)
  1804  	defer os.RemoveAll(ad.AllocDir)
  1805  
  1806  	logDir := filepath.Join(ad.SharedDir, allocdir.LogDirName)
  1807  	if err := os.MkdirAll(logDir, 0777); err != nil {
  1808  		t.Fatalf("Failed to make log dir: %v", err)
  1809  	}
  1810  
  1811  	// Create a series of log files in the temp dir
  1812  	task := "foo"
  1813  	logType := "stdout"
  1814  	expected := []byte("012")
  1815  	for i := 0; i < 3; i++ {
  1816  		logFile := fmt.Sprintf("%s.%s.%d", task, logType, i)
  1817  		logFilePath := filepath.Join(logDir, logFile)
   1818  		err := ioutil.WriteFile(logFilePath, expected[i:i+1], 0777)
  1819  		if err != nil {
  1820  			t.Fatalf("Failed to create file: %v", err)
  1821  		}
  1822  	}
  1823  
  1824  	// Start the reader
  1825  	resultCh := make(chan struct{})
  1826  	frames := make(chan *sframer.StreamFrame, 4)
  1827  	var received []byte
  1828  	go func() {
  1829  		for {
  1830  			frame, ok := <-frames
  1831  			if !ok {
  1832  				return
  1833  			}
  1834  
  1835  			if frame.IsHeartbeat() {
  1836  				continue
  1837  			}
  1838  
  1839  			received = append(received, frame.Data...)
  1840  			if reflect.DeepEqual(received, expected) {
  1841  				close(resultCh)
  1842  				return
  1843  			}
  1844  		}
  1845  	}()
  1846  
  1847  	// Start streaming logs
  1848  	go func() {
  1849  		if err := c.endpoints.FileSystem.logsImpl(
  1850  			context.Background(), false, false, 0,
  1851  			OriginStart, task, logType, ad, frames); err != nil {
   1852  			t.Errorf("logs() failed: %v", err) // Errorf: Fatal must not be called from a non-test goroutine
  1853  		}
  1854  	}()
  1855  
  1856  	select {
  1857  	case <-resultCh:
  1858  	case <-time.After(10 * time.Duration(testutil.TestMultiplier()) * streamBatchWindow):
  1859  		t.Fatalf("did not receive data: got %q", string(received))
  1860  	}
  1861  }
  1862  
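         // TestFS_logsImpl_Follow asserts that logsImpl with follow continues across
         // log file rotations, even when file index numbers are skipped.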
  1863  func TestFS_logsImpl_Follow(t *testing.T) {
  1864  	t.Parallel()
  1865  
  1866  	c, cleanup := TestClient(t, nil)
  1867  	defer cleanup()
  1868  
  1869  	// Get a temp alloc dir and create the log dir
  1870  	ad := tempAllocDir(t)
  1871  	defer os.RemoveAll(ad.AllocDir)
  1872  
  1873  	logDir := filepath.Join(ad.SharedDir, allocdir.LogDirName)
  1874  	if err := os.MkdirAll(logDir, 0777); err != nil {
  1875  		t.Fatalf("Failed to make log dir: %v", err)
  1876  	}
  1877  
  1878  	// Create a series of log files in the temp dir
  1879  	task := "foo"
  1880  	logType := "stdout"
  1881  	expected := []byte("012345")
  1882  	initialWrites := 3
  1883  
  1884  	writeToFile := func(index int, data []byte) {
  1885  		logFile := fmt.Sprintf("%s.%s.%d", task, logType, index)
  1886  		logFilePath := filepath.Join(logDir, logFile)
   1887  		err := ioutil.WriteFile(logFilePath, data, 0777)
  1888  		if err != nil {
  1889  			t.Fatalf("Failed to create file: %v", err)
  1890  		}
  1891  	}
  1892  	for i := 0; i < initialWrites; i++ {
  1893  		writeToFile(i, expected[i:i+1])
  1894  	}
  1895  
  1896  	// Start the reader
  1897  	firstResultCh := make(chan struct{})
  1898  	fullResultCh := make(chan struct{})
  1899  	frames := make(chan *sframer.StreamFrame, 4)
  1900  	var received []byte
  1901  	go func() {
  1902  		for {
  1903  			frame, ok := <-frames
  1904  			if !ok {
  1905  				return
  1906  			}
  1907  
  1908  			if frame.IsHeartbeat() {
  1909  				continue
  1910  			}
  1911  
  1912  			received = append(received, frame.Data...)
  1913  			if reflect.DeepEqual(received, expected[:initialWrites]) {
  1914  				close(firstResultCh)
  1915  			} else if reflect.DeepEqual(received, expected) {
  1916  				close(fullResultCh)
  1917  				return
  1918  			}
  1919  		}
  1920  	}()
  1921  
  1922  	// Start streaming logs
  1923  	go c.endpoints.FileSystem.logsImpl(
  1924  		context.Background(), true, false, 0,
  1925  		OriginStart, task, logType, ad, frames)
  1926  
  1927  	select {
  1928  	case <-firstResultCh:
  1929  	case <-time.After(10 * time.Duration(testutil.TestMultiplier()) * streamBatchWindow):
  1930  		t.Fatalf("did not receive data: got %q", string(received))
  1931  	}
  1932  
   1933  	// We got the first chunk of data; write the rest to a file at a much
   1934  	// higher index to check that the follower keeps up and detects skipped
   1935  	// file indexes.
  1936  	skipTo := initialWrites + 10
  1937  	writeToFile(skipTo, expected[initialWrites:])
  1938  
  1939  	select {
  1940  	case <-fullResultCh:
  1941  	case <-time.After(10 * time.Duration(testutil.TestMultiplier()) * streamBatchWindow):
  1942  		t.Fatalf("did not receive data: got %q", string(received))
  1943  	}
  1944  }