github.com/Jeffail/benthos/v3@v3.65.0/internal/codec/reader_test.go (about)

     1  package codec
     2  
     3  import (
     4  	"archive/tar"
     5  	"bytes"
     6  	"compress/gzip"
     7  	"context"
     8  	"errors"
     9  	"fmt"
    10  	"io"
    11  	"sync"
    12  	"testing"
    13  
    14  	"github.com/Jeffail/benthos/v3/lib/types"
    15  	"github.com/stretchr/testify/assert"
    16  	"github.com/stretchr/testify/require"
    17  )
    18  
    19  type noopCloser struct {
    20  	io.Reader
    21  	returnEOFOnRead bool
    22  }
    23  
    24  func (n noopCloser) Read(p []byte) (int, error) {
    25  	byteCount, err := n.Reader.Read(p)
    26  	if err != nil {
    27  		return byteCount, err
    28  	}
    29  
    30  	if n.returnEOFOnRead {
    31  		return byteCount, io.EOF
    32  	}
    33  
    34  	return byteCount, err
    35  }
    36  
    37  func (n noopCloser) Close() error {
    38  	return nil
    39  }
    40  
    41  type microReader struct {
    42  	io.Reader
    43  }
    44  
    45  func (n microReader) Read(p []byte) (int, error) {
    46  	// Only a max of 5 bytes at a time
    47  	if len(p) < 5 {
    48  		return n.Reader.Read(p)
    49  	}
    50  
    51  	micro := make([]byte, 5)
    52  	byteCount, err := n.Reader.Read(micro)
    53  	if err != nil {
    54  		return byteCount, err
    55  	}
    56  
    57  	_ = copy(p, micro)
    58  	return byteCount, nil
    59  }
    60  
// testReaderSuite exercises the reader constructed for codec/path against a
// raw input, asserting that it yields exactly the expected sequence of
// single-part messages. Subtests cover: closing before any read (must ack
// with a shutdown error), EOF delivered alongside the final read, tiny
// 5-byte read flushes, and ack delivery in ordered, unordered and parallel
// fashion; when output is expected, nacking is exercised too.
func testReaderSuite(t *testing.T, codec, path string, data []byte, expected ...string) {
	// Closing an unconsumed reader must invoke the ack fn with the
	// "service shutting down" error.
	t.Run("close before reading", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		r, err := ctor(path, buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		assert.NoError(t, r.Close(context.Background()))
		assert.EqualError(t, ack, "service shutting down")
	})

	// Readers must not drop the final message when the underlying source
	// returns io.EOF together with the last successful read.
	t.Run("returns all data even if EOF is encountered during the last read", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		// buf is passed by pointer here so that flipping returnEOFOnRead
		// below is observed by the reader.
		r, err := ctor(path, &buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		allReads := map[string][]byte{}

		for i, exp := range expected {
			// Force EOF to accompany the data of the final read.
			if i == len(expected)-1 {
				buf.returnEOFOnRead = true
			}
			p, ackFn, err := r.Next(context.Background())
			require.NoError(t, err)
			require.NoError(t, ackFn(context.Background(), nil))
			require.Len(t, p, 1)
			assert.Equal(t, exp, string(p[0].Get()))
			allReads[string(p[0].Get())] = p[0].Get()
		}

		_, _, err = r.Next(context.Background())
		assert.EqualError(t, err, "EOF")

		assert.NoError(t, r.Close(context.Background()))
		assert.NoError(t, ack)

		// Byte slices captured at read time must still match the strings
		// copied from them (guards against internal buffer reuse).
		for k, v := range allReads {
			assert.Equal(t, k, string(v), "Must not corrupt previous reads")
		}
	})

	// The reader must assemble messages correctly even when the source
	// flushes at most 5 bytes per read (see microReader).
	t.Run("can consume micro flushes", func(t *testing.T) {
		buf := noopCloser{microReader{bytes.NewReader(data)}, false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		r, err := ctor(path, buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		allReads := map[string][]byte{}

		for _, exp := range expected {
			p, ackFn, err := r.Next(context.Background())
			require.NoError(t, err)
			require.NoError(t, ackFn(context.Background(), nil))
			require.Len(t, p, 1)
			assert.Equal(t, exp, string(p[0].Get()))
			allReads[string(p[0].Get())] = p[0].Get()
		}

		_, _, err = r.Next(context.Background())
		assert.EqualError(t, err, "EOF")

		assert.NoError(t, r.Close(context.Background()))
		assert.NoError(t, ack)

		for k, v := range allReads {
			assert.Equal(t, k, string(v), "Must not corrupt previous reads")
		}
	})

	// Acking each message immediately after reading it must result in a
	// nil aggregate ack once the reader is exhausted and closed.
	t.Run("acks ordered reads", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		r, err := ctor(path, buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		allReads := map[string][]byte{}

		for _, exp := range expected {
			p, ackFn, err := r.Next(context.Background())
			require.NoError(t, err)
			require.NoError(t, ackFn(context.Background(), nil))
			require.Len(t, p, 1)
			assert.Equal(t, exp, string(p[0].Get()))
			allReads[string(p[0].Get())] = p[0].Get()
		}

		_, _, err = r.Next(context.Background())
		assert.EqualError(t, err, "EOF")

		assert.NoError(t, r.Close(context.Background()))
		assert.NoError(t, ack)

		for k, v := range allReads {
			assert.Equal(t, k, string(v), "Must not corrupt previous reads")
		}
	})

	// Deferring every ack until after the reader is exhausted and closed
	// must still produce a nil aggregate ack.
	t.Run("acks unordered reads", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		r, err := ctor(path, buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		allReads := map[string][]byte{}

		var ackFns []ReaderAckFn
		for _, exp := range expected {
			p, ackFn, err := r.Next(context.Background())
			require.NoError(t, err)
			require.Len(t, p, 1)
			ackFns = append(ackFns, ackFn)
			assert.Equal(t, exp, string(p[0].Get()))
			allReads[string(p[0].Get())] = p[0].Get()
		}

		_, _, err = r.Next(context.Background())
		assert.EqualError(t, err, "EOF")
		assert.NoError(t, r.Close(context.Background()))

		for _, ackFn := range ackFns {
			require.NoError(t, ackFn(context.Background(), nil))
		}

		assert.NoError(t, ack)

		for k, v := range allReads {
			assert.Equal(t, k, string(v), "Must not corrupt previous reads")
		}
	})

	// Acks fired concurrently from separate goroutines must be safe and
	// still aggregate to a nil ack.
	t.Run("acks parallel reads", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		r, err := ctor(path, buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		allReads := map[string][]byte{}

		wg := sync.WaitGroup{}
		wg.Add(len(expected))

		for _, exp := range expected {
			exp := exp
			p, ackFn, err := r.Next(context.Background())
			require.NoError(t, err)
			require.Len(t, p, 1)
			assert.Equal(t, exp, string(p[0].Get()))
			allReads[string(p[0].Get())] = p[0].Get()

			go func() {
				defer wg.Done()
				require.NoError(t, ackFn(context.Background(), nil))
			}()
		}

		_, _, err = r.Next(context.Background())
		assert.EqualError(t, err, "EOF")

		wg.Wait()
		assert.NoError(t, r.Close(context.Background()))

		assert.NoError(t, ack)

		for k, v := range allReads {
			assert.Equal(t, k, string(v), "Must not corrupt previous reads")
		}
	})

	if len(expected) > 0 {
		// A single nacked message must surface its error through the
		// aggregate ack fn, regardless of ack ordering.
		t.Run("nacks unordered reads", func(t *testing.T) {
			buf := noopCloser{bytes.NewReader(data), false}

			ctor, err := GetReader(codec, NewReaderConfig())
			require.NoError(t, err)

			ack := errors.New("default err")
			exp := errors.New("real err")

			r, err := ctor(path, buf, func(ctx context.Context, err error) error {
				ack = err
				return nil
			})
			require.NoError(t, err)

			allReads := map[string][]byte{}

			var ackFns []ReaderAckFn
			// NOTE: this loop's exp shadows the outer error variable only
			// within the loop body; the nack below uses the outer exp.
			for _, exp := range expected {
				p, ackFn, err := r.Next(context.Background())
				require.NoError(t, err)
				require.Len(t, p, 1)
				ackFns = append(ackFns, ackFn)
				assert.Equal(t, exp, string(p[0].Get()))
				allReads[string(p[0].Get())] = p[0].Get()
			}

			_, _, err = r.Next(context.Background())
			assert.EqualError(t, err, "EOF")
			assert.NoError(t, r.Close(context.Background()))

			for i, ackFn := range ackFns {
				if i == 0 {
					require.NoError(t, ackFn(context.Background(), exp))
				} else {
					require.NoError(t, ackFn(context.Background(), nil))
				}
			}

			assert.EqualError(t, ack, exp.Error())

			for k, v := range allReads {
				assert.Equal(t, k, string(v), "Must not corrupt previous reads")
			}
		})
	}
}
   326  
   327  func TestLinesReader(t *testing.T) {
   328  	data := []byte("foo\nbar\nbaz")
   329  	testReaderSuite(t, "lines", "", data, "foo", "bar", "baz")
   330  
   331  	data = []byte("")
   332  	testReaderSuite(t, "lines", "", data)
   333  }
   334  
   335  func TestCSVReader(t *testing.T) {
   336  	data := []byte("col1,col2,col3\nfoo1,bar1,baz1\nfoo2,bar2,baz2\nfoo3,bar3,baz3")
   337  	testReaderSuite(
   338  		t, "csv", "", data,
   339  		`{"col1":"foo1","col2":"bar1","col3":"baz1"}`,
   340  		`{"col1":"foo2","col2":"bar2","col3":"baz2"}`,
   341  		`{"col1":"foo3","col2":"bar3","col3":"baz3"}`,
   342  	)
   343  
   344  	data = []byte("col1,col2,col3")
   345  	testReaderSuite(t, "csv", "", data)
   346  }
   347  
   348  func TestPSVReader(t *testing.T) {
   349  	data := []byte("col1|col2|col3\nfoo1|bar1|baz1\nfoo2|bar2|baz2\nfoo3|bar3|baz3")
   350  	testReaderSuite(
   351  		t, "csv:|", "", data,
   352  		`{"col1":"foo1","col2":"bar1","col3":"baz1"}`,
   353  		`{"col1":"foo2","col2":"bar2","col3":"baz2"}`,
   354  		`{"col1":"foo3","col2":"bar3","col3":"baz3"}`,
   355  	)
   356  
   357  	data = []byte("col1|col2|col3")
   358  	testReaderSuite(t, "csv:|", "", data)
   359  }
   360  
   361  func TestAutoReader(t *testing.T) {
   362  	data := []byte("col1,col2,col3\nfoo1,bar1,baz1\nfoo2,bar2,baz2\nfoo3,bar3,baz3")
   363  	testReaderSuite(
   364  		t, "auto", "foo.csv", data,
   365  		`{"col1":"foo1","col2":"bar1","col3":"baz1"}`,
   366  		`{"col1":"foo2","col2":"bar2","col3":"baz2"}`,
   367  		`{"col1":"foo3","col2":"bar3","col3":"baz3"}`,
   368  	)
   369  
   370  	data = []byte("col1,col2,col3")
   371  	testReaderSuite(t, "auto", "foo.csv", data)
   372  }
   373  
   374  func TestCSVGzipReader(t *testing.T) {
   375  	var gzipBuf bytes.Buffer
   376  	zw := gzip.NewWriter(&gzipBuf)
   377  	zw.Write([]byte("col1,col2,col3\nfoo1,bar1,baz1\nfoo2,bar2,baz2\nfoo3,bar3,baz3"))
   378  	zw.Close()
   379  
   380  	testReaderSuite(
   381  		t, "gzip/csv", "", gzipBuf.Bytes(),
   382  		`{"col1":"foo1","col2":"bar1","col3":"baz1"}`,
   383  		`{"col1":"foo2","col2":"bar2","col3":"baz2"}`,
   384  		`{"col1":"foo3","col2":"bar3","col3":"baz3"}`,
   385  	)
   386  }
   387  
   388  func TestCSVGzipReaderOld(t *testing.T) {
   389  	var gzipBuf bytes.Buffer
   390  	zw := gzip.NewWriter(&gzipBuf)
   391  	zw.Write([]byte("col1,col2,col3\nfoo1,bar1,baz1\nfoo2,bar2,baz2\nfoo3,bar3,baz3"))
   392  	zw.Close()
   393  
   394  	testReaderSuite(
   395  		t, "csv-gzip", "", gzipBuf.Bytes(),
   396  		`{"col1":"foo1","col2":"bar1","col3":"baz1"}`,
   397  		`{"col1":"foo2","col2":"bar2","col3":"baz2"}`,
   398  		`{"col1":"foo3","col2":"bar3","col3":"baz3"}`,
   399  	)
   400  }
   401  
   402  func TestAllBytesReader(t *testing.T) {
   403  	data := []byte("foo\nbar\nbaz")
   404  	testReaderSuite(t, "all-bytes", "", data, "foo\nbar\nbaz")
   405  }
   406  
   407  func TestDelimReader(t *testing.T) {
   408  	data := []byte("fooXbarXbaz")
   409  	testReaderSuite(t, "delim:X", "", data, "foo", "bar", "baz")
   410  
   411  	data = []byte("")
   412  	testReaderSuite(t, "delim:X", "", data)
   413  }
   414  
   415  func TestChunkerReader(t *testing.T) {
   416  	t.Run("with exact chunks", func(t *testing.T) {
   417  		data := []byte("foobarbaz")
   418  		testReaderSuite(t, "chunker:3", "", data, "foo", "bar", "baz")
   419  	})
   420  
   421  	t.Run("with remainder", func(t *testing.T) {
   422  		data := []byte("fooxbarybaz")
   423  		testReaderSuite(t, "chunker:3", "", data, "foo", "xba", "ryb", "az")
   424  	})
   425  
   426  	t.Run("tiny chunks", func(t *testing.T) {
   427  		data := []byte("")
   428  		testReaderSuite(t, "chunker:1", "", data)
   429  	})
   430  
   431  	t.Run("larger chunks", func(t *testing.T) {
   432  		data := []byte("hell1worldhell2worldhell3worldhell4worldhell5worldhell6world")
   433  		testReaderSuite(
   434  			t, "chunker:10", "", data,
   435  			"hell1world", "hell2world", "hell3world",
   436  			"hell4world", "hell5world", "hell6world",
   437  		)
   438  	})
   439  }
   440  
   441  func TestTarReader(t *testing.T) {
   442  	input := []string{
   443  		"first document",
   444  		"second document",
   445  		"third document",
   446  	}
   447  
   448  	var tarBuf bytes.Buffer
   449  	tw := tar.NewWriter(&tarBuf)
   450  	for i := range input {
   451  		hdr := &tar.Header{
   452  			Name: fmt.Sprintf("testfile%v", i),
   453  			Mode: 0o600,
   454  			Size: int64(len(input[i])),
   455  		}
   456  
   457  		err := tw.WriteHeader(hdr)
   458  		require.NoError(t, err)
   459  
   460  		_, err = tw.Write([]byte(input[i]))
   461  		require.NoError(t, err)
   462  	}
   463  	require.NoError(t, tw.Close())
   464  
   465  	testReaderSuite(t, "tar", "", tarBuf.Bytes(), input...)
   466  	testReaderSuite(t, "auto", "foo.tar", tarBuf.Bytes(), input...)
   467  }
   468  
   469  func TestTarGzipReader(t *testing.T) {
   470  	input := []string{
   471  		"first document",
   472  		"second document",
   473  		"third document",
   474  	}
   475  
   476  	var gzipBuf bytes.Buffer
   477  
   478  	zw := gzip.NewWriter(&gzipBuf)
   479  	tw := tar.NewWriter(zw)
   480  	for i := range input {
   481  		hdr := &tar.Header{
   482  			Name: fmt.Sprintf("testfile%v", i),
   483  			Mode: 0o600,
   484  			Size: int64(len(input[i])),
   485  		}
   486  
   487  		err := tw.WriteHeader(hdr)
   488  		require.NoError(t, err)
   489  
   490  		_, err = tw.Write([]byte(input[i]))
   491  		require.NoError(t, err)
   492  	}
   493  	require.NoError(t, tw.Close())
   494  	require.NoError(t, zw.Close())
   495  
   496  	testReaderSuite(t, "gzip/tar", "", gzipBuf.Bytes(), input...)
   497  	testReaderSuite(t, "auto", "foo.tar.gz", gzipBuf.Bytes(), input...)
   498  	testReaderSuite(t, "auto", "foo.tar.gzip", gzipBuf.Bytes(), input...)
   499  	testReaderSuite(t, "auto", "foo.tgz", gzipBuf.Bytes(), input...)
   500  }
   501  
   502  func TestTarGzipReaderOld(t *testing.T) {
   503  	input := []string{
   504  		"first document",
   505  		"second document",
   506  		"third document",
   507  	}
   508  
   509  	var gzipBuf bytes.Buffer
   510  
   511  	zw := gzip.NewWriter(&gzipBuf)
   512  	tw := tar.NewWriter(zw)
   513  	for i := range input {
   514  		hdr := &tar.Header{
   515  			Name: fmt.Sprintf("testfile%v", i),
   516  			Mode: 0o600,
   517  			Size: int64(len(input[i])),
   518  		}
   519  
   520  		err := tw.WriteHeader(hdr)
   521  		require.NoError(t, err)
   522  
   523  		_, err = tw.Write([]byte(input[i]))
   524  		require.NoError(t, err)
   525  	}
   526  	require.NoError(t, tw.Close())
   527  	require.NoError(t, zw.Close())
   528  
   529  	testReaderSuite(t, "tar-gzip", "", gzipBuf.Bytes(), input...)
   530  	testReaderSuite(t, "auto", "foo.tar.gz", gzipBuf.Bytes(), input...)
   531  	testReaderSuite(t, "auto", "foo.tar.gzip", gzipBuf.Bytes(), input...)
   532  	testReaderSuite(t, "auto", "foo.tgz", gzipBuf.Bytes(), input...)
   533  }
   534  
   535  func strsFromParts(ps []types.Part) []string {
   536  	var strs []string
   537  	for _, part := range ps {
   538  		strs = append(strs, string(part.Get()))
   539  	}
   540  	return strs
   541  }
   542  
// testMultipartReaderSuite mirrors testReaderSuite for codecs that emit
// multi-part messages: expected is a sequence of batches, each a slice of
// part contents. Subtests cover closing before any read (must ack with a
// shutdown error), EOF delivered alongside the final read, and ack
// delivery in ordered, unordered and parallel fashion; when output is
// expected, nacking is exercised too.
func testMultipartReaderSuite(t *testing.T, codec, path string, data []byte, expected ...[]string) {
	// Closing an unconsumed reader must invoke the ack fn with the
	// "service shutting down" error.
	t.Run("close before reading", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		r, err := ctor(path, buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		assert.NoError(t, r.Close(context.Background()))
		assert.EqualError(t, ack, "service shutting down")
	})

	// Readers must not drop the final batch when the underlying source
	// returns io.EOF together with the last successful read.
	t.Run("returns all data even if EOF is encountered during the last read", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		// buf is passed by pointer here so that flipping returnEOFOnRead
		// below is observed by the reader.
		r, err := ctor(path, &buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		for i, exp := range expected {
			// Force EOF to accompany the data of the final read.
			if i == len(expected)-1 {
				buf.returnEOFOnRead = true
			}
			p, ackFn, err := r.Next(context.Background())
			require.NoError(t, err)
			require.NoError(t, ackFn(context.Background(), nil))
			require.Len(t, p, len(exp))
			assert.Equal(t, exp, strsFromParts(p))
		}

		_, _, err = r.Next(context.Background())
		assert.EqualError(t, err, "EOF")

		assert.NoError(t, r.Close(context.Background()))
		assert.NoError(t, ack)
	})

	// Acking each batch immediately after reading it must result in a nil
	// aggregate ack once the reader is exhausted and closed.
	t.Run("acks ordered reads", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		r, err := ctor(path, buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		for _, exp := range expected {
			p, ackFn, err := r.Next(context.Background())
			require.NoError(t, err)
			require.NoError(t, ackFn(context.Background(), nil))
			require.Len(t, p, len(exp))
			assert.Equal(t, exp, strsFromParts(p))
		}

		_, _, err = r.Next(context.Background())
		assert.EqualError(t, err, "EOF")

		assert.NoError(t, r.Close(context.Background()))
		assert.NoError(t, ack)
	})

	// Deferring every ack until after the reader is exhausted and closed
	// must still produce a nil aggregate ack.
	t.Run("acks unordered reads", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		r, err := ctor(path, buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		var ackFns []ReaderAckFn
		for _, exp := range expected {
			p, ackFn, err := r.Next(context.Background())
			require.NoError(t, err)
			require.Len(t, p, len(exp))
			ackFns = append(ackFns, ackFn)
			assert.Equal(t, exp, strsFromParts(p))
		}

		_, _, err = r.Next(context.Background())
		assert.EqualError(t, err, "EOF")
		assert.NoError(t, r.Close(context.Background()))

		for _, ackFn := range ackFns {
			require.NoError(t, ackFn(context.Background(), nil))
		}

		assert.NoError(t, ack)
	})

	// Acks fired concurrently from separate goroutines must be safe and
	// still aggregate to a nil ack.
	t.Run("acks parallel reads", func(t *testing.T) {
		buf := noopCloser{bytes.NewReader(data), false}

		ctor, err := GetReader(codec, NewReaderConfig())
		require.NoError(t, err)

		ack := errors.New("default err")

		r, err := ctor(path, buf, func(ctx context.Context, err error) error {
			ack = err
			return nil
		})
		require.NoError(t, err)

		wg := sync.WaitGroup{}
		wg.Add(len(expected))

		for _, exp := range expected {
			exp := exp
			p, ackFn, err := r.Next(context.Background())
			require.NoError(t, err)
			require.Len(t, p, len(exp))
			assert.Equal(t, exp, strsFromParts(p))

			go func() {
				defer wg.Done()
				require.NoError(t, ackFn(context.Background(), nil))
			}()
		}

		_, _, err = r.Next(context.Background())
		assert.EqualError(t, err, "EOF")

		wg.Wait()
		assert.NoError(t, r.Close(context.Background()))

		assert.NoError(t, ack)
	})

	if len(expected) > 0 {
		// A single nacked batch must surface its error through the
		// aggregate ack fn, regardless of ack ordering.
		t.Run("nacks unordered reads", func(t *testing.T) {
			buf := noopCloser{bytes.NewReader(data), false}

			ctor, err := GetReader(codec, NewReaderConfig())
			require.NoError(t, err)

			ack := errors.New("default err")
			exp := errors.New("real err")

			r, err := ctor(path, buf, func(ctx context.Context, err error) error {
				ack = err
				return nil
			})
			require.NoError(t, err)

			var ackFns []ReaderAckFn
			// NOTE: this loop's exp shadows the outer error variable only
			// within the loop body; the nack below uses the outer exp.
			for _, exp := range expected {
				p, ackFn, err := r.Next(context.Background())
				require.NoError(t, err)
				require.Len(t, p, len(exp))
				ackFns = append(ackFns, ackFn)
				assert.Equal(t, exp, strsFromParts(p))
			}

			_, _, err = r.Next(context.Background())
			assert.EqualError(t, err, "EOF")
			assert.NoError(t, r.Close(context.Background()))

			for i, ackFn := range ackFns {
				if i == 0 {
					require.NoError(t, ackFn(context.Background(), exp))
				} else {
					require.NoError(t, ackFn(context.Background(), nil))
				}
			}

			assert.EqualError(t, ack, exp.Error())
		})
	}
}
   737  
   738  func TestMultipartLinesReader(t *testing.T) {
   739  	data := []byte("foo\nbar\nbaz\n\nbuz\nqux\nquz\n")
   740  	testMultipartReaderSuite(t, "lines/multipart", "", data, []string{"foo", "bar", "baz"}, []string{"buz", "qux", "quz"})
   741  
   742  	data = []byte("")
   743  	testReaderSuite(t, "lines/multipart", "", data)
   744  }
   745  
   746  func TestRegexpSplitReader(t *testing.T) {
   747  	data := []byte("foo\nbar\nbaz")
   748  	testReaderSuite(t, "regex:(?m)^", "", data, "foo\n", "bar\n", "baz")
   749  
   750  	data = []byte("foo\nbar\nsplit\nbaz\nsplitsplit")
   751  	testReaderSuite(t, "regex:split", "", data, "foo\nbar\n", "split\nbaz\n", "split", "split")
   752  
   753  	data = []byte("split")
   754  	testReaderSuite(t, "regex:\\n", "", data, "split")
   755  	testReaderSuite(t, "regex:split", "", data, "split")
   756  
   757  	data = []byte("foo\nbar\nsplit\nbaz\nsplitsplit")
   758  	testReaderSuite(t, "regex:\\n", "", data, "foo", "\nbar", "\nsplit", "\nbaz", "\nsplitsplit")
   759  
   760  	data = []byte("foo\nbar\nsplit\nbaz")
   761  	testReaderSuite(t, "regex:\\n", "", data, "foo", "\nbar", "\nsplit", "\nbaz")
   762  
   763  	data = []byte("20:20:22 ERROR\nCode\n20:20:21 INFO\n20:20:21 INFO\n20:20:22 ERROR\nCode\n")
   764  	testReaderSuite(t, "regex:\\n\\d", "", data, "20:20:22 ERROR\nCode", "\n20:20:21 INFO", "\n20:20:21 INFO", "\n20:20:22 ERROR\nCode\n")
   765  
   766  	data = []byte("20:20:22 ERROR\nCode\n20:20:21 INFO\n20:20:21 INFO\n20:20\n20:20:22 ERROR\nCode\n2022")
   767  	testReaderSuite(t, "regex:(?m)^\\d\\d:\\d\\d:\\d\\d", "", data, "20:20:22 ERROR\nCode\n", "20:20:21 INFO\n", "20:20:21 INFO\n20:20\n", "20:20:22 ERROR\nCode\n2022")
   768  
   769  	data = []byte("")
   770  	testReaderSuite(t, "regex:split", "", data)
   771  }