github.com/ncw/rclone@v1.48.1-0.20190724201158-a35aa1360e3e/fs/asyncreader/asyncreader_test.go

package asyncreader

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"strings"
	"sync"
	"testing"
	"testing/iotest"
	"time"

	"github.com/ncw/rclone/lib/israce"
	"github.com/ncw/rclone/lib/readers"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

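// TestAsyncReader checks basic Read behaviour: a short read that delivers
// the whole payload together with io.EOF, repeated reads at EOF, double
// Close, and Close before the input has been fully consumed.
//
// A minimal usage sketch based only on the calls exercised in this file
// (New, Read/WriteTo, SkipBytes, Close, Abandon); src and dst are
// placeholders:
//
//	ar, err := New(ioutil.NopCloser(src), 4) // read ahead into 4 buffers
//	if err != nil {
//		return err
//	}
//	_, err = io.Copy(dst, ar) // may surface io.EOF, see TestAsyncWriteTo
//	_ = ar.Close()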
func TestAsyncReader(t *testing.T) {
	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
	ar, err := New(buf, 4)
	require.NoError(t, err)

	var dst = make([]byte, 100)
	n, err := ar.Read(dst)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 10, n)

	n, err = ar.Read(dst)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)

	// Test read after error
	n, err = ar.Read(dst)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, 0, n)

	err = ar.Close()
	require.NoError(t, err)
	// Test double close
	err = ar.Close()
	require.NoError(t, err)

	// Test Close without reading everything
	buf = ioutil.NopCloser(bytes.NewBuffer(make([]byte, 50000)))
	ar, err = New(buf, 4)
	require.NoError(t, err)
	err = ar.Close()
	require.NoError(t, err)

}

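// TestAsyncWriteTo copies via io.Copy, which uses the reader's WriteTo
// fast path; unlike a plain Read loop, that path surfaces io.EOF here,
// both on the first copy and again once the reader is drained.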
func TestAsyncWriteTo(t *testing.T) {
	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
	ar, err := New(buf, 4)
	require.NoError(t, err)

	var dst = &bytes.Buffer{}
	n, err := io.Copy(dst, ar)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, int64(10), n)

	// Should still return EOF
	n, err = io.Copy(dst, ar)
	assert.Equal(t, io.EOF, err)
	assert.Equal(t, int64(0), n)

	err = ar.Close()
	require.NoError(t, err)
}

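// TestAsyncReaderErrors checks that New rejects a nil reader and a
// buffer count of zero or less.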
func TestAsyncReaderErrors(t *testing.T) {
	// test nil reader
	_, err := New(nil, 4)
	require.Error(t, err)

	// invalid buffer number
	buf := ioutil.NopCloser(bytes.NewBufferString("Testbuffer"))
	_, err = New(buf, 0)
	require.Error(t, err)
	_, err = New(buf, -1)
	require.Error(t, err)
}

// Complex read tests, adapted from "bufio".

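// readMaker pairs a name with a wrapper that degrades the source reader
// (single-byte reads, short reads, data delivered together with the final
// error, or an injected timeout) so the size tests cover awkward upstream
// behaviour.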
type readMaker struct {
	name string
	fn   func(io.Reader) io.Reader
}

var readMakers = []readMaker{
	{"full", func(r io.Reader) io.Reader { return r }},
	{"byte", iotest.OneByteReader},
	{"half", iotest.HalfReader},
	{"data+err", iotest.DataErrReader},
	{"timeout", iotest.TimeoutReader},
}

// reads calls Read with chunks of m bytes and accumulates everything read
// into a string.
func reads(buf io.Reader, m int) string {
	var b [1000]byte
	nb := 0
	for {
		n, err := buf.Read(b[nb : nb+m])
		nb += n
		if err == io.EOF {
			break
		} else if err != nil && err != iotest.ErrTimeout {
			panic("Data: " + err.Error())
		} else if err != nil {
			break
		}
	}
	return string(b[0:nb])
}

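// bufReader pairs a name with a helper that drains a reader through
// reads() using a fixed chunk size.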
type bufReader struct {
	name string
	fn   func(io.Reader) string
}

var bufreaders = []bufReader{
	{"1", func(b io.Reader) string { return reads(b, 1) }},
	{"2", func(b io.Reader) string { return reads(b, 2) }},
	{"3", func(b io.Reader) string { return reads(b, 3) }},
	{"4", func(b io.Reader) string { return reads(b, 4) }},
	{"5", func(b io.Reader) string { return reads(b, 5) }},
	{"7", func(b io.Reader) string { return reads(b, 7) }},
}

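// minReadBufferSize matches the minimum buffer size enforced by bufio;
// bufsizes feeds bufio.NewReaderSize in the size tests below.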
const minReadBufferSize = 16

var bufsizes = []int{
	0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096,
}

// Test various input buffer sizes, number of buffers and read sizes.
func TestAsyncReaderSizes(t *testing.T) {
	var texts [31]string
	str := ""
	all := ""
	for i := 0; i < len(texts)-1; i++ {
		texts[i] = str + "\n"
		all += texts[i]
		str += string(i%26 + 'a')
	}
	texts[len(texts)-1] = all

	for h := 0; h < len(texts); h++ {
		text := texts[h]
		for i := 0; i < len(readMakers); i++ {
			for j := 0; j < len(bufreaders); j++ {
				for k := 0; k < len(bufsizes); k++ {
					for l := 1; l < 10; l++ {
						readmaker := readMakers[i]
						bufreader := bufreaders[j]
						bufsize := bufsizes[k]
						read := readmaker.fn(strings.NewReader(text))
						buf := bufio.NewReaderSize(read, bufsize)
						ar, _ := New(ioutil.NopCloser(buf), l)
						s := bufreader.fn(ar)
						// "timeout" expects the Reader to recover, AsyncReader does not.
						if s != text && readmaker.name != "timeout" {
							t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
								readmaker.name, bufreader.name, bufsize, text, s)
						}
						err := ar.Close()
						require.NoError(t, err)
					}
				}
			}
		}
	}
}

// Test various input buffer sizes, number of buffers and read sizes,
// reading via WriteTo instead of Read.
func TestAsyncReaderWriteTo(t *testing.T) {
	var texts [31]string
	str := ""
	all := ""
	for i := 0; i < len(texts)-1; i++ {
		texts[i] = str + "\n"
		all += texts[i]
		str += string(i%26 + 'a')
	}
	texts[len(texts)-1] = all

	for h := 0; h < len(texts); h++ {
		text := texts[h]
		for i := 0; i < len(readMakers); i++ {
			for j := 0; j < len(bufreaders); j++ {
				for k := 0; k < len(bufsizes); k++ {
					for l := 1; l < 10; l++ {
						readmaker := readMakers[i]
						bufreader := bufreaders[j]
						bufsize := bufsizes[k]
						read := readmaker.fn(strings.NewReader(text))
						buf := bufio.NewReaderSize(read, bufsize)
						ar, _ := New(ioutil.NopCloser(buf), l)
						dst := &bytes.Buffer{}
						_, err := ar.WriteTo(dst)
						if err != nil && err != io.EOF && err != iotest.ErrTimeout {
							t.Fatal("Copy:", err)
						}
						s := dst.String()
						// "timeout" expects the Reader to recover, AsyncReader does not.
						if s != text && readmaker.name != "timeout" {
							t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
								readmaker.name, bufreader.name, bufsize, text, s)
						}
						err = ar.Close()
						require.NoError(t, err)
					}
				}
			}
		}
	}
}

// zeroReader is an io.ReadCloser that returns an endless stream of zero
// bytes until it is closed, after which Read returns io.EOF.
type zeroReader struct {
	closed bool
}

func (z *zeroReader) Read(p []byte) (n int, err error) {
	if z.closed {
		return 0, io.EOF
	}
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

func (z *zeroReader) Close() error {
	if z.closed {
		panic("double close on zeroReader")
	}
	z.closed = true
	return nil
}

// Test closing and abandoning the reader while a copy is in flight:
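// a goroutine copies from the endless zeroReader (via WriteTo or a Read
// loop), the test then calls Abandon, and the copy is expected to stop
// with errorStreamAbandoned after having moved some data.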
func testAsyncReaderClose(t *testing.T, writeto bool) {
	zr := &zeroReader{}
	a, err := New(zr, 16)
	require.NoError(t, err)
	var copyN int64
	var copyErr error
	var wg sync.WaitGroup
	started := make(chan struct{})
	wg.Add(1)
	go func() {
		defer wg.Done()
		close(started)
		if writeto {
			// exercise the WriteTo path
			copyN, copyErr = a.WriteTo(ioutil.Discard)
		} else {
			// exercise the Read path
			buf := make([]byte, 64*1024)
			for {
				var n int
				n, copyErr = a.Read(buf)
				copyN += int64(n)
				if copyErr != nil {
					break
				}
			}
		}
	}()
	// Do some copying
	<-started
	time.Sleep(100 * time.Millisecond)
	// Abandon the copy
	a.Abandon()
	wg.Wait()
	assert.Equal(t, errorStreamAbandoned, copyErr)
	// t.Logf("Copied %d bytes, err %v", copyN, copyErr)
	assert.True(t, copyN > 0)
}
func TestAsyncReaderCloseRead(t *testing.T)    { testAsyncReaderClose(t, false) }
func TestAsyncReaderCloseWriteTo(t *testing.T) { testAsyncReaderClose(t, true) }

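// TestAsyncReaderSkipBytes reads an initial chunk of known random data,
// calls SkipBytes with a range of positive and negative offsets, and then
// checks either that the next read returns the data at the new offset or,
// if the skip was refused, that the reader reports io.EOF or
// errorStreamAbandoned.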
func TestAsyncReaderSkipBytes(t *testing.T) {
	t.Parallel()
	data := make([]byte, 15000)
	buf := make([]byte, len(data))
	r := rand.New(rand.NewSource(42))

	n, err := r.Read(data)
	require.NoError(t, err)
	require.Equal(t, len(data), n)

	initialReads := []int{0, 1, 100, 2048,
		softStartInitial - 1, softStartInitial, softStartInitial + 1,
		8000, len(data)}
	skips := []int{-1000, -101, -100, -99, 0, 1, 2048,
		softStartInitial - 1, softStartInitial, softStartInitial + 1,
		8000, len(data), BufferSize, 2 * BufferSize}

	for buffers := 1; buffers <= 5; buffers++ {
		if israce.Enabled && buffers > 1 {
			t.Skip("FIXME Skipping further tests with race detector until https://github.com/golang/go/issues/27070 is fixed.")
		}
		t.Run(fmt.Sprintf("%d", buffers), func(t *testing.T) {
			for _, initialRead := range initialReads {
				t.Run(fmt.Sprintf("%d", initialRead), func(t *testing.T) {
					for _, skip := range skips {
						t.Run(fmt.Sprintf("%d", skip), func(t *testing.T) {
							ar, err := New(ioutil.NopCloser(bytes.NewReader(data)), buffers)
							require.NoError(t, err)

							wantSkipFalse := false
							buf = buf[:initialRead]
							n, err := readers.ReadFill(ar, buf)
							if initialRead >= len(data) {
								wantSkipFalse = true
								if initialRead > len(data) {
									assert.Equal(t, io.EOF, err)
								} else {
									assert.True(t, err == nil || err == io.EOF)
								}
								assert.Equal(t, len(data), n)
								assert.Equal(t, data, buf[:len(data)])
							} else {
								assert.NoError(t, err)
								assert.Equal(t, initialRead, n)
								assert.Equal(t, data[:initialRead], buf)
							}

							skipped := ar.SkipBytes(skip)
							buf = buf[:1024]
							n, err = readers.ReadFill(ar, buf)
							offset := initialRead + skip
							if skipped {
								assert.False(t, wantSkipFalse)
								l := len(buf)
								if offset >= len(data) {
									assert.Equal(t, io.EOF, err)
								} else {
									if offset+1024 >= len(data) {
										l = len(data) - offset
									}
									assert.Equal(t, l, n)
									assert.Equal(t, data[offset:offset+l], buf[:l])
								}
							} else {
								if initialRead >= len(data) {
									assert.Equal(t, io.EOF, err)
								} else {
									assert.True(t, err == errorStreamAbandoned || err == io.EOF)
								}
							}
						})
					}
				})
			}
		})
	}
}