github.com/artpar/rclone@v1.67.3/fs/asyncreader/asyncreader_test.go (about) 1 package asyncreader 2 3 import ( 4 "bufio" 5 "bytes" 6 "context" 7 "fmt" 8 "io" 9 "math/rand" 10 "strings" 11 "sync" 12 "testing" 13 "testing/iotest" 14 "time" 15 16 "github.com/artpar/rclone/lib/israce" 17 "github.com/artpar/rclone/lib/readers" 18 "github.com/stretchr/testify/assert" 19 "github.com/stretchr/testify/require" 20 ) 21 22 func TestAsyncReader(t *testing.T) { 23 ctx := context.Background() 24 25 buf := io.NopCloser(bytes.NewBufferString("Testbuffer")) 26 ar, err := New(ctx, buf, 4) 27 require.NoError(t, err) 28 29 var dst = make([]byte, 100) 30 n, err := ar.Read(dst) 31 assert.Equal(t, io.EOF, err) 32 assert.Equal(t, 10, n) 33 34 n, err = ar.Read(dst) 35 assert.Equal(t, io.EOF, err) 36 assert.Equal(t, 0, n) 37 38 // Test read after error 39 n, err = ar.Read(dst) 40 assert.Equal(t, io.EOF, err) 41 assert.Equal(t, 0, n) 42 43 err = ar.Close() 44 require.NoError(t, err) 45 // Test double close 46 err = ar.Close() 47 require.NoError(t, err) 48 49 // Test Close without reading everything 50 buf = io.NopCloser(bytes.NewBuffer(make([]byte, 50000))) 51 ar, err = New(ctx, buf, 4) 52 require.NoError(t, err) 53 err = ar.Close() 54 require.NoError(t, err) 55 56 } 57 58 func TestAsyncWriteTo(t *testing.T) { 59 ctx := context.Background() 60 61 buf := io.NopCloser(bytes.NewBufferString("Testbuffer")) 62 ar, err := New(ctx, buf, 4) 63 require.NoError(t, err) 64 65 var dst = &bytes.Buffer{} 66 n, err := io.Copy(dst, ar) 67 require.NoError(t, err) 68 assert.Equal(t, int64(10), n) 69 70 // Should still not return any errors 71 n, err = io.Copy(dst, ar) 72 require.NoError(t, err) 73 assert.Equal(t, int64(0), n) 74 75 err = ar.Close() 76 require.NoError(t, err) 77 } 78 79 func TestAsyncReaderErrors(t *testing.T) { 80 ctx := context.Background() 81 82 // test nil reader 83 _, err := New(ctx, nil, 4) 84 require.Error(t, err) 85 86 // invalid buffer number 87 buf := 
io.NopCloser(bytes.NewBufferString("Testbuffer")) 88 _, err = New(ctx, buf, 0) 89 require.Error(t, err) 90 _, err = New(ctx, buf, -1) 91 require.Error(t, err) 92 } 93 94 // Complex read tests, leveraged from "bufio". 95 96 type readMaker struct { 97 name string 98 fn func(io.Reader) io.Reader 99 } 100 101 var readMakers = []readMaker{ 102 {"full", func(r io.Reader) io.Reader { return r }}, 103 {"byte", iotest.OneByteReader}, 104 {"half", iotest.HalfReader}, 105 {"data+err", iotest.DataErrReader}, 106 {"timeout", iotest.TimeoutReader}, 107 } 108 109 // Call Read to accumulate the text of a file 110 func reads(buf io.Reader, m int) string { 111 var b [1000]byte 112 nb := 0 113 for { 114 n, err := buf.Read(b[nb : nb+m]) 115 nb += n 116 if err == io.EOF { 117 break 118 } else if err != nil && err != iotest.ErrTimeout { 119 panic("Data: " + err.Error()) 120 } else if err != nil { 121 break 122 } 123 } 124 return string(b[0:nb]) 125 } 126 127 type bufReader struct { 128 name string 129 fn func(io.Reader) string 130 } 131 132 var bufreaders = []bufReader{ 133 {"1", func(b io.Reader) string { return reads(b, 1) }}, 134 {"2", func(b io.Reader) string { return reads(b, 2) }}, 135 {"3", func(b io.Reader) string { return reads(b, 3) }}, 136 {"4", func(b io.Reader) string { return reads(b, 4) }}, 137 {"5", func(b io.Reader) string { return reads(b, 5) }}, 138 {"7", func(b io.Reader) string { return reads(b, 7) }}, 139 } 140 141 const minReadBufferSize = 16 142 143 var bufsizes = []int{ 144 0, minReadBufferSize, 23, 32, 46, 64, 93, 128, 1024, 4096, 145 } 146 147 // Test various input buffer sizes, number of buffers and read sizes. 
148 func TestAsyncReaderSizes(t *testing.T) { 149 ctx := context.Background() 150 151 var texts [31]string 152 str := "" 153 all := "" 154 for i := 0; i < len(texts)-1; i++ { 155 texts[i] = str + "\n" 156 all += texts[i] 157 str += string(rune(i)%26 + 'a') 158 } 159 texts[len(texts)-1] = all 160 161 for h := 0; h < len(texts); h++ { 162 text := texts[h] 163 for i := 0; i < len(readMakers); i++ { 164 for j := 0; j < len(bufreaders); j++ { 165 for k := 0; k < len(bufsizes); k++ { 166 for l := 1; l < 10; l++ { 167 readmaker := readMakers[i] 168 bufreader := bufreaders[j] 169 bufsize := bufsizes[k] 170 read := readmaker.fn(strings.NewReader(text)) 171 buf := bufio.NewReaderSize(read, bufsize) 172 ar, _ := New(ctx, io.NopCloser(buf), l) 173 s := bufreader.fn(ar) 174 // "timeout" expects the Reader to recover, AsyncReader does not. 175 if s != text && readmaker.name != "timeout" { 176 t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q", 177 readmaker.name, bufreader.name, bufsize, text, s) 178 } 179 err := ar.Close() 180 require.NoError(t, err) 181 } 182 } 183 } 184 } 185 } 186 } 187 188 // Test various input buffer sizes, number of buffers and read sizes. 
func TestAsyncReaderWriteTo(t *testing.T) {
	ctx := context.Background()

	// Build 30 texts of increasing length plus one final text that is the
	// concatenation of all of them.
	var texts [31]string
	str := ""
	all := ""
	for i := 0; i < len(texts)-1; i++ {
		texts[i] = str + "\n"
		all += texts[i]
		str += string(rune(i)%26 + 'a')
	}
	texts[len(texts)-1] = all

	// Cross every text with every misbehaving reader, read strategy, bufio
	// size and AsyncReader buffer count, draining via WriteTo this time.
	for h := 0; h < len(texts); h++ {
		text := texts[h]
		for i := 0; i < len(readMakers); i++ {
			for j := 0; j < len(bufreaders); j++ {
				for k := 0; k < len(bufsizes); k++ {
					for l := 1; l < 10; l++ {
						readmaker := readMakers[i]
						bufreader := bufreaders[j]
						bufsize := bufsizes[k]
						read := readmaker.fn(strings.NewReader(text))
						buf := bufio.NewReaderSize(read, bufsize)
						ar, _ := New(ctx, io.NopCloser(buf), l)
						dst := &bytes.Buffer{}
						// EOF and iotest.ErrTimeout are expected terminations.
						_, err := ar.WriteTo(dst)
						if err != nil && err != io.EOF && err != iotest.ErrTimeout {
							t.Fatal("Copy:", err)
						}
						s := dst.String()
						// "timeout" expects the Reader to recover, AsyncReader does not.
						if s != text && readmaker.name != "timeout" {
							t.Errorf("reader=%s fn=%s bufsize=%d want=%q got=%q",
								readmaker.name, bufreader.name, bufsize, text, s)
						}
						err = ar.Close()
						require.NoError(t, err)
					}
				}
			}
		}
	}
}

// Read an infinite number of zeros
type zeroReader struct {
	closed bool // set by Close; makes Read return io.EOF
}

// Read fills p with zeros until the reader is closed, then returns io.EOF.
func (z *zeroReader) Read(p []byte) (n int, err error) {
	if z.closed {
		return 0, io.EOF
	}
	for i := range p {
		p[i] = 0
	}
	return len(p), nil
}

// Close marks the reader closed; a second Close is a programming error.
func (z *zeroReader) Close() error {
	if z.closed {
		panic("double close on zeroReader")
	}
	z.closed = true
	return nil
}

// Test closing and abandoning
//
// A goroutine copies from an endless zeroReader (via WriteTo or Read,
// selected by writeto); after it has demonstrably started, Abandon is called
// and the copy must terminate with ErrorStreamAbandoned having moved some
// bytes.
func testAsyncReaderClose(t *testing.T, writeto bool) {
	ctx := context.Background()

	zr := &zeroReader{}
	a, err := New(ctx, zr, 16)
	require.NoError(t, err)
	var copyN int64
	var copyErr error
	var wg sync.WaitGroup
	started := make(chan struct{})

	wg.Add(1)
	go func() {
		defer wg.Done()
		close(started)
		if writeto {
			// exercise the WriteTo path
			copyN, copyErr = a.WriteTo(io.Discard)
		} else {
			// exercise the Read path
			buf := make([]byte, 64*1024)
			for {
				var n int
				n, copyErr = a.Read(buf)
				copyN += int64(n)
				if copyErr != nil {
					break
				}
			}
		}
	}()
	// Do some copying
	<-started
	time.Sleep(100 * time.Millisecond)
	// Abandon the copy
	a.Abandon()
	wg.Wait()
	assert.Equal(t, ErrorStreamAbandoned, copyErr)
	// t.Logf("Copied %d bytes, err %v", copyN, copyErr)
	assert.True(t, copyN > 0)
}
func TestAsyncReaderCloseRead(t *testing.T)    { testAsyncReaderClose(t, false) }
func TestAsyncReaderCloseWriteTo(t *testing.T) { testAsyncReaderClose(t, true) }

// TestAsyncReaderSkipBytes checks SkipBytes across a matrix of buffer
// counts, initial read sizes and skip amounts (including negative skips and
// skips past EOF), verifying the data read after a successful skip and the
// abandoned/EOF state after a failed one.
func TestAsyncReaderSkipBytes(t *testing.T) {
	ctx := context.Background()

	t.Parallel()
	data := make([]byte, 15000)
	buf := make([]byte, len(data))
	// Fixed seed so the expected bytes are deterministic.
	r := rand.New(rand.NewSource(42))

	n, err := r.Read(data)
	require.NoError(t, err)
	require.Equal(t, len(data), n)

	// Sizes bracket softStartInitial and the data length to hit the
	// interesting buffer-boundary cases.
	initialReads := []int{0, 1, 100, 2048,
		softStartInitial - 1, softStartInitial, softStartInitial + 1,
		8000, len(data)}
	skips := []int{-1000, -101, -100, -99, 0, 1, 2048,
		softStartInitial - 1, softStartInitial, softStartInitial + 1,
		8000, len(data), BufferSize, 2 * BufferSize}

	for buffers := 1; buffers <= 5; buffers++ {
		if israce.Enabled && buffers > 1 {
			t.Skip("FIXME Skipping further tests with race detector until https://github.com/golang/go/issues/27070 is fixed.")
		}
		t.Run(fmt.Sprintf("%d", buffers), func(t *testing.T) {
			for _, initialRead := range initialReads {
				t.Run(fmt.Sprintf("%d", initialRead), func(t *testing.T) {
					for _, skip := range skips {
						t.Run(fmt.Sprintf("%d", skip), func(t *testing.T) {
							ar, err := New(ctx, io.NopCloser(bytes.NewReader(data)), buffers)
							require.NoError(t, err)

							// First consume initialRead bytes and check them.
							wantSkipFalse := false
							buf = buf[:initialRead]
							n, err := readers.ReadFill(ar, buf)
							if initialRead >= len(data) {
								// Everything was consumed, so a subsequent
								// skip cannot succeed.
								wantSkipFalse = true
								if initialRead > len(data) {
									assert.Equal(t, err, io.EOF)
								} else {
									assert.True(t, err == nil || err == io.EOF)
								}
								assert.Equal(t, len(data), n)
								assert.Equal(t, data, buf[:len(data)])
							} else {
								assert.NoError(t, err)
								assert.Equal(t, initialRead, n)
								assert.Equal(t, data[:initialRead], buf)
							}

							// Now attempt the skip and read up to 1024 more
							// bytes from the resulting offset.
							skipped := ar.SkipBytes(skip)
							buf = buf[:1024]
							n, err = readers.ReadFill(ar, buf)
							offset := initialRead + skip
							if skipped {
								assert.False(t, wantSkipFalse)
								l := len(buf)
								if offset >= len(data) {
									assert.Equal(t, err, io.EOF)
								} else {
									if offset+1024 >= len(data) {
										l = len(data) - offset
									}
									assert.Equal(t, l, n)
									assert.Equal(t, data[offset:offset+l], buf[:l])
								}
							} else {
								// Failed skip abandons the stream unless we
								// were already at EOF.
								if initialRead >= len(data) {
									assert.Equal(t, err, io.EOF)
								} else {
									assert.True(t, err == ErrorStreamAbandoned || err == io.EOF)
								}
							}
						})
					}
				})
			}
		})
	}
}