storj.io/uplink@v1.13.0/private/eestream/rs_test.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package eestream_test

import (
	"bytes"
	"context"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"math/rand"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"

	"storj.io/common/encryption"
	"storj.io/common/memory"
	"storj.io/common/ranger"
	"storj.io/common/readcloser"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/infectious"
	"storj.io/uplink/private/eestream"
)

func TestRS(t *testing.T) {
	ctx := context.Background()
	data := testrand.Bytes(32 * 1024)
	fc, err := infectious.NewFEC(2, 4)
	if err != nil {
		t.Fatal(err)
	}
	es := eestream.NewRSScheme(fc, 8*1024)
	rs, err := eestream.NewRedundancyStrategy(es, 0, 0)
	if err != nil {
		t.Fatal(err)
	}
	readers, err := eestream.EncodeReader2(ctx, bytes.NewReader(data), rs)
	if err != nil {
		t.Fatal(err)
	}
	readerMap := make(map[int]io.ReadCloser, len(readers))
	for i, reader := range readers {
		readerMap[i] = reader
	}
	ctx, cancel := context.WithCancel(ctx)
	decoder := eestream.DecodeReaders2(ctx, cancel, readerMap, rs, 32*1024, 0, false)
	defer func() { assert.NoError(t, decoder.Close()) }()
	data2, err := io.ReadAll(decoder)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, data, data2)
}

// Check that io.ReadFull will return io.ErrUnexpectedEOF
// if DecodeReaders2 returns less data than expected.
func TestRSUnexpectedEOF(t *testing.T) {
	ctx := context.Background()
	data := testrand.Bytes(32 * 1024)
	fc, err := infectious.NewFEC(2, 4)
	if err != nil {
		t.Fatal(err)
	}
	es := eestream.NewRSScheme(fc, 8*1024)
	rs, err := eestream.NewRedundancyStrategy(es, 0, 0)
	if err != nil {
		t.Fatal(err)
	}
	readers, err := eestream.EncodeReader2(ctx, bytes.NewReader(data), rs)
	if err != nil {
		t.Fatal(err)
	}
	readerMap := make(map[int]io.ReadCloser, len(readers))
	for i, reader := range readers {
		readerMap[i] = reader
	}
	ctx, cancel := context.WithCancel(ctx)
	decoder := eestream.DecodeReaders2(ctx, cancel, readerMap, rs, 32*1024, 0, false)
	defer func() { assert.NoError(t, decoder.Close()) }()
	// Try to ReadFull more data from eestream.DecodeReaders2 than is available
	data2 := make([]byte, len(data)+1024)
	_, err = io.ReadFull(decoder, data2)
	assert.EqualError(t, err, io.ErrUnexpectedEOF.Error())
}

// TestRSRanger pads, encrypts, and erasure-encodes random data, then decodes
// it back through the ranger interface (decode, decrypt, unpad) and verifies
// the round trip reproduces the original data.
func TestRSRanger(t *testing.T) {
	ctx := context.Background()
	data := testrand.Bytes(32 * 1024)
	fc, err := infectious.NewFEC(2, 4)
	if err != nil {
		t.Fatal(err)
	}
	es := eestream.NewRSScheme(fc, 8*1024)
	rs, err := eestream.NewRedundancyStrategy(es, 0, 0)
	if err != nil {
		t.Fatal(err)
	}
	encKey := storj.Key(sha256.Sum256([]byte("the secret key")))
	var firstNonce storj.Nonce
	const stripesPerBlock = 2
	blockSize := stripesPerBlock * rs.StripeSize()
	encrypter, err := encryption.NewEncrypter(storj.EncAESGCM, &encKey, &firstNonce, blockSize)
	if err != nil {
		t.Fatal(err)
	}
	readers, err := eestream.EncodeReader2(ctx, encryption.TransformReader(encryption.PadReader(io.NopCloser(
		bytes.NewReader(data)), encrypter.InBlockSize()), encrypter, 0), rs)
	if err != nil {
		t.Fatal(err)
	}
	pieces, err := readAll(readers)
	if err != nil {
		t.Fatal(err)
	}
	rrs := map[int]ranger.Ranger{}
	for i, piece := range pieces {
		rrs[i] = ranger.ByteRanger(piece)
	}
	decrypter, err := encryption.NewDecrypter(storj.EncAESGCM, &encKey, &firstNonce, blockSize)
	if err != nil {
		t.Fatal(err)
	}
	rc, err := eestream.Decode(rrs, rs, 0, false)
	if err != nil {
		t.Fatal(err)
	}
	rr, err := encryption.Transform(rc, decrypter)
	if err != nil {
		t.Fatal(err)
	}
	rr, err = encryption.UnpadSlow(ctx, rr)
	if err != nil {
		t.Fatal(err)
	}
	r, err := rr.Range(ctx, 0, rr.Size())
	if err != nil {
		t.Fatal(err)
	}
	data2, err := io.ReadAll(r)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(data, data2) {
		t.Fatalf("rs encode/decode failed")
	}
}

func TestNewRedundancyStrategy(t *testing.T) {
	for i, tt := range []struct {
		rep       int
		opt       int
		expRep    int
		expOpt    int
		errString string
	}{
		{0, 0, 4, 4, ""},
		{-1, 0, 0, 0, "eestream: negative repair threshold"},
		{1, 0, 0, 0, "eestream: repair threshold less than required count"},
		{5, 0, 0, 0, "eestream: repair threshold greater than total count"},
		{0, -1, 0, 0, "eestream: negative optimal threshold"},
		{0, 1, 0, 0, "eestream: optimal threshold less than required count"},
		{0, 5, 0, 0, "eestream: optimal threshold greater than total count"},
		{3, 4, 3, 4, ""},
		{0, 3, 0, 0, "eestream: repair threshold greater than optimal threshold"},
		{4, 3, 0, 0, "eestream: repair threshold greater than optimal threshold"},
		{4, 4, 4, 4, ""},
	} {
		errTag := fmt.Sprintf("Test case #%d", i)
		fc, err := infectious.NewFEC(2, 4)
		if !assert.NoError(t, err, errTag) {
			continue
		}
		es := eestream.NewRSScheme(fc, 8*1024)
		rs, err := eestream.NewRedundancyStrategy(es, tt.rep, tt.opt)
		if tt.errString != "" {
			assert.EqualError(t, err, tt.errString, errTag)
			continue
		}
		assert.NoError(t, err, errTag)
		assert.Equal(t, tt.expRep, rs.RepairThreshold(), errTag)
		assert.Equal(t, tt.expOpt, rs.OptimalThreshold(), errTag)
	}
}

// Some pieces will return a read error.
// Test will pass if at least the required number of pieces are still good.
func TestRSErrors(t *testing.T) {
	for i, tt := range []testCase{
		{4 * 1024, 1024, 1, 1, 0, false, false},
		{4 * 1024, 1024, 1, 1, 1, true, false},
		{4 * 1024, 1024, 1, 2, 0, false, false},
		{4 * 1024, 1024, 1, 2, 1, false, false},
		{4 * 1024, 1024, 1, 2, 2, true, false},
		{4 * 1024, 1024, 2, 4, 0, false, false},
		{4 * 1024, 1024, 2, 4, 1, false, false},
		{4 * 1024, 1024, 2, 4, 2, false, false},
		{4 * 1024, 1024, 2, 4, 3, true, false},
		{4 * 1024, 1024, 2, 4, 4, true, false},
		{6 * 1024, 1024, 3, 7, 0, false, false},
		{6 * 1024, 1024, 3, 7, 1, false, false},
		{6 * 1024, 1024, 3, 7, 2, false, false},
		{6 * 1024, 1024, 3, 7, 3, false, false},
		{6 * 1024, 1024, 3, 7, 4, false, false},
		{6 * 1024, 1024, 3, 7, 5, true, false},
		{6 * 1024, 1024, 3, 7, 6, true, false},
		{6 * 1024, 1024, 3, 7, 7, true, false},
	} {
		testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
			return readcloser.FatalReadCloser(
				errors.New("I am an error piece"))
		})
	}
}

// Some pieces will read EOF at the beginning (byte 0).
// Test will pass if those pieces are fewer than required.
func TestRSEOF(t *testing.T) {
	for i, tt := range []testCase{
		{4 * 1024, 1024, 1, 1, 0, false, false},
		{4 * 1024, 1024, 1, 1, 1, true, false},
		{4 * 1024, 1024, 1, 2, 0, false, false},
		{4 * 1024, 1024, 1, 2, 1, false, false},
		{4 * 1024, 1024, 1, 2, 2, true, false},
		{4 * 1024, 1024, 2, 4, 0, false, false},
		{4 * 1024, 1024, 2, 4, 1, false, false},
		{4 * 1024, 1024, 2, 4, 2, false, false},
		{4 * 1024, 1024, 2, 4, 3, true, false},
		{4 * 1024, 1024, 2, 4, 4, true, false},
		{6 * 1024, 1024, 3, 7, 0, false, false},
		{6 * 1024, 1024, 3, 7, 1, false, false},
		{6 * 1024, 1024, 3, 7, 2, false, false},
		{6 * 1024, 1024, 3, 7, 3, false, false},
		{6 * 1024, 1024, 3, 7, 4, false, false},
		{6 * 1024, 1024, 3, 7, 5, true, false},
		{6 * 1024, 1024, 3, 7, 6, true, false},
		{6 * 1024, 1024, 3, 7, 7, true, false},
	} {
		testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
			return readcloser.LimitReadCloser(
				io.NopCloser(bytes.NewReader(in)), 0)
		})
	}
}

// Some pieces will read EOF earlier than expected.
// Test will pass if those pieces are fewer than required.
func TestRSEarlyEOF(t *testing.T) {
	for i, tt := range []testCase{
		{4 * 1024, 1024, 1, 1, 0, false, false},
		{4 * 1024, 1024, 1, 1, 1, true, false},
		{4 * 1024, 1024, 1, 2, 0, false, false},
		{4 * 1024, 1024, 1, 2, 1, false, false},
		{4 * 1024, 1024, 1, 2, 2, true, false},
		{4 * 1024, 1024, 2, 4, 0, false, false},
		{4 * 1024, 1024, 2, 4, 1, false, false},
		{4 * 1024, 1024, 2, 4, 2, false, false},
		{4 * 1024, 1024, 2, 4, 3, true, false},
		{4 * 1024, 1024, 2, 4, 4, true, false},
		{6 * 1024, 1024, 3, 7, 0, false, false},
		{6 * 1024, 1024, 3, 7, 1, false, false},
		{6 * 1024, 1024, 3, 7, 2, false, false},
		{6 * 1024, 1024, 3, 7, 3, false, false},
		{6 * 1024, 1024, 3, 7, 4, false, false},
		{6 * 1024, 1024, 3, 7, 5, true, false},
		{6 * 1024, 1024, 3, 7, 6, true, false},
		{6 * 1024, 1024, 3, 7, 7, true, false},
	} {
		testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
			// Read EOF after 500 bytes
			return readcloser.LimitReadCloser(
				io.NopCloser(bytes.NewReader(in)), 500)
		})
	}
}

// Some pieces will read EOF later than expected.
// Test will pass if at least the required number of pieces are still good.
func TestRSLateEOF(t *testing.T) {
	for i, tt := range []testCase{
		{4 * 1024, 1024, 1, 1, 0, false, false},
		{4 * 1024, 1024, 1, 1, 1, false, false},
		{4 * 1024, 1024, 1, 2, 0, false, false},
		{4 * 1024, 1024, 1, 2, 1, false, false},
		{4 * 1024, 1024, 1, 2, 2, false, false},
		{4 * 1024, 1024, 2, 4, 0, false, false},
		{4 * 1024, 1024, 2, 4, 1, false, false},
		{4 * 1024, 1024, 2, 4, 2, false, false},
		{4 * 1024, 1024, 2, 4, 3, false, false},
		{4 * 1024, 1024, 2, 4, 4, false, false},
		{6 * 1024, 1024, 3, 7, 0, false, false},
		{6 * 1024, 1024, 3, 7, 1, false, false},
		{6 * 1024, 1024, 3, 7, 2, false, false},
		{6 * 1024, 1024, 3, 7, 3, false, false},
		{6 * 1024, 1024, 3, 7, 4, false, false},
		{6 * 1024, 1024, 3, 7, 5, false, false},
		{6 * 1024, 1024, 3, 7, 6, false, false},
		{6 * 1024, 1024, 3, 7, 7, false, false},
	} {
		testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
			// extend the input with a random number of random bytes
			random := testrand.BytesInt(1 + testrand.Intn(10000))
			in = append(in, random...)
			return io.NopCloser(bytes.NewReader(in))
		})
	}
}

// Some pieces will read random data.
// Test will pass if there are enough good pieces for error correction.
func TestRSRandomData(t *testing.T) {
	for i, tt := range []testCase{
		{4 * 1024, 1024, 1, 1, 0, false, true},
		{4 * 1024, 1024, 1, 1, 1, true, true},
		{4 * 1024, 1024, 1, 2, 0, false, true},
		{4 * 1024, 1024, 1, 2, 1, true, true},
		{4 * 1024, 1024, 1, 2, 2, true, true},
		{4 * 1024, 1024, 2, 4, 0, false, true},
		{4 * 1024, 1024, 2, 4, 1, false, true},
		{4 * 1024, 1024, 2, 4, 2, true, true},
		{4 * 1024, 1024, 2, 4, 3, true, true},
		{4 * 1024, 1024, 2, 4, 4, true, true},
		{6 * 1024, 1024, 3, 7, 0, false, true},
		{6 * 1024, 1024, 3, 7, 1, false, true},
		{6 * 1024, 1024, 3, 7, 2, false, true},
		{6 * 1024, 1024, 3, 7, 4, true, true},
		{6 * 1024, 1024, 3, 7, 5, true, true},
		{6 * 1024, 1024, 3, 7, 6, true, true},
		{6 * 1024, 1024, 3, 7, 7, true, true},
	} {
		testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
			// return random data instead of the expected piece
			return io.NopCloser(bytes.NewReader(testrand.BytesInt(len(in))))
		})
	}
}

// Some pieces will read slowly.
func TestRSSlow(t *testing.T) {
	for i, tt := range []testCase{
		{4 * 1024, 1024, 1, 1, 0, false, false},
		{4 * 1024, 1024, 1, 2, 0, false, false},
		{4 * 1024, 1024, 2, 4, 0, false, false},
		{4 * 1024, 1024, 2, 4, 1, false, false},
		{6 * 1024, 1024, 3, 7, 0, false, false},
		{6 * 1024, 1024, 3, 7, 1, false, false},
		{6 * 1024, 1024, 3, 7, 2, false, false},
		{6 * 1024, 1024, 3, 7, 3, false, false},
	} {
		start := time.Now()
		testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
			// sleep 1 second before every read
			return io.NopCloser(SlowReader(bytes.NewReader(in), 1*time.Second))
		})
		if time.Since(start) > 1*time.Second {
			t.Fatalf("waited for slow reader")
		}
	}
}

type testCase struct {
	dataSize       int
	blockSize      int
	required       int
	total          int
	problematic    int
	fail           bool
	errorDetection bool
}

type problematicReadCloser func([]byte) io.ReadCloser

// testRSProblematic encodes random data, replaces the first tt.problematic
// piece readers with the problematic readers built by fn, and checks whether
// decoding succeeds or fails as the test case expects.
func testRSProblematic(t *testing.T, tt testCase, i int, fn problematicReadCloser) {
	errTag := fmt.Sprintf("Test case #%d", i)
	ctx := context.Background()
	data := testrand.BytesInt(tt.dataSize)
	fc, err := infectious.NewFEC(tt.required, tt.total)
	if !assert.NoError(t, err, errTag) {
		return
	}
	es := eestream.NewRSScheme(fc, tt.blockSize)
	rs, err := eestream.NewRedundancyStrategy(es, 0, 0)
	if !assert.NoError(t, err, errTag) {
		return
	}
	readers, err := eestream.EncodeReader2(ctx, bytes.NewReader(data), rs)
	if !assert.NoError(t, err, errTag) {
		return
	}
	// read all readers into []byte buffers to avoid a deadlock if later
	// we don't read in parallel from all of them
	pieces, err := readAll(readers)
	if !assert.NoError(t, err, errTag) {
		return
	}
	readerMap := make(map[int]io.ReadCloser, len(readers))
	// some readers will have problematic behavior
	for i := 0; i < tt.problematic; i++ {
		readerMap[i] = fn(pieces[i])
	}
	// the rest will operate normally
	for i := tt.problematic; i < tt.total; i++ {
		readerMap[i] = io.NopCloser(bytes.NewReader(pieces[i]))
	}
	ctx, cancel := context.WithCancel(ctx)
	decoder := eestream.DecodeReaders2(ctx, cancel, readerMap, rs, int64(tt.dataSize), 3*1024,
		tt.errorDetection)
	defer func() { assert.NoError(t, decoder.Close()) }()
	data2, err := io.ReadAll(decoder)
	if tt.fail {
		if err == nil && bytes.Equal(data, data2) {
			assert.Fail(t, "expected to fail, but didn't", errTag)
		}
	} else if assert.NoError(t, err, errTag) {
		assert.Equal(t, data, data2, errTag)
	}
}

// readAll reads all readers fully, in parallel, into byte slices.
func readAll(readers []io.ReadCloser) ([][]byte, error) {
	pieces := make([][]byte, len(readers))
	errors := make(chan error, len(readers))
	for i := range readers {
		go func(i int) {
			var err error
			pieces[i], err = io.ReadAll(readers[i])
			errors <- errs.Combine(err, readers[i].Close())
		}(i)
	}
	for range readers {
		err := <-errors
		if err != nil {
			return nil, err
		}
	}
	return pieces, nil
}

// SlowReader wraps r so that every Read sleeps for the given delay first.
func SlowReader(r io.Reader, delay time.Duration) io.Reader {
	return &slowReader{Reader: r, Delay: delay}
}

type slowReader struct {
	Reader io.Reader
	Delay  time.Duration
}

func (s *slowReader) Read(p []byte) (n int, err error) {
	time.Sleep(s.Delay)
	return s.Reader.Read(p)
}

// TestEncoderStalledReaders verifies that the non-stalled piece readers can be
// read to completion quickly, without waiting on readers that are never read.
func TestEncoderStalledReaders(t *testing.T) {
	ctx := context.Background()
	data := testrand.Bytes(120 * 1024)
	fc, err := infectious.NewFEC(30, 60)
	if err != nil {
		t.Fatal(err)
	}
	es := eestream.NewRSScheme(fc, 1024)
	rs, err := eestream.NewRedundancyStrategy(es, 35, 50)
	if err != nil {
		t.Fatal(err)
	}
	readers, err := eestream.EncodeReader2(ctx, bytes.NewReader(data), rs)
	if err != nil {
		t.Fatal(err)
	}
	start := time.Now()
	_, err = readAllStalled(readers, 25)
	assert.NoError(t, err)
	if time.Since(start) > 1*time.Second {
		t.Fatalf("waited for slow reader")
	}
	for _, reader := range readers {
		assert.NoError(t, reader.Close())
	}
}

// readAllStalled reads all readers with index >= stalled in parallel; the
// first `stalled` readers are never read from.
func readAllStalled(readers []io.ReadCloser, stalled int) ([][]byte, error) {
	pieces := make([][]byte, len(readers))
	errs := make(chan error, len(readers))
	for i := stalled; i < len(readers); i++ {
		go func(i int) {
			var err error
			pieces[i], err = io.ReadAll(readers[i])
			errs <- err
		}(i)
	}
	for i := stalled; i < len(readers); i++ {
		err := <-errs
		if err != nil {
			return nil, err
		}
	}
	return pieces, nil
}

func TestDecoderErrorWithStalledReaders(t *testing.T) {
	ctx := context.Background()
	data := testrand.Bytes(10 * 1024)
	fc, err := infectious.NewFEC(10, 20)
	if err != nil {
		t.Fatal(err)
	}
	es := eestream.NewRSScheme(fc, 1024)
	rs, err := eestream.NewRedundancyStrategy(es, 0, 0)
	if err != nil {
		t.Fatal(err)
	}
	readers, err := eestream.EncodeReader2(ctx, bytes.NewReader(data), rs)
	if err != nil {
		t.Fatal(err)
	}
	// read all readers into []byte buffers to avoid a deadlock if later
	// we don't read in parallel from all of them
	pieces, err := readAll(readers)
	if !assert.NoError(t, err) {
		return
	}
	readerMap := make(map[int]io.ReadCloser, len(readers))
	// just a few readers will operate normally
	for i := 0; i < 4; i++ {
		readerMap[i] = io.NopCloser(bytes.NewReader(pieces[i]))
	}
	// some of the readers will be slow
	for i := 4; i < 7; i++ {
		readerMap[i] = io.NopCloser(SlowReader(bytes.NewReader(pieces[i]), 1*time.Second))
	}
	// most of the readers will return an error
	for i := 7; i < 20; i++ {
		readerMap[i] = readcloser.FatalReadCloser(errors.New("I am an error piece"))
	}
	ctx, cancel := context.WithCancel(ctx)
	decoder := eestream.DecodeReaders2(ctx, cancel, readerMap, rs, int64(10*1024), 0, false)
	defer func() { assert.NoError(t, decoder.Close()) }()
	// record the time for reading the data from the decoder
	start := time.Now()
	_, err = io.ReadAll(decoder)
	// we expect the decoder to fail with an error as there are not enough good
	// nodes to reconstruct the data
	assert.Error(t, err)
	// but without waiting for the slowest nodes
	if time.Since(start) > 1*time.Second {
		t.Fatalf("waited for slow reader")
	}
}

func BenchmarkReedSolomonErasureScheme(b *testing.B) {
	data := testrand.Bytes(8 << 20)
	output := make([]byte, 8<<20)

	confs := []struct{ required, total int }{
		{2, 4},
		{20, 50},
		{30, 60},
		{50, 80},
	}

	dataSizes := []int{
		100,
		1 << 10,
		256 << 10,
		1 << 20,
		5 << 20,
		8 << 20,
	}

	bytesToStr := func(bytes int) string {
		switch {
		case bytes > 10000000:
			return fmt.Sprintf("%.fMB", float64(bytes)/float64(1<<20))
		case bytes > 1000:
			return fmt.Sprintf("%.fKB", float64(bytes)/float64(1<<10))
		default:
			return fmt.Sprintf("%dB", bytes)
		}
	}

	for _, conf := range confs {
		configuration := conf
		confname := fmt.Sprintf("r%dt%d/", configuration.required, configuration.total)
		for _, expDataSize := range dataSizes {
			dataSize := (expDataSize / configuration.required) * configuration.required
			testname := bytesToStr(dataSize)
			forwardErrorCode, _ := infectious.NewFEC(configuration.required, configuration.total)
			erasureScheme := eestream.NewRSScheme(forwardErrorCode, 8*1024)

			b.Run("Encode/"+confname+testname, func(b *testing.B) {
				b.SetBytes(int64(dataSize))
				for i := 0; i < b.N; i++ {
					err := erasureScheme.Encode(data[:dataSize], func(num int, data []byte) {
						_, _ = num, data
					})
					if err != nil {
						b.Fatal(err)
					}
				}
			})

			shares := []infectious.Share{}
			err := erasureScheme.Encode(data[:dataSize], func(num int, data []byte) {
				shares = append(shares, infectious.Share{
					Number: num,
					Data:   append([]byte{}, data...),
				})
			})
			if err != nil {
				b.Fatal(err)
			}

			b.Run("Decode/"+confname+testname, func(b *testing.B) {
				b.SetBytes(int64(dataSize))
				for i := 0; i < b.N; i++ {
					rand.Shuffle(len(shares), func(i, k int) {
						shares[i], shares[k] = shares[k], shares[i]
					})

					offset := i % (configuration.total / 4)
					n := configuration.required + 1 + offset
					if n > configuration.total {
						n = configuration.total
					}

					_, err = erasureScheme.Decode(output[:dataSize], shares[:n])
					if err != nil {
						b.Fatal(err)
					}
				}
			})
		}
	}
}

// TestCalcPieceSize verifies that CalcPieceSize matches the actual piece sizes
// produced by EncodeReader2 for a range of data sizes.
func TestCalcPieceSize(t *testing.T) {
	const uint32Size = 4
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	for i, dataSize := range []int64{
		0,
		1,
		1*memory.KiB.Int64() - uint32Size,
		1 * memory.KiB.Int64(),
		32*memory.KiB.Int64() - uint32Size,
		32 * memory.KiB.Int64(),
		32*memory.KiB.Int64() + 100,
	} {
		errTag := fmt.Sprintf("%d. %+v", i, dataSize)

		fc, err := infectious.NewFEC(2, 4)
		require.NoError(t, err, errTag)
		es := eestream.NewRSScheme(fc, 1*memory.KiB.Int())
		rs, err := eestream.NewRedundancyStrategy(es, 0, 0)
		require.NoError(t, err, errTag)

		calculatedSize := eestream.CalcPieceSize(dataSize, es)

		randReader := io.NopCloser(io.LimitReader(testrand.Reader(), dataSize))
		readers, err := eestream.EncodeReader2(ctx, encryption.PadReader(randReader, es.StripeSize()), rs)
		require.NoError(t, err, errTag)

		for _, reader := range readers {
			piece, err := io.ReadAll(reader)
			assert.NoError(t, err, errTag)
			assert.EqualValues(t, calculatedSize, len(piece), errTag)
		}
	}
}