github.com/petermattis/pebble@v0.0.0-20190905164901-ab51a2166067/internal/record/record_test.go (about) 1 // Copyright 2011 The LevelDB-Go and Pebble Authors. All rights reserved. Use 2 // of this source code is governed by a BSD-style license that can be found in 3 // the LICENSE file. 4 5 package record 6 7 import ( 8 "bytes" 9 "encoding/binary" 10 "errors" 11 "fmt" 12 "io" 13 "io/ioutil" 14 "os" 15 "strings" 16 "testing" 17 "time" 18 19 "golang.org/x/exp/rand" 20 ) 21 22 func short(s string) string { 23 if len(s) < 64 { 24 return s 25 } 26 return fmt.Sprintf("%s...(skipping %d bytes)...%s", s[:20], len(s)-40, s[len(s)-20:]) 27 } 28 29 // big returns a string of length n, composed of repetitions of partial. 30 func big(partial string, n int) string { 31 return strings.Repeat(partial, n/len(partial)+1)[:n] 32 } 33 34 type recordWriter interface { 35 WriteRecord([]byte) (int64, error) 36 Close() error 37 } 38 39 func testGeneratorWriter( 40 t *testing.T, 41 reset func(), 42 gen func() (string, bool), 43 newWriter func(io.Writer) recordWriter, 44 ) { 45 buf := new(bytes.Buffer) 46 47 reset() 48 w := newWriter(buf) 49 for { 50 s, ok := gen() 51 if !ok { 52 break 53 } 54 if _, err := w.WriteRecord([]byte(s)); err != nil { 55 t.Fatalf("Write: %v", err) 56 } 57 } 58 if err := w.Close(); err != nil { 59 t.Fatalf("Close: %v", err) 60 } 61 reset() 62 r := NewReader(buf, 0 /* logNum */) 63 for { 64 s, ok := gen() 65 if !ok { 66 break 67 } 68 rr, err := r.Next() 69 if err != nil { 70 t.Fatalf("reader.Next: %v", err) 71 } 72 x, err := ioutil.ReadAll(rr) 73 if err != nil { 74 t.Fatalf("ReadAll: %v", err) 75 } 76 if string(x) != s { 77 t.Fatalf("got %q, want %q", short(string(x)), short(s)) 78 } 79 } 80 if _, err := r.Next(); err != io.EOF { 81 t.Fatalf("got %v, want %v", err, io.EOF) 82 } 83 } 84 85 func testGenerator(t *testing.T, reset func(), gen func() (string, bool)) { 86 t.Run("Writer", func(t *testing.T) { 87 testGeneratorWriter(t, reset, gen, func(w io.Writer) recordWriter { 88 return NewWriter(w) 89 }) 90 }) 91 92 t.Run("LogWriter", func(t *testing.T) { 93 testGeneratorWriter(t, reset, gen, func(w io.Writer) recordWriter { 94 return NewLogWriter(w, 0 /* logNum */) 95 }) 96 }) 97 } 98 99 func testLiterals(t *testing.T, s []string) { 100 var i int 101 reset := func() { 102 i = 0 103 } 104 gen := func() (string, bool) { 105 if i == len(s) { 106 return "", false 107 } 108 i++ 109 return s[i-1], true 110 } 111 testGenerator(t, reset, gen) 112 } 113 114 func TestMany(t *testing.T) { 115 const n = 1e5 116 var i int 117 reset := func() { 118 i = 0 119 } 120 gen := func() (string, bool) { 121 if i == n { 122 return "", false 123 } 124 i++ 125 return fmt.Sprintf("%d.", i-1), true 126 } 127 testGenerator(t, reset, gen) 128 } 129 130 func TestRandom(t *testing.T) { 131 const n = 1e2 132 var ( 133 i int 134 r *rand.Rand 135 ) 136 reset := func() { 137 i, r = 0, rand.New(rand.NewSource(0)) 138 } 139 gen := func() (string, bool) { 140 if i == n { 141 return "", false 142 } 143 i++ 144 return strings.Repeat(string(uint8(i)), r.Intn(2*blockSize+16)), true 145 } 146 testGenerator(t, reset, gen) 147 } 148 149 func TestBasic(t *testing.T) { 150 testLiterals(t, []string{ 151 strings.Repeat("a", 1000), 152 strings.Repeat("b", 97270), 153 strings.Repeat("c", 8000), 154 }) 155 } 156 157 func TestBoundary(t *testing.T) { 158 for i := blockSize - 16; i < blockSize+16; i++ { 159 s0 := big("abcd", i) 160 for j := blockSize - 16; j < blockSize+16; j++ { 161 s1 := big("ABCDE", j) 162 testLiterals(t, []string{s0, s1}) 163 testLiterals(t, []string{s0, "", s1}) 164 testLiterals(t, []string{s0, "x", s1}) 165 } 166 } 167 } 168 169 func TestFlush(t *testing.T) { 170 buf := new(bytes.Buffer) 171 w := NewWriter(buf) 172 // Write a couple of records. Everything should still be held 173 // in the record.Writer buffer, so that buf.Len should be 0. 174 w0, _ := w.Next() 175 w0.Write([]byte("0")) 176 w1, _ := w.Next() 177 w1.Write([]byte("11")) 178 if got, want := buf.Len(), 0; got != want { 179 t.Fatalf("buffer length #0: got %d want %d", got, want) 180 } 181 // Flush the record.Writer buffer, which should yield 17 bytes. 182 // 17 = 2*7 + 1 + 2, which is two headers and 1 + 2 payload bytes. 183 if err := w.Flush(); err != nil { 184 t.Fatal(err) 185 } 186 if got, want := buf.Len(), 17; got != want { 187 t.Fatalf("buffer length #1: got %d want %d", got, want) 188 } 189 // Do another write, one that isn't large enough to complete the block. 190 // The write should not have flowed through to buf. 191 w2, _ := w.Next() 192 w2.Write(bytes.Repeat([]byte("2"), 10000)) 193 if got, want := buf.Len(), 17; got != want { 194 t.Fatalf("buffer length #2: got %d want %d", got, want) 195 } 196 // Flushing should get us up to 10024 bytes written. 197 // 10024 = 17 + 7 + 10000. 198 if err := w.Flush(); err != nil { 199 t.Fatal(err) 200 } 201 if got, want := buf.Len(), 10024; got != want { 202 t.Fatalf("buffer length #3: got %d want %d", got, want) 203 } 204 // Do a bigger write, one that completes the current block. 205 // We should now have 32768 bytes (a complete block), without 206 // an explicit flush. 207 w3, _ := w.Next() 208 w3.Write(bytes.Repeat([]byte("3"), 40000)) 209 if got, want := buf.Len(), 32768; got != want { 210 t.Fatalf("buffer length #4: got %d want %d", got, want) 211 } 212 // Flushing should get us up to 50038 bytes written. 213 // 50038 = 10024 + 2*7 + 40000. There are two headers because 214 // the one record was split into two chunks. 215 if err := w.Flush(); err != nil { 216 t.Fatal(err) 217 } 218 if got, want := buf.Len(), 50038; got != want { 219 t.Fatalf("buffer length #5: got %d want %d", got, want) 220 } 221 // Check that reading those records give the right lengths. 222 r := NewReader(buf, 0 /* logNum */) 223 wants := []int64{1, 2, 10000, 40000} 224 for i, want := range wants { 225 rr, _ := r.Next() 226 n, err := io.Copy(ioutil.Discard, rr) 227 if err != nil { 228 t.Fatalf("read #%d: %v", i, err) 229 } 230 if n != want { 231 t.Fatalf("read #%d: got %d bytes want %d", i, n, want) 232 } 233 } 234 } 235 236 func TestNonExhaustiveRead(t *testing.T) { 237 const n = 100 238 buf := new(bytes.Buffer) 239 p := make([]byte, 10) 240 rnd := rand.New(rand.NewSource(1)) 241 242 w := NewWriter(buf) 243 for i := 0; i < n; i++ { 244 length := len(p) + rnd.Intn(3*blockSize) 245 s := string(uint8(i)) + "123456789abcdefgh" 246 _, _ = w.WriteRecord([]byte(big(s, length))) 247 } 248 if err := w.Close(); err != nil { 249 t.Fatalf("Close: %v", err) 250 } 251 252 r := NewReader(buf, 0 /* logNum */) 253 for i := 0; i < n; i++ { 254 rr, _ := r.Next() 255 _, err := io.ReadFull(rr, p) 256 if err != nil { 257 t.Fatalf("ReadFull: %v", err) 258 } 259 want := string(uint8(i)) + "123456789" 260 if got := string(p); got != want { 261 t.Fatalf("read #%d: got %q want %q", i, got, want) 262 } 263 } 264 } 265 266 func TestStaleReader(t *testing.T) { 267 buf := new(bytes.Buffer) 268 269 w := NewWriter(buf) 270 if _, err := w.WriteRecord([]byte("0")); err != nil { 271 t.Fatal(err) 272 } 273 if _, err := w.WriteRecord([]byte("11")); err != nil { 274 t.Fatal(err) 275 } 276 if err := w.Close(); err != nil { 277 t.Fatalf("Close: %v\n", err) 278 } 279 280 r := NewReader(buf, 0 /* logNum */) 281 r0, err := r.Next() 282 if err != nil { 283 t.Fatalf("reader.Next: %v", err) 284 } 285 r1, err := r.Next() 286 if err != nil { 287 t.Fatalf("reader.Next: %v", err) 288 } 289 p := make([]byte, 1) 290 if _, err := r0.Read(p); err == nil || !strings.Contains(err.Error(), "stale") { 291 t.Fatalf("stale read #0: unexpected error: %v", err) 292 } 293 if _, err := r1.Read(p); err != nil { 294 t.Fatalf("fresh read #1: got %v want nil error", err) 295 } 296 if p[0] != '1' { 297 t.Fatalf("fresh read #1: byte contents: got '%c' want '1'", p[0]) 298 } 299 } 300 301 type testRecords struct { 302 records [][]byte // The raw value of each record. 303 offsets []int64 // The offset of each record within buf, derived from writer.LastRecordOffset. 304 buf []byte // The serialized records form of all records. 305 } 306 307 // makeTestRecords generates test records of specified lengths. 308 // The first record will consist of repeating 0x00 bytes, the next record of 309 // 0x01 bytes, and so forth. The values will loop back to 0x00 after 0xff. 310 func makeTestRecords(recordLengths ...int) (*testRecords, error) { 311 ret := &testRecords{} 312 ret.records = make([][]byte, len(recordLengths)) 313 ret.offsets = make([]int64, len(recordLengths)) 314 for i, n := range recordLengths { 315 ret.records[i] = bytes.Repeat([]byte{byte(i)}, n) 316 } 317 318 buf := new(bytes.Buffer) 319 w := NewWriter(buf) 320 for i, rec := range ret.records { 321 wRec, err := w.Next() 322 if err != nil { 323 return nil, err 324 } 325 326 // Alternate between one big write and many small writes. 327 cSize := 8 328 if i&1 == 0 { 329 cSize = len(rec) 330 } 331 for ; len(rec) > cSize; rec = rec[cSize:] { 332 if _, err := wRec.Write(rec[:cSize]); err != nil { 333 return nil, err 334 } 335 } 336 if _, err := wRec.Write(rec); err != nil { 337 return nil, err 338 } 339 340 ret.offsets[i], err = w.LastRecordOffset() 341 if err != nil { 342 return nil, err 343 } 344 } 345 346 if err := w.Close(); err != nil { 347 return nil, err 348 } 349 350 ret.buf = buf.Bytes() 351 return ret, nil 352 } 353 354 // corruptBlock corrupts the checksum of the record that starts at the 355 // specified block offset. The number of the block offset is 0 based. 356 func corruptBlock(buf []byte, blockNum int) { 357 // Ensure we always permute at least 1 byte of the checksum. 358 if buf[blockSize*blockNum] == 0x00 { 359 buf[blockSize*blockNum] = 0xff 360 } else { 361 buf[blockSize*blockNum] = 0x00 362 } 363 364 buf[blockSize*blockNum+1] = 0x00 365 buf[blockSize*blockNum+2] = 0x00 366 buf[blockSize*blockNum+3] = 0x00 367 } 368 369 func TestRecoverNoOp(t *testing.T) { 370 recs, err := makeTestRecords( 371 blockSize-legacyHeaderSize, 372 blockSize-legacyHeaderSize, 373 blockSize-legacyHeaderSize, 374 ) 375 if err != nil { 376 t.Fatalf("makeTestRecords: %v", err) 377 } 378 379 r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */) 380 _, err = r.Next() 381 if err != nil || r.err != nil { 382 t.Fatalf("reader.Next: %v reader.err: %v", err, r.err) 383 } 384 385 seq, begin, end, n := r.seq, r.begin, r.end, r.n 386 387 // Should be a no-op since r.err == nil. 388 r.recover() 389 390 // r.err was nil, nothing should have changed. 391 if seq != r.seq || begin != r.begin || end != r.end || n != r.n { 392 t.Fatal("reader.Recover when no error existed, was not a no-op") 393 } 394 } 395 396 func TestBasicRecover(t *testing.T) { 397 recs, err := makeTestRecords( 398 blockSize-legacyHeaderSize, 399 blockSize-legacyHeaderSize, 400 blockSize-legacyHeaderSize, 401 ) 402 if err != nil { 403 t.Fatalf("makeTestRecords: %v", err) 404 } 405 406 // Corrupt the checksum of the second record r1 in our file. 407 corruptBlock(recs.buf, 1) 408 409 underlyingReader := bytes.NewReader(recs.buf) 410 r := NewReader(underlyingReader, 0 /* logNum */) 411 412 // The first record r0 should be read just fine. 413 r0, err := r.Next() 414 if err != nil { 415 t.Fatalf("Next: %v", err) 416 } 417 r0Data, err := ioutil.ReadAll(r0) 418 if err != nil { 419 t.Fatalf("ReadAll: %v", err) 420 } 421 if !bytes.Equal(r0Data, recs.records[0]) { 422 t.Fatal("Unexpected output in r0's data") 423 } 424 425 // The next record should have a checksum mismatch. 426 _, err = r.Next() 427 if err == nil { 428 t.Fatal("Expected an error while reading a corrupted record") 429 } 430 if err != ErrInvalidChunk { 431 t.Fatalf("Unexpected error returned: %v", err) 432 } 433 434 // Recover from that checksum mismatch. 435 r.recover() 436 currentOffset, err := underlyingReader.Seek(0, os.SEEK_CUR) 437 if err != nil { 438 t.Fatalf("current offset: %v", err) 439 } 440 if currentOffset != blockSize*2 { 441 t.Fatalf("current offset: got %d, want %d", currentOffset, blockSize*2) 442 } 443 444 // The third record r2 should be read just fine. 445 r2, err := r.Next() 446 if err != nil { 447 t.Fatalf("Next: %v", err) 448 } 449 r2Data, err := ioutil.ReadAll(r2) 450 if err != nil { 451 t.Fatalf("ReadAll: %v", err) 452 } 453 if !bytes.Equal(r2Data, recs.records[2]) { 454 t.Fatal("Unexpected output in r2's data") 455 } 456 } 457 458 func TestRecoverSingleBlock(t *testing.T) { 459 // The first record will be blockSize * 3 bytes long. Since each block has 460 // a 7 byte header, the first record will roll over into 4 blocks. 461 recs, err := makeTestRecords( 462 blockSize*3, 463 blockSize-legacyHeaderSize, 464 blockSize/2, 465 ) 466 if err != nil { 467 t.Fatalf("makeTestRecords: %v", err) 468 } 469 470 // Corrupt the checksum for the portion of the first record that exists in 471 // the 4th block. 472 corruptBlock(recs.buf, 3) 473 474 // The first record should fail, but only when we read deeper beyond the 475 // first block. 476 r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */) 477 r0, err := r.Next() 478 if err != nil { 479 t.Fatalf("Next: %v", err) 480 } 481 482 // Reading deeper should yield a checksum mismatch. 483 _, err = ioutil.ReadAll(r0) 484 if err == nil { 485 t.Fatal("Expected a checksum mismatch error, got nil") 486 } 487 if err != ErrInvalidChunk { 488 t.Fatalf("Unexpected error returned: %v", err) 489 } 490 491 // Recover from that checksum mismatch. 492 r.recover() 493 494 // All of the data in the second record r1 is lost because the first record 495 // r0 shared a partial block with it. The second record also overlapped 496 // into the block with the third record r2. Recovery should jump to that 497 // block, skipping over the end of the second record and start parsing the 498 // third record. 499 r2, err := r.Next() 500 if err != nil { 501 t.Fatalf("Next: %v", err) 502 } 503 r2Data, _ := ioutil.ReadAll(r2) 504 if !bytes.Equal(r2Data, recs.records[2]) { 505 t.Fatal("Unexpected output in r2's data") 506 } 507 } 508 509 func TestRecoverMultipleBlocks(t *testing.T) { 510 recs, err := makeTestRecords( 511 // The first record will consume 3 entire blocks but a fraction of the 4th. 512 blockSize*3, 513 // The second record will completely fill the remainder of the 4th block. 514 3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize, 515 // Consume the entirety of the 5th block. 516 blockSize-legacyHeaderSize, 517 // Consume the entirety of the 6th block. 518 blockSize-legacyHeaderSize, 519 // Consume roughly half of the 7th block. 520 blockSize/2, 521 ) 522 if err != nil { 523 t.Fatalf("makeTestRecords: %v", err) 524 } 525 526 // Corrupt the checksum for the portion of the first record that exists in the 4th block. 527 corruptBlock(recs.buf, 3) 528 529 // Now corrupt the two blocks in a row that correspond to recs.records[2:4]. 530 corruptBlock(recs.buf, 4) 531 corruptBlock(recs.buf, 5) 532 533 // The first record should fail, but only when we read deeper beyond the first block. 534 r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */) 535 r0, err := r.Next() 536 if err != nil { 537 t.Fatalf("Next: %v", err) 538 } 539 540 // Reading deeper should yield a checksum mismatch. 541 _, err = ioutil.ReadAll(r0) 542 if err == nil { 543 t.Fatal("Exptected a checksum mismatch error, got nil") 544 } 545 if err != ErrInvalidChunk { 546 t.Fatalf("Unexpected error returned: %v", err) 547 } 548 549 // Recover from that checksum mismatch. 550 r.recover() 551 552 // All of the data in the second record is lost because the first 553 // record shared a partial block with it. The following two records 554 // have corrupted checksums as well, so the call above to r.Recover 555 // should result in r.Next() being a reader to the 5th record. 556 r4, err := r.Next() 557 if err != nil { 558 t.Fatalf("Next: %v", err) 559 } 560 561 r4Data, _ := ioutil.ReadAll(r4) 562 if !bytes.Equal(r4Data, recs.records[4]) { 563 t.Fatal("Unexpected output in r4's data") 564 } 565 } 566 567 // verifyLastBlockRecover reads each record from recs expecting that the 568 // last record will be corrupted. It will then try Recover and verify that EOF 569 // is returned. 570 func verifyLastBlockRecover(recs *testRecords) error { 571 r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */) 572 // Loop to one element larger than the number of records to verify EOF. 573 for i := 0; i < len(recs.records)+1; i++ { 574 _, err := r.Next() 575 switch i { 576 case len(recs.records) - 1: 577 if err == nil { 578 return errors.New("Expected a checksum mismatch error, got nil") 579 } 580 r.recover() 581 case len(recs.records): 582 if err != io.EOF { 583 return fmt.Errorf("Expected io.EOF, got %v", err) 584 } 585 default: 586 if err != nil { 587 return fmt.Errorf("Next: %v", err) 588 } 589 } 590 } 591 return nil 592 } 593 594 func TestRecoverLastPartialBlock(t *testing.T) { 595 recs, err := makeTestRecords( 596 // The first record will consume 3 entire blocks but a fraction of the 4th. 597 blockSize*3, 598 // The second record will completely fill the remainder of the 4th block. 599 3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize, 600 // Consume roughly half of the 5th block. 601 blockSize/2, 602 ) 603 if err != nil { 604 t.Fatalf("makeTestRecords: %v", err) 605 } 606 607 // Corrupt the 5th block. 608 corruptBlock(recs.buf, 4) 609 610 // Verify Recover works when the last block is corrupted. 611 if err := verifyLastBlockRecover(recs); err != nil { 612 t.Fatalf("verifyLastBlockRecover: %v", err) 613 } 614 } 615 616 func TestRecoverLastCompleteBlock(t *testing.T) { 617 recs, err := makeTestRecords( 618 // The first record will consume 3 entire blocks but a fraction of the 4th. 619 blockSize*3, 620 // The second record will completely fill the remainder of the 4th block. 621 3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize, 622 // Consume the entire 5th block. 623 blockSize-legacyHeaderSize, 624 ) 625 if err != nil { 626 t.Fatalf("makeTestRecords: %v", err) 627 } 628 629 // Corrupt the 5th block. 630 corruptBlock(recs.buf, 4) 631 632 // Verify Recover works when the last block is corrupted. 633 if err := verifyLastBlockRecover(recs); err != nil { 634 t.Fatalf("verifyLastBlockRecover: %v", err) 635 } 636 } 637 638 func TestReaderOffset(t *testing.T) { 639 recs, err := makeTestRecords( 640 blockSize*2, 641 400, 642 500, 643 600, 644 700, 645 800, 646 9000, 647 1000, 648 ) 649 if err != nil { 650 t.Fatalf("makeTestRecords: %v", err) 651 } 652 653 // The first record should fail, but only when we read deeper beyond the first block. 654 r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */) 655 for i, offset := range recs.offsets { 656 if offset != r.Offset() { 657 t.Fatalf("%d: expected offset %d, but found %d", i, offset, r.Offset()) 658 } 659 rec, err := r.Next() 660 if err != nil { 661 t.Fatalf("Next: %v", err) 662 } 663 if _, err = ioutil.ReadAll(rec); err != nil { 664 t.Fatalf("ReadAll: %v", err) 665 } 666 } 667 } 668 669 func TestSeekRecord(t *testing.T) { 670 recs, err := makeTestRecords( 671 // The first record will consume 3 entire blocks but a fraction of the 4th. 672 blockSize*3, 673 // The second record will completely fill the remainder of the 4th block. 674 3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize, 675 // Consume the entirety of the 5th block. 676 blockSize-legacyHeaderSize, 677 // Consume the entirety of the 6th block. 678 blockSize-legacyHeaderSize, 679 // Consume roughly half of the 7th block. 680 blockSize/2, 681 ) 682 if err != nil { 683 t.Fatalf("makeTestRecords: %v", err) 684 } 685 686 r := NewReader(bytes.NewReader(recs.buf), 0 /* logNum */) 687 // Seek to a valid block offset, but within a multiblock record. This should cause the next call to 688 // Next after SeekRecord to return the next valid FIRST/FULL chunk of the subsequent record. 689 err = r.seekRecord(blockSize) 690 if err != nil { 691 t.Fatalf("SeekRecord: %v", err) 692 } 693 rec, err := r.Next() 694 if err != nil { 695 t.Fatalf("Next: %v", err) 696 } 697 rData, _ := ioutil.ReadAll(rec) 698 if !bytes.Equal(rData, recs.records[1]) { 699 t.Fatalf("Unexpected output in record 1's data, got %v want %v", rData, recs.records[1]) 700 } 701 702 // Seek 3 bytes into the second block, which is still in the middle of the first record, but not 703 // at a valid chunk boundary. Should result in an error upon calling r.Next. 704 err = r.seekRecord(blockSize + 3) 705 if err != nil { 706 t.Fatalf("SeekRecord: %v", err) 707 } 708 if _, err = r.Next(); err == nil { 709 t.Fatalf("Expected an error seeking to an invalid chunk boundary") 710 } 711 r.recover() 712 713 // Seek to the fifth block and verify all records can be read as appropriate. 714 err = r.seekRecord(blockSize * 4) 715 if err != nil { 716 t.Fatalf("SeekRecord: %v", err) 717 } 718 719 check := func(i int) { 720 for ; i < len(recs.records); i++ { 721 rec, err := r.Next() 722 if err != nil { 723 t.Fatalf("Next: %v", err) 724 } 725 726 rData, _ := ioutil.ReadAll(rec) 727 if !bytes.Equal(rData, recs.records[i]) { 728 t.Fatalf("Unexpected output in record #%d's data, got %v want %v", i, rData, recs.records[i]) 729 } 730 } 731 } 732 check(2) 733 734 // Seek back to the fourth block, and read all subsequent records and verify them. 735 err = r.seekRecord(blockSize * 3) 736 if err != nil { 737 t.Fatalf("SeekRecord: %v", err) 738 } 739 check(1) 740 741 // Now seek past the end of the file and verify it causes an error. 742 err = r.seekRecord(1 << 20) 743 if err == nil { 744 t.Fatalf("Seek past the end of a file didn't cause an error") 745 } 746 if err != io.EOF { 747 t.Fatalf("Seeking past EOF raised unexpected error: %v", err) 748 } 749 r.recover() // Verify recovery works. 750 751 // Validate the current records are returned after seeking to a valid offset. 752 err = r.seekRecord(blockSize * 4) 753 if err != nil { 754 t.Fatalf("SeekRecord: %v", err) 755 } 756 check(2) 757 } 758 759 func TestLastRecordOffset(t *testing.T) { 760 recs, err := makeTestRecords( 761 // The first record will consume 3 entire blocks but a fraction of the 4th. 762 blockSize*3, 763 // The second record will completely fill the remainder of the 4th block. 764 3*(blockSize-legacyHeaderSize)-2*blockSize-2*legacyHeaderSize, 765 // Consume the entirety of the 5th block. 766 blockSize-legacyHeaderSize, 767 // Consume the entirety of the 6th block. 768 blockSize-legacyHeaderSize, 769 // Consume roughly half of the 7th block. 770 blockSize/2, 771 ) 772 if err != nil { 773 t.Fatalf("makeTestRecords: %v", err) 774 } 775 776 wants := []int64{0, 98332, 131072, 163840, 196608} 777 for i, got := range recs.offsets { 778 if want := wants[i]; got != want { 779 t.Errorf("record #%d: got %d, want %d", i, got, want) 780 } 781 } 782 } 783 784 func TestNoLastRecordOffset(t *testing.T) { 785 buf := new(bytes.Buffer) 786 w := NewWriter(buf) 787 defer w.Close() 788 789 if _, err := w.LastRecordOffset(); err != ErrNoLastRecord { 790 t.Fatalf("Expected ErrNoLastRecord, got: %v", err) 791 } 792 793 if err := w.Flush(); err != nil { 794 t.Fatal(err) 795 } 796 797 if _, err := w.LastRecordOffset(); err != ErrNoLastRecord { 798 t.Fatalf("LastRecordOffset: got: %v, want ErrNoLastRecord", err) 799 } 800 801 if _, err := w.WriteRecord([]byte("testrecord")); err != nil { 802 t.Fatal(err) 803 } 804 805 if off, err := w.LastRecordOffset(); err != nil { 806 t.Fatalf("LastRecordOffset: %v", err) 807 } else if off != 0 { 808 t.Fatalf("LastRecordOffset: got %d, want 0", off) 809 } 810 } 811 812 func TestInvalidLogNum(t *testing.T) { 813 var buf bytes.Buffer 814 w := NewLogWriter(&buf, 1) 815 for i := 0; i < 10; i++ { 816 s := fmt.Sprintf("%04d\n", i) 817 if _, err := w.WriteRecord([]byte(s)); err != nil { 818 t.Fatal(err) 819 } 820 } 821 if err := w.Close(); err != nil { 822 t.Fatal(err) 823 } 824 825 { 826 r := NewReader(bytes.NewReader(buf.Bytes()), 1) 827 for i := 0; i < 10; i++ { 828 rr, err := r.Next() 829 if err != nil { 830 t.Fatal(err) 831 } 832 x, err := ioutil.ReadAll(rr) 833 if err != nil { 834 t.Fatal(err) 835 } 836 s := fmt.Sprintf("%04d\n", i) 837 if s != string(x) { 838 t.Fatalf("expected %s, but found %s", s, x) 839 } 840 } 841 if _, err := r.Next(); err != io.EOF { 842 t.Fatalf("expected EOF, but found %s", err) 843 } 844 } 845 846 { 847 r := NewReader(bytes.NewReader(buf.Bytes()), 2) 848 if _, err := r.Next(); err != io.EOF { 849 t.Fatalf("expected %s, but found %s\n", io.EOF, err) 850 } 851 } 852 } 853 854 func TestSize(t *testing.T) { 855 var buf bytes.Buffer 856 zeroes := make([]byte, 8<<10) 857 w := NewWriter(&buf) 858 for i := 0; i < 100; i++ { 859 n := rand.Intn(len(zeroes)) 860 if _, err := w.WriteRecord(zeroes[:n]); err != nil { 861 t.Fatal(err) 862 } 863 if err := w.Flush(); err != nil { 864 t.Fatal(err) 865 } 866 if buf.Len() != int(w.Size()) { 867 t.Fatalf("expected %d, but found %d", buf.Len(), w.Size()) 868 } 869 } 870 if err := w.Close(); err != nil { 871 t.Fatal(err) 872 } 873 } 874 875 func TestRecycleLog(t *testing.T) { 876 const min = 16 877 const max = 4096 878 879 rnd := rand.New(rand.NewSource(uint64(time.Now().UnixNano()))) 880 randBlock := func() []byte { 881 data := make([]byte, rand.Intn(max-min)+min) 882 tmp := data 883 for len(tmp) >= 8 { 884 binary.LittleEndian.PutUint64(tmp, rand.Uint64()) 885 tmp = tmp[8:] 886 } 887 r := rand.Uint64() 888 for i := 0; i < len(tmp); i++ { 889 tmp[i] = byte(r) 890 r >>= 8 891 } 892 return data 893 } 894 895 // Recycle a log file 100 times, writing a random number of records filled 896 // with random data. 897 backing := make([]byte, 1<<20) 898 for i := 1; i <= 100; i++ { 899 w := NewLogWriter(bytes.NewBuffer(backing[:0]), uint64(i)) 900 sizes := make([]int, 10+rnd.Intn(100)) 901 for j := range sizes { 902 data := randBlock() 903 if _, err := w.WriteRecord(data); err != nil { 904 t.Fatalf("%d/%d: %v", i, j, err) 905 } 906 sizes[j] = len(data) 907 } 908 if err := w.Close(); err != nil { 909 t.Fatalf("%d: %v", i, err) 910 } 911 912 r := NewReader(bytes.NewReader(backing), uint64(i)) 913 for j := range sizes { 914 rr, err := r.Next() 915 if err != nil { 916 t.Fatalf("%d/%d: %v", i, j, err) 917 } 918 x, err := ioutil.ReadAll(rr) 919 if err != nil { 920 t.Fatalf("%d/%d: %v", i, j, err) 921 } 922 if sizes[j] != len(x) { 923 t.Fatalf("%d/%d: expected record %d, but found %d", i, j, sizes[j], len(x)) 924 } 925 } 926 if _, err := r.Next(); err != io.EOF && err != ErrZeroedChunk && err != ErrInvalidChunk { 927 t.Fatalf("%d: expected EOF, but found %v", i, err) 928 } 929 } 930 } 931 932 func BenchmarkRecordWrite(b *testing.B) { 933 for _, size := range []int{8, 16, 32, 64, 128} { 934 b.Run(fmt.Sprintf("size=%d", size), func(b *testing.B) { 935 w := NewLogWriter(ioutil.Discard, 0 /* logNum */) 936 defer w.Close() 937 buf := make([]byte, size) 938 939 b.SetBytes(int64(len(buf))) 940 b.ResetTimer() 941 for i := 0; i < b.N; i++ { 942 if _, err := w.WriteRecord(buf); err != nil { 943 b.Fatal(err) 944 } 945 } 946 b.StopTimer() 947 }) 948 } 949 }