github.com/shrimpyuk/bor@v0.2.15-0.20220224151350-fb4ec6020bae/core/rawdb/freezer_table_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"fmt"
	"math/rand"
	"os"
	"path/filepath"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/metrics"
	"github.com/stretchr/testify/require"
)

func init() {
	rand.Seed(time.Now().Unix())
}

// TestFreezerBasics tests initializing a freezerTable from scratch, writing to the table,
// and reading it back.
func TestFreezerBasics(t *testing.T) {
	t.Parallel()
	// set cutoff at 50 bytes
	f, err := newTable(os.TempDir(),
		fmt.Sprintf("unittest-%d", rand.Uint64()),
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	// Write 15 bytes 255 times: at a 50-byte cutoff each file holds 3 items
	// (45 bytes), so this results in 85 files.
	writeChunks(t, f, 255, 15)

	//print(t, f, 0)
	//print(t, f, 1)
	//print(t, f, 2)
	//
	//db[0] = 000000000000000000000000000000
	//db[1] = 010101010101010101010101010101
	//db[2] = 020202020202020202020202020202

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatalf("reading item %d: %v", y, err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
	}
	// Check that we cannot read too far
	_, err = f.Retrieve(uint64(255))
	if err != errOutOfBounds {
		t.Fatal(err)
	}
}

// TestFreezerBasicsClosing tests the same as TestFreezerBasics, but also closes and
// reopens the freezer between every operation.
func TestFreezerBasicsClosing(t *testing.T) {
	t.Parallel()
	// set cutoff at 50 bytes
	var (
		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
		f          *freezerTable
		err        error
	)
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
	if err != nil {
		t.Fatal(err)
	}

	// Write 15 bytes 255 times, resulting in 85 files.
	// In-between writes, the table is closed and re-opened.
	for x := 0; x < 255; x++ {
		data := getChunk(15, x)
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(uint64(x), data))
		require.NoError(t, batch.commit())
		f.Close()

		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
	}
	defer f.Close()

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
		f.Close()
		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
func TestFreezerRepairDanglingHead(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)

		// The last item should be there
		if _, err = f.Retrieve(0xfe); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove 4 bytes
	stat, err := idxFile.Stat()
	if err != nil {
		t.Fatalf("Failed to stat index file: %v", err)
	}
	idxFile.Truncate(stat.Size() - 4)
	idxFile.Close()

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// The last item should be missing
		if _, err = f.Retrieve(0xff); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// The one before should still be there
		if _, err = f.Retrieve(0xfd); err != nil {
			t.Fatalf("Expected no error, got %v", err)
		}
	}
}

// TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove everything but the first item, and leave data unaligned
	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
	idxFile.Truncate(indexEntrySize + indexEntrySize + indexEntrySize/2)
	idxFile.Close()

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// The first item should be there
		if _, err = f.Retrieve(0); err != nil {
			t.Fatal(err)
		}
		// The second item should be missing
		if _, err = f.Retrieve(1); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// We should now be able to store items again, from item = 1
		batch := f.newBatch()
		for x := 1; x < 0xff; x++ {
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}

	// And if we open it, we should now be able to read all of them (new values)
	{
		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		for y := 1; y < 255; y++ {
			exp := getChunk(15, ^y)
			got, err := f.Retrieve(uint64(y))
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, exp) {
				t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
			}
		}
	}
}

// TestSnappyDetection tests that we fail to open a snappy database without snappy, and vice versa
func TestSnappyDetection(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())

	// Open with snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)
		f.Close()
	}

	// Open without snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = f.Retrieve(0); err == nil {
			f.Close()
			t.Fatalf("expected empty table")
		}
	}

	// Open with snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// There should be 255 items
		if _, err = f.Retrieve(0xfe); err != nil {
			f.Close()
			t.Fatalf("expected no error, got %v", err)
		}
	}
}

func assertFileSize(f string, size int64) error {
	stat, err := os.Stat(f)
	if err != nil {
		return err
	}
	if stat.Size() != size {
		return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
	}
	return nil
}

// TestFreezerRepairDanglingIndex checks that if the index has more entries than there are data,
// the index is repaired
func TestFreezerRepairDanglingIndex(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 9 times : 135 bytes
		writeChunks(t, f, 9, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			f.Close()
			t.Fatal(err)
		}
		f.Close()
		// File sizes should be 45, 45, 45 : items [3, 3, 3]
	}

	// Crop third file
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
	// Truncate third file: 45, 45, 20
	{
		if err := assertFileSize(fileToCrop, 45); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}

	// Open the db again.
	// It should restore the file(s) to
	// 45, 45, 15
	// with 3+3+1 items
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		if f.items != 7 {
			t.Fatalf("expected %d items, got %d", 7, f.items)
		}
		if err := assertFileSize(fileToCrop, 15); err != nil {
			t.Fatal(err)
		}
	}
}

func TestFreezerTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncation-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Reopen, truncate
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		f.truncate(10) // 150 bytes
		if f.items != 10 {
			t.Fatalf("expected %d items, got %d", 10, f.items)
		}
		// 45, 45, 45, 15 -- bytes should be 15
		if f.headBytes != 15 {
			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
		}
	}
}

// TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
// That will rewind the index, and _should_ truncate the head file.
func TestFreezerRepairFirstFile(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 80 bytes, splitting out into two files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
		require.NoError(t, batch.commit())

		// The last item should be there
		if _, err = f.Retrieve(1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Truncate the file in half
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
	{
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}

	// Reopen
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		if f.items != 1 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 1, f.items)
		}

		// Write 40 bytes
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
		require.NoError(t, batch.commit())

		f.Close()

		// Should have been truncated down to zero and then 40 written
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
	}
}

// TestFreezerReadAndTruncate tests:
// - we have a table open
// - do some reads, so files are open in readonly
// - truncate so those files are 'removed'
// - check that we did not keep the rdonly file descriptors
func TestFreezerReadAndTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Reopen and read all files
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		if f.items != 30 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 30, f.items)
		}
		for y := byte(0); y < 30; y++ {
			f.Retrieve(uint64(y))
		}

		// Now, truncate back to zero
		f.truncate(0)

		// Write the data again
		batch := f.newBatch()
		for x := 0; x < 30; x++ {
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}
}

func TestFreezerOffset(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("offset-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}

		// Write 6 x 20 bytes, splitting out into three files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))

		require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
		require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))

		require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
		require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
		require.NoError(t, batch.commit())

		t.Log(f.dumpIndexString(0, 100))
		f.Close()
	}

	// Now crop it.
	{
		// delete files 0 and 1
		for i := 0; i < 2; i++ {
			p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
			if err := os.Remove(p); err != nil {
				t.Fatal(err)
			}
		}
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 7*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 4 ] at index zero

		tailId := uint32(2)     // First file is 2
		itemOffset := uint32(4) // We have removed four items
		zeroIndex := indexEntry{
			filenum: tailId,
			offset:  itemOffset,
		}
		buf := zeroIndex.append(nil)
		// Overwrite index zero
		copy(indexBuf, buf)
		// Remove the four next indices by overwriting
		copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
		indexFile.WriteAt(indexBuf, 0)
		// Need to truncate the moved index items
		indexFile.Truncate(indexEntrySize * (1 + 2))
		indexFile.Close()
	}

	// Now open again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		// It should allow writing item 6.
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
		require.NoError(t, batch.commit())

		checkRetrieveError(t, f, map[uint64]error{
			0: errOutOfBounds,
			1: errOutOfBounds,
			2: errOutOfBounds,
			3: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			4: getChunk(20, 0xbb),
			5: getChunk(20, 0xaa),
			6: getChunk(20, 0x99),
		})
	}

	// Edit the index again, with a much larger initial offset of 1M.
	{
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 3*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 1M ] at index zero

		tailId := uint32(2)           // First file is 2
		itemOffset := uint32(1000000) // We have removed 1M items
		zeroIndex := indexEntry{
			offset:  itemOffset,
			filenum: tailId,
		}
		buf := zeroIndex.append(nil)
		// Overwrite index zero
		copy(indexBuf, buf)
		indexFile.WriteAt(indexBuf, 0)
		indexFile.Close()
	}

	// Check that existing items have been moved to index 1M.
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		checkRetrieveError(t, f, map[uint64]error{
			0:      errOutOfBounds,
			1:      errOutOfBounds,
			2:      errOutOfBounds,
			3:      errOutOfBounds,
			999999: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			1000000: getChunk(20, 0xbb),
			1000001: getChunk(20, 0xaa),
		})
	}
}

func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
	t.Helper()

	for item, wantBytes := range items {
		value, err := f.Retrieve(item)
		if err != nil {
			t.Fatalf("can't get expected item %d: %v", item, err)
		}
		if !bytes.Equal(value, wantBytes) {
			t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
		}
	}
}

func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
	t.Helper()

	for item, wantError := range items {
		value, err := f.Retrieve(item)
		if err == nil {
			t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
		}
		if err != wantError {
			t.Fatalf("wrong error for item %d: %v", item, err)
		}
	}
}

// Gets a chunk of data, filled with 'b'
func getChunk(size int, b int) []byte {
	data := make([]byte, size)
	for i := range data {
		data[i] = byte(b)
	}
	return data
}

// TODO (?)
// - test that if we remove several head-files, as well as the last data-file,
//   the index is truncated accordingly
// Right now, the freezer would fail on these conditions:
// 1. have data files d0, d1, d2, d3
// 2. remove d2, d3
//
// However, all 'normal' failure modes arising due to failing to sync() or save a file
// should be handled already, and the case described above can only (?) happen if an
// external process/user deletes files from the filesystem.
// A hypothetical sketch of such a test follows; see TestFreezerExternallyDeletedDataFiles below.
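
// TestFreezerExternallyDeletedDataFiles is a hypothetical sketch (not part of the
// original suite) of the scenario described in the TODO above: fill a table spanning
// several data files, delete the head data files out from under it, and reopen.
// The freezer is not expected to recover from this today, so the test is skipped;
// it only documents the shape such a test could take, and the item count it asserts
// assumes a repair strategy that truncates the index back to the surviving data.
func TestFreezerExternallyDeletedDataFiles(t *testing.T) {
	t.Skip("sketch only: externally deleted data files are a known-unhandled failure mode")

	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("external_delete-%d", rand.Uint64())

	// Fill a table spanning four data files (d0..d3) and close it.
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// 12 items x 15 bytes at a 50-byte cutoff: 3 items per file, 4 files.
		writeChunks(t, f, 12, 15)
		f.Close()
	}

	// Simulate an external process removing the two head data files (d2, d3).
	for i := 2; i < 4; i++ {
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%s.%04d.rdat", fname, i))
		if err := os.Remove(p); err != nil {
			t.Fatal(err)
		}
	}

	// If the freezer ever learns to repair this, reopening should truncate the
	// index back to the 6 items still backed by data files d0 and d1.
	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	if f.items != 6 {
		t.Fatalf("expected %d items after repair, got %d", 6, f.items)
	}
}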
func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
	t.Helper()

	batch := ft.newBatch()
	for i := 0; i < n; i++ {
		if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
			t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
		}
	}
	if err := batch.commit(); err != nil {
		t.Fatalf("Commit returned error: %v", err)
	}
}

// TestSequentialRead does some basic tests on RetrieveItems.
func TestSequentialRead(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)
		f.DumpIndex(0, 30)
		f.Close()
	}
	{ // Open it, iterate, verify iteration
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		items, err := f.RetrieveItems(0, 10000, 100000)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 30; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
	{ // Open it, iterate, verify byte limit. The byte limit is less than item
		// size, so each lookup should only return one item
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}
		items, err := f.RetrieveItems(0, 10000, 10)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 1; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
}

// TestSequentialReadByteLimit does some more advanced tests on batch reads.
// These tests check that when the byte limit hits, we correctly abort in time,
// but also properly do all the deferred reads for the previous data, regardless
// of whether the data crosses a file boundary or not.
func TestSequentialReadByteLimit(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 10 bytes 30 times,
		// splitting it at every 100 bytes (10 items)
		writeChunks(t, f, 30, 10)
		f.Close()
	}
	for i, tc := range []struct {
		items uint64
		limit uint64
		want  int
	}{
		{9, 89, 8},
		{10, 99, 9},
		{11, 109, 10},
		{100, 89, 8},
		{100, 99, 9},
		{100, 109, 10},
	} {
		{
			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true)
			if err != nil {
				t.Fatal(err)
			}
			items, err := f.RetrieveItems(0, tc.items, tc.limit)
			if err != nil {
				t.Fatal(err)
			}
			if have, want := len(items), tc.want; have != want {
				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
			}
			for ii, have := range items {
				want := getChunk(10, ii)
				if !bytes.Equal(want, have) {
					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
				}
			}
			f.Close()
		}
	}
}