github.com/tacshi/go-ethereum@v0.0.0-20230616113857-84a434e20921/core/rawdb/freezer_table_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"sync/atomic"
	"testing"
	"testing/quick"

	"github.com/davecgh/go-spew/spew"
	"github.com/stretchr/testify/require"
	"github.com/tacshi/go-ethereum/metrics"
)

// TestFreezerBasics tests initializing a freezer table from scratch, writing to the table,
// and reading it back.
func TestFreezerBasics(t *testing.T) {
	t.Parallel()
	// set cutoff at 50 bytes
	f, err := newTable(os.TempDir(),
		fmt.Sprintf("unittest-%d", rand.Uint64()),
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	// Write 15 bytes 255 times, results in 85 files
	writeChunks(t, f, 255, 15)

	//print(t, f, 0)
	//print(t, f, 1)
	//print(t, f, 2)
	//
	//db[0] = 000000000000000000000000000000
	//db[1] = 010101010101010101010101010101
	//db[2] = 020202020202020202020202020202

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatalf("reading item %d: %v", y, err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
	}
	// Check that we cannot read too far
	_, err = f.Retrieve(uint64(255))
	if err != errOutOfBounds {
		t.Fatal(err)
	}
}

// TestFreezerBasicsClosing performs the same tests as TestFreezerBasics, but also closes and
// reopens the freezer between every operation.
func TestFreezerBasicsClosing(t *testing.T) {
	t.Parallel()
	// set cutoff at 50 bytes
	var (
		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
		f          *freezerTable
		err        error
	)
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 15 bytes 255 times, results in 85 files.
	// In-between writes, the table is closed and re-opened.
	for x := 0; x < 255; x++ {
		data := getChunk(15, x)
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(uint64(x), data))
		require.NoError(t, batch.commit())
		f.Close()

		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
	defer f.Close()

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
		f.Close()
		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
func TestFreezerRepairDanglingHead(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)

		// The last item should be there
		if _, err = f.Retrieve(0xfe); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove 4 bytes
	stat, err := idxFile.Stat()
	if err != nil {
		t.Fatalf("Failed to stat index file: %v", err)
	}
	idxFile.Truncate(stat.Size() - 4)
	idxFile.Close()

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// The last item should be missing
		if _, err = f.Retrieve(0xff); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// The one before should still be there
		if _, err = f.Retrieve(0xfd); err != nil {
			t.Fatalf("Expected no error, got %v", err)
		}
	}
}

// TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove everything but the first item, and leave data unaligned
	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
	idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
	idxFile.Close()

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// The first item should be there
		if _, err = f.Retrieve(0); err != nil {
			t.Fatal(err)
		}
		// The second item should be missing
		if _, err = f.Retrieve(1); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// We should now be able to store items again, from item = 1
		batch := f.newBatch()
		for x := 1; x < 0xff; x++ {
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}

	// And if we open it, we should now be able to read all of them (new values)
	{
		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		for y := 1; y < 255; y++ {
			exp := getChunk(15, ^y)
			got, err := f.Retrieve(uint64(y))
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, exp) {
				t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
			}
		}
	}
}

// TestSnappyDetection tests that reading a snappy-compressed table fails when it is
// opened without snappy, and vice versa.
func TestSnappyDetection(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())

	// Open with snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)
		f.Close()
	}

	// Open without snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = f.Retrieve(0); err == nil {
			f.Close()
			t.Fatalf("expected empty table")
		}
	}

	// Open with snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// There should be 255 items
		if _, err = f.Retrieve(0xfe); err != nil {
			f.Close()
			t.Fatalf("expected no error, got %v", err)
		}
	}
}

func assertFileSize(f string, size int64) error {
	stat, err := os.Stat(f)
	if err != nil {
		return err
	}
	if stat.Size() != size {
		return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
	}
	return nil
}

// TestFreezerRepairDanglingIndex checks that if the index has more entries than there are data,
// the index is repaired
func TestFreezerRepairDanglingIndex(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 9 times: 135 bytes
		writeChunks(t, f, 9, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			f.Close()
			t.Fatal(err)
		}
		f.Close()
		// File sizes should be 45, 45, 45 : items [3, 3, 3]
	}

	// Crop third file
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
	// Truncate third file: 45, 45, 20
	{
		if err := assertFileSize(fileToCrop, 45); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}

	// Open the db again
	// It should restore the file(s) to
	// 45, 45, 15
	// with 3+3+1 items
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		if f.items != 7 {
			t.Fatalf("expected %d items, got %d", 7, f.items)
		}
		if err := assertFileSize(fileToCrop, 15); err != nil {
			t.Fatal(err)
		}
	}
}

func TestFreezerTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncation-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Reopen, truncate
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		f.truncateHead(10) // 150 bytes
		if f.items != 10 {
			t.Fatalf("expected %d items, got %d", 10, f.items)
		}
		// 45, 45, 45, 15 -- bytes should be 15
		if f.headBytes != 15 {
			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
		}
	}
}

// TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
// That will rewind the index, and _should_ truncate the head file
func TestFreezerRepairFirstFile(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 80 bytes, splitting out into two files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
		require.NoError(t, batch.commit())

		// The last item should be there
		if _, err = f.Retrieve(1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Truncate the file in half
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
	{
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}

	// Reopen
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		if f.items != 1 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 1, f.items)
		}

		// Write 40 bytes
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
		require.NoError(t, batch.commit())

		f.Close()

		// Should have been truncated down to zero and then 40 written
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
	}
}

// TestFreezerReadAndTruncate tests:
// - we have a table open
// - do some reads, so files are open in readonly
// - truncate so those files are 'removed'
// - check that we did not keep the rdonly file descriptors
func TestFreezerReadAndTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Reopen and read all files
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		if f.items != 30 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 30, f.items)
		}
		for y := byte(0); y < 30; y++ {
			f.Retrieve(uint64(y))
		}

		// Now, truncate back to zero
		f.truncateHead(0)

		// Write the data again
		batch := f.newBatch()
		for x := 0; x < 30; x++ {
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}
}

func TestFreezerOffset(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("offset-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}

		// Write 6 x 20 bytes, splitting out into three files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))

		require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
		require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))

		require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
		require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
		require.NoError(t, batch.commit())

		t.Log(f.dumpIndexString(0, 100))
		f.Close()
	}

	// Now crop it.
	{
		// delete files 0 and 1
		for i := 0; i < 2; i++ {
			p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
			if err := os.Remove(p); err != nil {
				t.Fatal(err)
			}
		}
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 7*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 4 ] at index zero

		zeroIndex := indexEntry{
			filenum: uint32(2), // First file is 2
			offset:  uint32(4), // We have removed four items
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)

		// Remove the four next indices by overwriting
		copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
		indexFile.WriteAt(indexBuf, 0)

		// Need to truncate the moved index items
		indexFile.Truncate(indexEntrySize * (1 + 2))
		indexFile.Close()
	}

	// Now open again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		// It should allow writing item 6.
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
		require.NoError(t, batch.commit())

		checkRetrieveError(t, f, map[uint64]error{
			0: errOutOfBounds,
			1: errOutOfBounds,
			2: errOutOfBounds,
			3: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			4: getChunk(20, 0xbb),
			5: getChunk(20, 0xaa),
			6: getChunk(20, 0x99),
		})
	}

	// Edit the index again, with a much larger initial offset of 1M.
	{
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 3*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 1M ] at index zero

		zeroIndex := indexEntry{
			offset:  uint32(1000000), // We have removed 1M items
			filenum: uint32(2),       // First file is 2
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)
		indexFile.WriteAt(indexBuf, 0)
		indexFile.Close()
	}

	// Check that existing items have been moved to index 1M.
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		checkRetrieveError(t, f, map[uint64]error{
			0:      errOutOfBounds,
			1:      errOutOfBounds,
			2:      errOutOfBounds,
			3:      errOutOfBounds,
			999999: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			1000000: getChunk(20, 0xbb),
			1000001: getChunk(20, 0xaa),
		})
	}
}

func TestTruncateTail(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())

	// Fill table
	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 7 x 20 bytes, splitting out into four files
	batch := f.newBatch()
	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())

	// nothing to do, all the items should still be there.
	f.truncateTail(0)
	fmt.Println(f.dumpIndexString(0, 1000))
	checkRetrieve(t, f, map[uint64][]byte{
		0: getChunk(20, 0xFF),
		1: getChunk(20, 0xEE),
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Truncate a single element (item 0); deletion is only supported at file level
	f.truncateTail(1)
	fmt.Println(f.dumpIndexString(0, 1000))
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		1: getChunk(20, 0xEE),
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Reopen the table, the deletion information should be persisted as well
	f.Close()
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		1: getChunk(20, 0xEE),
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Truncate two elements (item 0, item 1); file 0 should be deleted
	f.truncateTail(2)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Reopen the table, the above checks should still pass
	f.Close()
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Truncate all items, the entire freezer should be deleted
	f.truncateTail(7)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
		2: errOutOfBounds,
		3: errOutOfBounds,
		4: errOutOfBounds,
		5: errOutOfBounds,
		6: errOutOfBounds,
	})
}

func TestTruncateHead(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())

	// Fill table
	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 7 x 20 bytes, splitting out into four files
	batch := f.newBatch()
	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())

	f.truncateTail(4) // Tail = 4

	// The new head (item 3) is below the tail (item 4), so the entire table should be truncated
	f.truncateHead(4)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds, // Deleted by tail
		1: errOutOfBounds, // Deleted by tail
		2: errOutOfBounds, // Deleted by tail
		3: errOutOfBounds, // Deleted by tail
		4: errOutOfBounds, // Deleted by head
		5: errOutOfBounds, // Deleted by head
		6: errOutOfBounds, // Deleted by head
	})

	// Append new items
	batch = f.newBatch()
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())

	checkRetrieve(t, f, map[uint64][]byte{
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})
}

func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
	t.Helper()

	for item, wantBytes := range items {
		value, err := f.Retrieve(item)
		if err != nil {
			t.Fatalf("can't get expected item %d: %v", item, err)
		}
		if !bytes.Equal(value, wantBytes) {
			t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
		}
	}
}

func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
	t.Helper()

	for item, wantError := range items {
		value, err := f.Retrieve(item)
		if err == nil {
			t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
		}
		if err != wantError {
			t.Fatalf("wrong error for item %d: %v", item, err)
		}
	}
}

// Gets a chunk of data, filled with 'b'
func getChunk(size int, b int) []byte {
	data := make([]byte, size)
	for i := range data {
		data[i] = byte(b)
	}
	return data
}

// TODO (?)
// - test that if we remove several head-files, as well as the last data-file,
//   the index is truncated accordingly
// Right now, the freezer would fail on these conditions:
// 1. have data files d0, d1, d2, d3
// 2. remove d2, d3
//
// However, all 'normal' failure modes arising due to failing to sync() or save a file
// should be handled already, and the case described above can only (?) happen if an
// external process/user deletes files from the filesystem.

func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
	t.Helper()

	batch := ft.newBatch()
	for i := 0; i < n; i++ {
		if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
			t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
		}
	}
	if err := batch.commit(); err != nil {
		t.Fatalf("Commit returned error: %v", err)
	}
}

// TestSequentialRead does some basic tests on RetrieveItems.
func TestSequentialRead(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)
		f.dumpIndexStdout(0, 30)
		f.Close()
	}
	{ // Open it, iterate, verify iteration
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		items, err := f.RetrieveItems(0, 10000, 100000)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 30; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
	{ // Open it, iterate, verify byte limit. The byte limit is less than item
		// size, so each lookup should only return one item
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		items, err := f.RetrieveItems(0, 10000, 10)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 1; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
}

// TestSequentialReadByteLimit does some more advanced tests on batch reads.
// These tests check that when the byte limit hits, we correctly abort in time,
// but also properly do all the deferred reads for the previous data, regardless
// of whether the data crosses a file boundary or not.
func TestSequentialReadByteLimit(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 10 bytes 30 times,
		// Splitting it at every 100 bytes (10 items)
		writeChunks(t, f, 30, 10)
		f.Close()
	}
	for i, tc := range []struct {
		items uint64
		limit uint64
		want  int
	}{
		{9, 89, 8},
		{10, 99, 9},
		{11, 109, 10},
		{100, 89, 8},
		{100, 99, 9},
		{100, 109, 10},
	} {
		{
			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
			if err != nil {
				t.Fatal(err)
			}
			items, err := f.RetrieveItems(0, tc.items, tc.limit)
			if err != nil {
				t.Fatal(err)
			}
			if have, want := len(items), tc.want; have != want {
				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
			}
			for ii, have := range items {
				want := getChunk(10, ii)
				if !bytes.Equal(want, have) {
					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
				}
			}
			f.Close()
		}
	}
}

func TestFreezerReadonly(t *testing.T) {
	tmpdir := os.TempDir()
	// Case 1: Check it fails on non-existent file.
	_, err := newTable(tmpdir,
		fmt.Sprintf("readonlytest-%d", rand.Uint64()),
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err == nil {
		t.Fatal("readonly table instantiation should fail for non-existent table")
	}

	// Case 2: Check that it fails on invalid index length.
	fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
	idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
	if err != nil {
		t.Errorf("Failed to open index file: %v\n", err)
	}
	// size should not be a multiple of indexEntrySize.
	idxFile.Write(make([]byte, 17))
	idxFile.Close()
	_, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err == nil {
		t.Errorf("readonly table instantiation should fail for invalid index size")
	}

	// Case 3: Open a non-readonly table to write some data.
	// Then corrupt the head file and make sure opening the table
	// again in readonly triggers an error.
	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
	f, err := newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		t.Fatalf("failed to instantiate table: %v", err)
	}
	writeChunks(t, f, 8, 32)
	// Corrupt table file
	if _, err := f.head.Write([]byte{1, 1}); err != nil {
		t.Fatal(err)
	}
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
	_, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err == nil {
		t.Errorf("readonly table instantiation should fail for corrupt table file")
	}

	// Case 4: Write some data to a table and later re-open it as readonly.
	// Should be successful.
	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
	f, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		t.Fatalf("failed to instantiate table: %v\n", err)
	}
	writeChunks(t, f, 32, 128)
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
	f, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err != nil {
		t.Fatal(err)
	}
	v, err := f.Retrieve(10)
	if err != nil {
		t.Fatal(err)
	}
	exp := getChunk(128, 10)
	if !bytes.Equal(v, exp) {
		t.Errorf("retrieved value is incorrect")
	}

	// Case 5: Now write some data via a batch.
	// This should fail either during AppendRaw or commit
	batch := f.newBatch()
	writeErr := batch.AppendRaw(32, make([]byte, 1))
	if writeErr == nil {
		writeErr = batch.commit()
	}
	if writeErr == nil {
		t.Fatalf("Writing to readonly table should fail")
	}
}

// randTest performs random freezer table operations.
// Instances of this test are created by Generate.
type randTest []randTestStep

type randTestStep struct {
	op     int
	items  []uint64 // for append and retrieve
	blobs  [][]byte // for append
	target uint64   // for truncate(head/tail)
	err    error    // for debugging
}

const (
	opReload = iota
	opAppend
	opRetrieve
	opTruncateHead
	opTruncateHeadAll
	opTruncateTail
	opTruncateTailAll
	opCheckAll
	opMax // boundary value, not an actual op
)

func getVals(first uint64, n int) [][]byte {
	var ret [][]byte
	for i := 0; i < n; i++ {
		val := make([]byte, 8)
		binary.BigEndian.PutUint64(val, first+uint64(i))
		ret = append(ret, val)
	}
	return ret
}

func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
	var (
		deleted uint64   // The number of items deleted from the tail
		items   []uint64 // The indexes of entries in the table

		// getItems retrieves up to n item indexes from the table.
		getItems = func(n int) []uint64 {
			length := len(items)
			if length == 0 {
				return nil
			}
			var ret []uint64
			index := rand.Intn(length)
			for i := index; len(ret) < n && i < length; i++ {
				ret = append(ret, items[i])
			}
			return ret
		}

		// addItems appends n new items to the table and returns their indexes.
		addItems = func(n int) []uint64 {
			var first = deleted
			if len(items) != 0 {
				first = items[len(items)-1] + 1
			}
			var ret []uint64
			for i := 0; i < n; i++ {
				ret = append(ret, first+uint64(i))
			}
			items = append(items, ret...)
			return ret
		}
	)

	var steps randTest
	for i := 0; i < size; i++ {
		step := randTestStep{op: r.Intn(opMax)}
		switch step.op {
		case opReload, opCheckAll:
		case opAppend:
			num := r.Intn(3)
			step.items = addItems(num)
			if len(step.items) == 0 {
				step.blobs = nil
			} else {
				step.blobs = getVals(step.items[0], num)
			}
		case opRetrieve:
			step.items = getItems(r.Intn(3))
		case opTruncateHead:
			if len(items) == 0 {
				step.target = deleted
			} else {
				index := r.Intn(len(items))
				items = items[:index]
				step.target = deleted + uint64(index)
			}
		case opTruncateHeadAll:
			step.target = deleted
			items = items[:0]
		case opTruncateTail:
			if len(items) == 0 {
				step.target = deleted
			} else {
				index := r.Intn(len(items))
				items = items[index:]
				deleted += uint64(index)
				step.target = deleted
			}
		case opTruncateTailAll:
			step.target = deleted + uint64(len(items))
			items = items[:0]
			deleted = step.target
		}
		steps = append(steps, step)
	}
	return reflect.ValueOf(steps)
}

func runRandTest(rt randTest) bool {
	fname := fmt.Sprintf("randtest-%d", rand.Uint64())
	f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		panic("failed to initialize table")
	}
	var values [][]byte
	for i, step := range rt {
		switch step.op {
		case opReload:
			f.Close()
			f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
			if err != nil {
				rt[i].err = fmt.Errorf("failed to reload table %v", err)
			}
		case opCheckAll:
			tail := atomic.LoadUint64(&f.itemHidden)
			head := atomic.LoadUint64(&f.items)

			if tail == head {
				continue
			}
			got, err := f.RetrieveItems(atomic.LoadUint64(&f.itemHidden), head-tail, 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, values) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
				}
			}

		case opAppend:
			batch := f.newBatch()
			for i := 0; i < len(step.items); i++ {
				batch.AppendRaw(step.items[i], step.blobs[i])
			}
			batch.commit()
			values = append(values, step.blobs...)

		case opRetrieve:
			var blobs [][]byte
			if len(step.items) == 0 {
				continue
			}
			tail := atomic.LoadUint64(&f.itemHidden)
			for i := 0; i < len(step.items); i++ {
				blobs = append(blobs, values[step.items[i]-tail])
			}
			got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, blobs) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
				}
			}

		case opTruncateHead:
			f.truncateHead(step.target)

			length := atomic.LoadUint64(&f.items) - atomic.LoadUint64(&f.itemHidden)
			values = values[:length]

		case opTruncateHeadAll:
			f.truncateHead(step.target)
			values = nil

		case opTruncateTail:
			prev := atomic.LoadUint64(&f.itemHidden)
			f.truncateTail(step.target)

			truncated := atomic.LoadUint64(&f.itemHidden) - prev
			values = values[truncated:]

		case opTruncateTailAll:
			f.truncateTail(step.target)
			values = nil
		}
		// Abort the test on error.
		if rt[i].err != nil {
			return false
		}
	}
	f.Close()
	return true
}

func TestRandom(t *testing.T) {
	if err := quick.Check(runRandTest, nil); err != nil {
		if cerr, ok := err.(*quick.CheckError); ok {
			t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
		}
		t.Fatal(err)
	}
}