github.com/theQRL/go-zond@v0.1.1/core/rawdb/freezer_table_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"testing"
	"testing/quick"

	"github.com/davecgh/go-spew/spew"
	"github.com/stretchr/testify/require"
	"github.com/theQRL/go-zond/metrics"
)

// TestFreezerBasics tests initializing a freezer table from scratch, writing to
// the table, and reading it back.
func TestFreezerBasics(t *testing.T) {
	t.Parallel()
	// Set the cutoff at 50 bytes
	f, err := newTable(os.TempDir(),
		fmt.Sprintf("unittest-%d", rand.Uint64()),
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	// Write 15 bytes 255 times: at 3 items (45 bytes) per 50-byte file, this
	// results in 85 files
	writeChunks(t, f, 255, 15)

	//print(t, f, 0)
	//print(t, f, 1)
	//print(t, f, 2)
	//
	//db[0] = 000000000000000000000000000000
	//db[1] = 010101010101010101010101010101
	//db[2] = 020202020202020202020202020202

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatalf("reading item %d: %v", y, err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
	}
	// Check that we cannot read too far
	_, err = f.Retrieve(uint64(255))
	if err != errOutOfBounds {
		t.Fatal(err)
	}
}

// TestFreezerBasicsClosing covers the same ground as TestFreezerBasics, but closes
// and reopens the table between every operation.
func TestFreezerBasicsClosing(t *testing.T) {
	t.Parallel()
	// Set the cutoff at 50 bytes
	var (
		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
		f          *freezerTable
		err        error
	)
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 15 bytes 255 times, resulting in 85 files.
	// In-between writes, the table is closed and re-opened.
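	// Because every iteration cycles the table through a close/re-open, newTable
	// must rebuild its in-memory state (item count, head file, head offset) from
	// the index and data files alone each time around.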
	for x := 0; x < 255; x++ {
		data := getChunk(15, x)
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(uint64(x), data))
		require.NoError(t, batch.commit())
		f.Close()

		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
	defer f.Close()

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
		f.Close()
		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
func TestFreezerRepairDanglingHead(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)

		// The last item should be there
		if _, err = f.Retrieve(0xfe); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove 4 bytes from the end, leaving the last index entry only partially intact
	stat, err := idxFile.Stat()
	if err != nil {
		t.Fatalf("Failed to stat index file: %v", err)
	}
	idxFile.Truncate(stat.Size() - 4)
	idxFile.Close()

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// The last item (0xfe) should now be missing
		if _, err = f.Retrieve(0xfe); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// The one before should still be there
		if _, err = f.Retrieve(0xfd); err != nil {
			t.Fatalf("Expected no error, got %v", err)
		}
	}
}

// TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove everything but the first item, and leave the data unaligned:
	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
	idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
	idxFile.Close()
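
	// An informal sketch of the index layout this manipulation relies on (as used
	// by this package's freezer tables): for n stored items the .ridx file holds
	// n+1 entries (indexEntrySize bytes each), where entry 0 acts as a header
	// recording the first data-file number and the count of tail-deleted items,
	// and each following entry marks where one item ends. Truncating to 2.5
	// entries therefore keeps item 0 intact and leaves half a dangling entry,
	// which the repair logic on open is expected to discard.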

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// The first item should be there
		if _, err = f.Retrieve(0); err != nil {
			t.Fatal(err)
		}
		// The second item should be missing
		if _, err = f.Retrieve(1); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// We should now be able to store items again, from item = 1
		batch := f.newBatch()
		for x := 1; x < 0xff; x++ {
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}

	// And if we open it, we should now be able to read all of them (new values)
	{
		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		for y := 1; y < 255; y++ {
			exp := getChunk(15, ^y)
			got, err := f.Retrieve(uint64(y))
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, exp) {
				t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
			}
		}
	}
}

// TestSnappyDetection tests that a table written in one compression mode cannot be
// read back in the other: raw and snappy tables use different file extensions, so
// opening with the wrong flag sees an empty table.
func TestSnappyDetection(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())

	// Open in raw mode (no snappy) and write some data
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)
		f.Close()
	}

	// Open with snappy compression enabled: it looks for different files, so the
	// table should appear empty
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = f.Retrieve(0); err == nil {
			f.Close()
			t.Fatalf("expected empty table")
		}
	}

	// Open in raw mode again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// There should be 255 items
		if _, err = f.Retrieve(0xfe); err != nil {
			f.Close()
			t.Fatalf("expected no error, got %v", err)
		}
	}
}

func assertFileSize(f string, size int64) error {
	stat, err := os.Stat(f)
	if err != nil {
		return err
	}
	if stat.Size() != size {
		return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
	}
	return nil
}

// TestFreezerRepairDanglingIndex checks that if the index has more entries than the
// data files can account for, the index is repaired on open
func TestFreezerRepairDanglingIndex(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 9 times: 135 bytes in total
		writeChunks(t, f, 9, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
			f.Close()
			t.Fatal(err)
		}
		f.Close()
		// File sizes should be 45, 45, 45: items [3, 3, 3]
	}

	// Crop the third file
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
	// Truncate the third file: 45, 45, 20
	{
		if err := assertFileSize(fileToCrop, 45); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}
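
	// A quick sanity check of the arithmetic (informal, based on the sizes used
	// above): the cropped file held items 6..8 at 15 bytes each. After truncating
	// it to 20 bytes, item 6 (ending at offset 15) is still complete, item 7 is
	// cut off mid-way and item 8 is gone entirely. Opening the table should
	// therefore drop the index entries pointing past the data and trim the file
	// back to offset 15, leaving 3+3+1 = 7 items.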

	// Now open it again.
	// It should restore the file(s) to
	// 45, 45, 15
	// with 3+3+1 items
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		if f.items.Load() != 7 {
			t.Fatalf("expected %d items, got %d", 7, f.items.Load())
		}
		if err := assertFileSize(fileToCrop, 15); err != nil {
			t.Fatal(err)
		}
	}
}

func TestFreezerTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncation-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Reopen, truncate
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		f.truncateHead(10) // 150 bytes
		if f.items.Load() != 10 {
			t.Fatalf("expected %d items, got %d", 10, f.items.Load())
		}
		// Files of 45, 45, 45, 15 bytes -- the head file should hold 15 bytes
		if f.headBytes != 15 {
			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
		}
	}
}

// TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
// That will rewind the index, and _should_ truncate the head file.
func TestFreezerRepairFirstFile(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 80 bytes, splitting out into two files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
		require.NoError(t, batch.commit())

		// The last item should be there
		if _, err = f.Retrieve(1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Truncate the second file in half
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
	{
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}

	// Reopen
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		if f.items.Load() != 1 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 1, f.items.Load())
		}

		// Write 40 bytes
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
		require.NoError(t, batch.commit())

		f.Close()

		// Should have been truncated down to zero and then 40 written
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
	}
}

// TestFreezerReadAndTruncate tests:
// - we have a table open
// - do some reads, so files are open in read-only mode
// - truncate so those files are 'removed'
// - check that we did not keep the read-only file descriptors around
func TestFreezerReadAndTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Reopen and read all files
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		if f.items.Load() != 30 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 30, f.items.Load())
		}
		for y := byte(0); y < 30; y++ {
			f.Retrieve(uint64(y))
		}

		// Now, truncate back to zero
		f.truncateHead(0)

		// Write the data again
		batch := f.newBatch()
		for x := 0; x < 30; x++ {
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}
}

func TestFreezerOffset(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("offset-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}

		// Write 6 x 20 bytes, splitting out into three files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))

		require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
		require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))

		require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
		require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
		require.NoError(t, batch.commit())

		t.Log(f.dumpIndexString(0, 100))
		f.Close()
	}

	// Now crop it.
	{
		// Delete files 0 and 1
		for i := 0; i < 2; i++ {
			p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
			if err := os.Remove(p); err != nil {
				t.Fatal(err)
			}
		}
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 7*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 4 ] at index zero

		zeroIndex := indexEntry{
			filenum: uint32(2), // First file is 2
			offset:  uint32(4), // We have removed four items
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)

		// Remove the four next indices by overwriting
		copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
		indexFile.WriteAt(indexBuf, 0)

		// Need to truncate the moved index items
		indexFile.Truncate(indexEntrySize * (1 + 2))
		indexFile.Close()
	}
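
	// For reference, zeroIndex.append above serializes the entry in big-endian
	// form; a minimal sketch of that encoding, assuming the six-byte on-disk
	// layout (2-byte file number, 4-byte offset) used by this package's index
	// entries:
	//
	//	buf := make([]byte, 6)
	//	binary.BigEndian.PutUint16(buf[:2], uint16(zeroIndex.filenum)) // which data file
	//	binary.BigEndian.PutUint32(buf[2:], zeroIndex.offset)          // offset, or deleted-item count in entry 0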

	// Now open again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		// It should allow writing item 6.
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
		require.NoError(t, batch.commit())

		checkRetrieveError(t, f, map[uint64]error{
			0: errOutOfBounds,
			1: errOutOfBounds,
			2: errOutOfBounds,
			3: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			4: getChunk(20, 0xbb),
			5: getChunk(20, 0xaa),
			6: getChunk(20, 0x99),
		})
	}

	// Edit the index again, with a much larger initial offset of 1M.
	{
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 3*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 1M ] at index zero

		zeroIndex := indexEntry{
			offset:  uint32(1000000), // We have removed 1M items
			filenum: uint32(2),       // First file is 2
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)
		indexFile.WriteAt(indexBuf, 0)
		indexFile.Close()
	}

	// Check that existing items have been moved to index 1M.
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		checkRetrieveError(t, f, map[uint64]error{
			0:      errOutOfBounds,
			1:      errOutOfBounds,
			2:      errOutOfBounds,
			3:      errOutOfBounds,
			999999: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			1000000: getChunk(20, 0xbb),
			1000001: getChunk(20, 0xaa),
		})
	}
}

func TestTruncateTail(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())

	// Fill table
	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 7 x 20 bytes, splitting out into four files
	batch := f.newBatch()
	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())
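
	// At this point, with a 40-byte cutoff and 20-byte items, the data should be
	// laid out two items per file (a sketch inferred from the sizes used above):
	//
	//	<fname>.0000.rdat: items 0, 1
	//	<fname>.0001.rdat: items 2, 3
	//	<fname>.0002.rdat: items 4, 5
	//	<fname>.0003.rdat: item 6 (the head file)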

	// Nothing to do at tail 0; all the items should still be there.
	f.truncateTail(0)
	t.Log(f.dumpIndexString(0, 1000))
	checkRetrieve(t, f, map[uint64][]byte{
		0: getChunk(20, 0xFF),
		1: getChunk(20, 0xEE),
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Truncate a single element (item 0). Data deletion only happens at file
	// granularity, so the item is hidden but its file remains.
	f.truncateTail(1)
	t.Log(f.dumpIndexString(0, 1000))
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		1: getChunk(20, 0xEE),
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Reopen the table; the deletion information should have been persisted as well
	f.Close()
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		1: getChunk(20, 0xEE),
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Truncate two elements (items 0 and 1); file 0 should now be deleted
	f.truncateTail(2)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Reopen the table; the above checks should still pass
	f.Close()
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// Truncate everything; the entire table contents should be gone
	f.truncateTail(7)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
		2: errOutOfBounds,
		3: errOutOfBounds,
		4: errOutOfBounds,
		5: errOutOfBounds,
		6: errOutOfBounds,
	})
}

func TestTruncateHead(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())

	// Fill table
	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 7 x 20 bytes, splitting out into four files
	batch := f.newBatch()
	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())

	f.truncateTail(4) // Tail = 4

	// Truncate the head down to 4 as well; since the tail is also 4, the entire
	// table should now be empty.
	f.truncateHead(4)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds, // Deleted by tail
		1: errOutOfBounds, // Deleted by tail
		2: errOutOfBounds, // Deleted by tail
		3: errOutOfBounds, // Deleted by tail
		4: errOutOfBounds, // Deleted by head
		5: errOutOfBounds, // Deleted by head
		6: errOutOfBounds, // Deleted by head
	})

	// Append new items
	batch = f.newBatch()
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())

	checkRetrieve(t, f, map[uint64][]byte{
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})
}

func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
	t.Helper()

	for item, wantBytes := range items {
		value, err := f.Retrieve(item)
		if err != nil {
			t.Fatalf("can't get expected item %d: %v", item, err)
		}
		if !bytes.Equal(value, wantBytes) {
			t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
		}
	}
}

func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
	t.Helper()

	for item, wantError := range items {
		value, err := f.Retrieve(item)
		if err == nil {
			t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
		}
		if err != wantError {
			t.Fatalf("wrong error for item %d: %v", item, err)
		}
	}
}

// getChunk returns a chunk of data of the given size, with every byte set to 'b'
func getChunk(size int, b int) []byte {
	data := make([]byte, size)
	for i := range data {
		data[i] = byte(b)
	}
	return data
}

// TODO (?)
// - test that if we remove several head-files, as well as the last data file,
//   the index is truncated accordingly
// Right now, the freezer would fail on these conditions:
// 1. have data files d0, d1, d2, d3
// 2. remove d2, d3
//
// However, all 'normal' failure modes arising due to failing to sync() or save a file
// should be handled already, and the case described above can only (?) happen if an
// external process/user deletes files from the filesystem.

// writeChunks writes n chunks of the given length via a single batch commit,
// failing the test on any error.
func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
	t.Helper()

	batch := ft.newBatch()
	for i := 0; i < n; i++ {
		if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
			t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
		}
	}
	if err := batch.commit(); err != nil {
		t.Fatalf("Commit returned error: %v", err)
	}
}
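
// The RetrieveItems(start, count, maxBytes) calls exercised below behave as
// follows (as asserted by these tests, not a normative spec): up to count items
// are returned starting at start; the read stops early once returning another
// item would push the total past maxBytes, but at least one item is always
// returned; and maxBytes == 0 disables the size limit entirely.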

// TestSequentialRead performs some basic checks on RetrieveItems.
func TestSequentialRead(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)
		f.dumpIndexStdout(0, 30)
		f.Close()
	}
	{ // Open it, iterate, verify iteration
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		items, err := f.RetrieveItems(0, 10000, 100000)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 30; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
	{ // Open it, iterate, verify the byte limit. The byte limit is less than the
		// item size, so each lookup should only return one item.
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		items, err := f.RetrieveItems(0, 10000, 10)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 1; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
}

// TestSequentialReadByteLimit does some more advanced tests on batch reads.
// These tests check that when the byte limit is hit, we correctly abort in time,
// but also properly do all the deferred reads for the previous data, regardless
// of whether the data crosses a file boundary or not.
func TestSequentialReadByteLimit(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 10 bytes 30 times,
		// splitting at every 100 bytes (10 items per file)
		writeChunks(t, f, 30, 10)
		f.Close()
	}
	for i, tc := range []struct {
		items uint64
		limit uint64
		want  int
	}{
		{9, 89, 8},
		{10, 99, 9},
		{11, 109, 10},
		{100, 89, 8},
		{100, 99, 9},
		{100, 109, 10},
	} {
		{
			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
			if err != nil {
				t.Fatal(err)
			}
			items, err := f.RetrieveItems(0, tc.items, tc.limit)
			if err != nil {
				t.Fatal(err)
			}
			if have, want := len(items), tc.want; have != want {
				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
			}
			for ii, have := range items {
				want := getChunk(10, ii)
				if !bytes.Equal(want, have) {
					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
				}
			}
			f.Close()
		}
	}
}
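
// To make the table above concrete, take the {10, 99, 9} case: items are stored
// raw at 10 bytes each, so nine items amount to 90 bytes and a tenth would reach
// 100 > 99, stopping the read at nine. With limit 109 ten items (100 bytes) fit,
// and since the files hold ten items each, that tenth item ends exactly at a
// file boundary, which is what the deferred-read handling is being checked
// against. (Informal arithmetic, assuming the limit is applied to the stored
// item sizes.)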

// TestSequentialReadNoByteLimit tests batch reads when maxBytes is not specified.
// The freezer should then return the requested items regardless of their total size.
func TestSequentialReadNoByteLimit(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-3-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 10 bytes 30 times,
		// splitting at every 100 bytes (10 items per file)
		writeChunks(t, f, 30, 10)
		f.Close()
	}
	for i, tc := range []struct {
		items uint64
		want  int
	}{
		{1, 1},
		{30, 30},
		{31, 30},
	} {
		{
			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
			if err != nil {
				t.Fatal(err)
			}
			items, err := f.RetrieveItems(0, tc.items, 0)
			if err != nil {
				t.Fatal(err)
			}
			if have, want := len(items), tc.want; have != want {
				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
			}
			for ii, have := range items {
				want := getChunk(10, ii)
				if !bytes.Equal(want, have) {
					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
				}
			}
			f.Close()
		}
	}
}

func TestFreezerReadonly(t *testing.T) {
	tmpdir := os.TempDir()
	// Case 1: Check that opening fails for a non-existent table.
	_, err := newTable(tmpdir,
		fmt.Sprintf("readonlytest-%d", rand.Uint64()),
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err == nil {
		t.Fatal("readonly table instantiation should fail for non-existent table")
	}

	// Case 2: Check that opening fails on an invalid index length.
	fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
	idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// The size is deliberately not a multiple of indexEntrySize.
	idxFile.Write(make([]byte, 17))
	idxFile.Close()
	_, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err == nil {
		t.Errorf("readonly table instantiation should fail for invalid index size")
	}

	// Case 3: Open a non-readonly table to write some data.
	// Then corrupt the head file and make sure opening the table
	// again in readonly mode triggers an error.
	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
	f, err := newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		t.Fatalf("failed to instantiate table: %v", err)
	}
	writeChunks(t, f, 8, 32)
	// Corrupt the table file by appending two stray bytes to the head
	if _, err := f.head.Write([]byte{1, 1}); err != nil {
		t.Fatal(err)
	}
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
	_, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err == nil {
		t.Errorf("readonly table instantiation should fail for corrupt table file")
	}
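
	// Note on Case 3: in read-write mode the size mismatch between the index and
	// the oversized head file would simply be repaired by truncation on open; in
	// readonly mode no such repair is possible, which is why the open itself is
	// expected to fail.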

	// Case 4: Write some data to a table and later re-open it as readonly.
	// This should be successful.
	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
	f, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		t.Fatalf("failed to instantiate table: %v", err)
	}
	writeChunks(t, f, 32, 128)
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
	f, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err != nil {
		t.Fatal(err)
	}
	v, err := f.Retrieve(10)
	if err != nil {
		t.Fatal(err)
	}
	exp := getChunk(128, 10)
	if !bytes.Equal(v, exp) {
		t.Errorf("retrieved value is incorrect")
	}

	// Case 5: Now write some data via a batch.
	// This should fail either during AppendRaw or commit.
	batch := f.newBatch()
	writeErr := batch.AppendRaw(32, make([]byte, 1))
	if writeErr == nil {
		writeErr = batch.commit()
	}
	if writeErr == nil {
		t.Fatalf("Writing to readonly table should fail")
	}
}

// randTest performs random freezer table operations.
// Instances of this test are created by Generate.
type randTest []randTestStep

type randTestStep struct {
	op     int
	items  []uint64 // for append and retrieve
	blobs  [][]byte // for append
	target uint64   // for truncate(head/tail)
	err    error    // for debugging
}

const (
	opReload = iota
	opAppend
	opRetrieve
	opTruncateHead
	opTruncateHeadAll
	opTruncateTail
	opTruncateTailAll
	opCheckAll
	opMax // boundary value, not an actual op
)

// getVals returns n 8-byte blobs encoding first, first+1, ..., first+n-1 as
// big-endian uint64 values.
func getVals(first uint64, n int) [][]byte {
	var ret [][]byte
	for i := 0; i < n; i++ {
		val := make([]byte, 8)
		binary.BigEndian.PutUint64(val, first+uint64(i))
		ret = append(ret, val)
	}
	return ret
}
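
// randTest satisfies testing/quick's Generator interface, which is how
// quick.Check in TestRandom synthesizes random operation sequences. Generate
// tracks the expected table state itself (the deleted tail count plus the live
// item indexes), so every generated step has a legal target; runRandTest then
// replays the steps against a real table and cross-checks each result.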
func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
	var (
		deleted uint64   // The number of items deleted from the tail
		items   []uint64 // The indexes of the entries in the table

		// getItems returns up to n consecutive item indexes, starting at a
		// random position within the table.
		getItems = func(n int) []uint64 {
			length := len(items)
			if length == 0 {
				return nil
			}
			var ret []uint64
			index := rand.Intn(length)
			for i := index; len(ret) < n && i < length; i++ {
				ret = append(ret, items[i])
			}
			return ret
		}

		// addItems registers n new item indexes as appended to the table and
		// returns them.
		addItems = func(n int) []uint64 {
			var first = deleted
			if len(items) != 0 {
				first = items[len(items)-1] + 1
			}
			var ret []uint64
			for i := 0; i < n; i++ {
				ret = append(ret, first+uint64(i))
			}
			items = append(items, ret...)
			return ret
		}
	)

	var steps randTest
	for i := 0; i < size; i++ {
		step := randTestStep{op: r.Intn(opMax)}
		switch step.op {
		case opReload, opCheckAll:
		case opAppend:
			num := r.Intn(3)
			step.items = addItems(num)
			if len(step.items) == 0 {
				step.blobs = nil
			} else {
				step.blobs = getVals(step.items[0], num)
			}
		case opRetrieve:
			step.items = getItems(r.Intn(3))
		case opTruncateHead:
			if len(items) == 0 {
				step.target = deleted
			} else {
				index := r.Intn(len(items))
				items = items[:index]
				step.target = deleted + uint64(index)
			}
		case opTruncateHeadAll:
			step.target = deleted
			items = items[:0]
		case opTruncateTail:
			if len(items) == 0 {
				step.target = deleted
			} else {
				index := r.Intn(len(items))
				items = items[index:]
				deleted += uint64(index)
				step.target = deleted
			}
		case opTruncateTailAll:
			step.target = deleted + uint64(len(items))
			items = items[:0]
			deleted = step.target
		}
		steps = append(steps, step)
	}
	return reflect.ValueOf(steps)
}

func runRandTest(rt randTest) bool {
	fname := fmt.Sprintf("randtest-%d", rand.Uint64())
	f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		panic("failed to initialize table")
	}
	var values [][]byte
	for i, step := range rt {
		switch step.op {
		case opReload:
			f.Close()
			f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
			if err != nil {
				rt[i].err = fmt.Errorf("failed to reload table %v", err)
			}
		case opCheckAll:
			tail := f.itemHidden.Load()
			head := f.items.Load()

			if tail == head {
				continue
			}
			got, err := f.RetrieveItems(f.itemHidden.Load(), head-tail, 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, values) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
				}
			}

		case opAppend:
			batch := f.newBatch()
			for i := 0; i < len(step.items); i++ {
				batch.AppendRaw(step.items[i], step.blobs[i])
			}
			batch.commit()
			values = append(values, step.blobs...)

		case opRetrieve:
			var blobs [][]byte
			if len(step.items) == 0 {
				continue
			}
			tail := f.itemHidden.Load()
			for i := 0; i < len(step.items); i++ {
				blobs = append(blobs, values[step.items[i]-tail])
			}
			got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, blobs) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
				}
			}

		case opTruncateHead:
			f.truncateHead(step.target)

			length := f.items.Load() - f.itemHidden.Load()
			values = values[:length]

		case opTruncateHeadAll:
			f.truncateHead(step.target)
			values = nil

		case opTruncateTail:
			prev := f.itemHidden.Load()
			f.truncateTail(step.target)

			truncated := f.itemHidden.Load() - prev
			values = values[truncated:]

		case opTruncateTailAll:
			f.truncateTail(step.target)
			values = nil
		}
		// Abort the test on error.
		if rt[i].err != nil {
			return false
		}
	}
	f.Close()
	return true
}

func TestRandom(t *testing.T) {
	if err := quick.Check(runRandTest, nil); err != nil {
		if cerr, ok := err.(*quick.CheckError); ok {
			t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
		}
		t.Fatal(err)
	}
}
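
// Note: quick.Check runs 100 random sequences by default. For a heavier stress
// run, the standard testing/quick knob for this is a custom config, along the
// lines of:
//
//	if err := quick.Check(runRandTest, &quick.Config{MaxCount: 10000}); err != nil {
//		// handle / report the failing sequence as above
//	}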