gitee.com/liu-zhao234568/cntest@v1.0.0/core/rawdb/freezer_table_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"path/filepath"
	"sync"
	"testing"
	"time"

	"gitee.com/liu-zhao234568/cntest/metrics"
)

func init() {
	rand.Seed(time.Now().Unix())
}

// getChunk returns a chunk of data of the given size, filled with the byte value b.
func getChunk(size int, b int) []byte {
	data := make([]byte, size)
	for i := range data {
		data[i] = byte(b)
	}
	return data
}

// TestFreezerBasics tests initializing a freezer table from scratch, writing to the table,
// and reading it back.
func TestFreezerBasics(t *testing.T) {
	t.Parallel()
	// Set the cutoff at 50 bytes
	f, err := newCustomTable(os.TempDir(),
		fmt.Sprintf("unittest-%d", rand.Uint64()),
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()
	// Write 15 bytes 255 times, which results in 85 files
	for x := 0; x < 255; x++ {
		data := getChunk(15, x)
		f.Append(uint64(x), data)
	}

	//print(t, f, 0)
	//print(t, f, 1)
	//print(t, f, 2)
	//
	//db[0] = 000000000000000000000000000000
	//db[1] = 010101010101010101010101010101
	//db[2] = 020202020202020202020202020202

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
	}
	// Check that we cannot read too far
	_, err = f.Retrieve(uint64(255))
	if err != errOutOfBounds {
		t.Fatal(err)
	}
}
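
// Note on the sizes used in these tests: with a 50 byte cutoff and 15 byte
// items, a data file holds three items (45 bytes) before the next append
// rolls over to a fresh file, which is why writing 255 items is expected to
// spread across 85 data files.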

// TestFreezerBasicsClosing tests the same as TestFreezerBasics, but also
// closes and reopens the freezer between every operation.
func TestFreezerBasicsClosing(t *testing.T) {
	t.Parallel()
	// Set the cutoff at 50 bytes
	var (
		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
		f          *freezerTable
		err        error
	)
	f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
	if err != nil {
		t.Fatal(err)
	}
	// Write 15 bytes 255 times, which results in 85 files
	for x := 0; x < 255; x++ {
		data := getChunk(15, x)
		f.Append(uint64(x), data)
		f.Close()
		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
	}
	defer f.Close()

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
		f.Close()
		f, err = newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestFreezerRepairDanglingHead tests that we can recover if index entries are removed.
func TestFreezerRepairDanglingHead(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		for x := 0; x < 255; x++ {
			data := getChunk(15, x)
			f.Append(uint64(x), data)
		}
		// The last item should be there
		if _, err = f.Retrieve(0xfe); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}
	// Open the index file
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove 4 bytes
	stat, err := idxFile.Stat()
	if err != nil {
		t.Fatalf("Failed to stat index file: %v", err)
	}
	idxFile.Truncate(stat.Size() - 4)
	idxFile.Close()
	// Now open it again
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// The last item should be missing
		if _, err = f.Retrieve(0xff); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// The one before should still be there
		if _, err = f.Retrieve(0xfd); err != nil {
			t.Fatalf("Expected no error, got %v", err)
		}
	}
}
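
// The dangling-head tests above and below rely on how a truncated index file
// is repaired on open: any trailing partial entry is dropped, and every
// complete entry after the first one describes a stored item. The helper
// below is only a documentation sketch of that arithmetic -- it assumes the
// upstream go-ethereum layout (one leading entry plus one entry per item) and
// a table whose tail has not been deleted; the tests themselves do not use it.
func expectedItemsInIndex(indexSize int64) uint64 {
	entries := indexSize / int64(indexEntrySize) // a trailing partial entry is ignored
	if entries <= 1 {
		return 0 // only the leading entry (or less): no readable items
	}
	return uint64(entries - 1)
}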

// TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed.
func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	{ // Fill a table and close it
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		for x := 0; x < 0xff; x++ {
			data := getChunk(15, x)
			f.Append(uint64(x), data)
		}
		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}
	// Open the index file
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove everything but the first item, and leave data unaligned
	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
	idxFile.Truncate(indexEntrySize + indexEntrySize + indexEntrySize/2)
	idxFile.Close()
	// Now open it again
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// The first item should be there
		if _, err = f.Retrieve(0); err != nil {
			t.Fatal(err)
		}
		// The second item should be missing
		if _, err = f.Retrieve(1); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// We should now be able to store items again, from item = 1
		for x := 1; x < 0xff; x++ {
			data := getChunk(15, ^x)
			f.Append(uint64(x), data)
		}
		f.Close()
	}
	// And if we open it, we should now be able to read all of them (new values)
	{
		f, _ := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		for y := 1; y < 255; y++ {
			exp := getChunk(15, ^y)
			got, err := f.Retrieve(uint64(y))
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, exp) {
				t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
			}
		}
	}
}

// TestSnappyDetection tests that a table written with one compression setting
// cannot be read back when reopened with the other setting.
func TestSnappyDetection(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())
	// Open with snappy
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		for x := 0; x < 0xff; x++ {
			data := getChunk(15, x)
			f.Append(uint64(x), data)
		}
		f.Close()
	}
	// Open without snappy
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, false)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = f.Retrieve(0); err == nil {
			f.Close()
			t.Fatalf("expected empty table")
		}
	}

	// Open with snappy
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// There should be 255 items
		if _, err = f.Retrieve(0xfe); err != nil {
			f.Close()
			t.Fatalf("expected no error, got %v", err)
		}
	}
}

func assertFileSize(f string, size int64) error {
	stat, err := os.Stat(f)
	if err != nil {
		return err
	}
	if stat.Size() != size {
		return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
	}
	return nil
}

// TestFreezerRepairDanglingIndex checks that if the index has more entries than there are data,
// the index is repaired.
func TestFreezerRepairDanglingIndex(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())

	{ // Fill a table and close it
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 9 times: 135 bytes in total
		for x := 0; x < 9; x++ {
			data := getChunk(15, x)
			f.Append(uint64(x), data)
		}
		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			f.Close()
			t.Fatal(err)
		}
		f.Close()
		// File sizes should be 45, 45, 45 : items [3, 3, 3]
	}
	// Crop the third file
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
	// Truncate the third file: 45, 45, 20
	{
		if err := assertFileSize(fileToCrop, 45); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}
	// Open the db again.
	// The 20 byte head file only holds one complete 15 byte item, so the
	// repair should drop the torn tail and restore the file(s) to
	// 45, 45, 15
	// with 3+3+1 items
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		if f.items != 7 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 7, f.items)
		}
		if err := assertFileSize(fileToCrop, 15); err != nil {
			t.Fatal(err)
		}
	}
}
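
// Note on the truncation arithmetic in TestFreezerTruncate below: with 15 byte
// items and a 50 byte cutoff, three items fill a 45 byte data file, so keeping
// the first 10 items should leave three full files plus a single 15 byte item
// in the head file, which is what the headBytes check expects.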

func TestFreezerTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncation-%d", rand.Uint64())

	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		for x := 0; x < 30; x++ {
			data := getChunk(15, x)
			f.Append(uint64(x), data)
		}
		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}
	// Reopen, truncate
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		f.truncate(10) // 150 bytes
		if f.items != 10 {
			t.Fatalf("expected %d items, got %d", 10, f.items)
		}
		// File sizes are now 45, 45, 45, 15 -- the head file should hold 15 bytes
		if f.headBytes != 15 {
			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
		}
	}
}

// TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
// That will rewind the index, and _should_ truncate the head file.
func TestFreezerRepairFirstFile(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())
	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 80 bytes, splitting out into two files
		f.Append(0, getChunk(40, 0xFF))
		f.Append(1, getChunk(40, 0xEE))
		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}
	// Truncate the file in half
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
	{
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}
	// Reopen
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		if f.items != 1 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 1, f.items)
		}
		// Write 40 bytes
		f.Append(1, getChunk(40, 0xDD))
		f.Close()
		// Should have been truncated down to zero and then 40 written
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
	}
}
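
// Note on TestFreezerRepairFirstFile above: with a 50 byte cutoff each 40 byte
// item lands in its own data file, so cropping the second file to 20 bytes
// leaves a torn item 1. On reopen the table should rewind to a single item and
// truncate the torn head file before the new append lands.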

// TestFreezerReadAndTruncate tests:
// - we have a table open
// - do some reads, so files are open in readonly
// - truncate so those files are 'removed'
// - check that we did not keep the rdonly file descriptors
func TestFreezerReadAndTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())
	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		for x := 0; x < 30; x++ {
			data := getChunk(15, x)
			f.Append(uint64(x), data)
		}
		// The last item should be there
		if _, err = f.Retrieve(f.items - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}
	// Reopen and read all files
	{
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 50, true)
		if err != nil {
			t.Fatal(err)
		}
		if f.items != 30 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 30, f.items)
		}
		for y := byte(0); y < 30; y++ {
			f.Retrieve(uint64(y))
		}
		// Now, truncate back to zero
		f.truncate(0)
		// Write the data again
		for x := 0; x < 30; x++ {
			data := getChunk(15, ^x)
			if err := f.Append(uint64(x), data); err != nil {
				t.Fatalf("error %v", err)
			}
		}
		f.Close()
	}
}

func TestOffset(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("offset-%d", rand.Uint64())
	{ // Fill table
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}
		// Write 6 x 20 bytes, splitting out into three files
		f.Append(0, getChunk(20, 0xFF))
		f.Append(1, getChunk(20, 0xEE))

		f.Append(2, getChunk(20, 0xdd))
		f.Append(3, getChunk(20, 0xcc))

		f.Append(4, getChunk(20, 0xbb))
		f.Append(5, getChunk(20, 0xaa))
		f.DumpIndex(0, 100)
		f.Close()
	}
	// Now crop it.
	{
		// Delete files 0 and 1
		for i := 0; i < 2; i++ {
			p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
			if err := os.Remove(p); err != nil {
				t.Fatal(err)
			}
		}
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 7*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 4 ] at index zero

		tailId := uint32(2)     // First file is 2
		itemOffset := uint32(4) // We have removed four items
		zeroIndex := indexEntry{
			filenum: tailId,
			offset:  itemOffset,
		}
		buf := zeroIndex.marshallBinary()
		// Overwrite index zero
		copy(indexBuf, buf)
		// Remove the next four indices by overwriting
		copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
		indexFile.WriteAt(indexBuf, 0)
		// Need to truncate the moved index items
		indexFile.Truncate(indexEntrySize * (1 + 2))
		indexFile.Close()
	}
	// Now open again
	checkPresent := func(numDeleted uint64) {
		f, err := newCustomTable(os.TempDir(), fname, rm, wm, sg, 40, true)
		if err != nil {
			t.Fatal(err)
		}
		f.DumpIndex(0, 100)
		// It should allow writing item 6
		f.Append(numDeleted+2, getChunk(20, 0x99))

		// It should be fine to fetch 4, 5, 6
		if got, err := f.Retrieve(numDeleted); err != nil {
			t.Fatal(err)
		} else if exp := getChunk(20, 0xbb); !bytes.Equal(got, exp) {
			t.Fatalf("expected %x got %x", exp, got)
		}
		if got, err := f.Retrieve(numDeleted + 1); err != nil {
			t.Fatal(err)
		} else if exp := getChunk(20, 0xaa); !bytes.Equal(got, exp) {
			t.Fatalf("expected %x got %x", exp, got)
		}
		if got, err := f.Retrieve(numDeleted + 2); err != nil {
			t.Fatal(err)
		} else if exp := getChunk(20, 0x99); !bytes.Equal(got, exp) {
			t.Fatalf("expected %x got %x", exp, got)
		}

		// It should error at 0, 1, 2, 3
		for i := numDeleted - 1; i > numDeleted-10; i-- {
			if _, err := f.Retrieve(i); err == nil {
				t.Fatal("expected err")
			}
		}
	}
	checkPresent(4)
	// Now, let's pretend we have deleted 1M items
	{
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 3*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 1M ] at index zero

		tailId := uint32(2)           // First file is 2
		itemOffset := uint32(1000000) // We have removed 1M items
		zeroIndex := indexEntry{
			offset:  itemOffset,
			filenum: tailId,
		}
		buf := zeroIndex.marshallBinary()
		// Overwrite index zero
		copy(indexBuf, buf)
		indexFile.WriteAt(indexBuf, 0)
		indexFile.Close()
	}
	checkPresent(1000000)
}
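
// Note on TestOffset above: the test relies on index entry zero doubling as
// table metadata, with its filenum read as the tail (first) data file and its
// offset read as the number of items deleted from the tail, which is how the
// upstream go-ethereum freezer interprets it. If that layout changes, the
// hand-crafted index edits above need to change with it.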

// TODO (?)
// - test that if we remove several head-files, as well as the last data-file,
//   the index is truncated accordingly
// Right now, the freezer would fail on these conditions:
// 1. have data files d0, d1, d2, d3
// 2. remove d2, d3
//
// However, all 'normal' failure modes arising due to failing to sync() or save a file
// should be handled already, and the case described above can only (?) happen if an
// external process/user deletes files from the filesystem.

// TestAppendTruncateParallel is a test to check whether the Append/truncate operations are
// racy.
//
// The reason why it's not a regular fuzzer, within tests/fuzzers, is that it is dependent
// on timing rather than 'clever' input -- there's no determinism.
func TestAppendTruncateParallel(t *testing.T) {
	dir, err := ioutil.TempDir("", "freezer")
	if err != nil {
		t.Fatal(err)
	}
	defer os.RemoveAll(dir)

	f, err := newCustomTable(dir, "tmp", metrics.NilMeter{}, metrics.NilMeter{}, metrics.NilGauge{}, 8, true)
	if err != nil {
		t.Fatal(err)
	}

	fill := func(mark uint64) []byte {
		data := make([]byte, 8)
		binary.LittleEndian.PutUint64(data, mark)
		return data
	}

	for i := 0; i < 5000; i++ {
		f.truncate(0)
		data0 := fill(0)
		f.Append(0, data0)
		data1 := fill(1)

		var wg sync.WaitGroup
		wg.Add(2)
		go func() {
			f.truncate(0)
			wg.Done()
		}()
		go func() {
			f.Append(1, data1)
			wg.Done()
		}()
		wg.Wait()

		if have, err := f.Retrieve(0); err == nil {
			if !bytes.Equal(have, data0) {
				t.Fatalf("have %x want %x", have, data0)
			}
		}
	}
}
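
// Note on TestAppendTruncateParallel above: the test does not assert a
// particular interleaving of the racing Append and truncate. The only
// invariant checked is that if item 0 is still retrievable afterwards, its
// contents come back byte-for-byte intact -- the race may lose data, but it
// must never corrupt it.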