github.1485827954.workers.dev/ethereum/go-ethereum@v1.14.3/core/rawdb/freezer_table_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/rand"
	"os"
	"path/filepath"
	"reflect"
	"testing"
	"testing/quick"

	"github.com/davecgh/go-spew/spew"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/stretchr/testify/require"
)

// TestFreezerBasics tests initializing a freezer table from scratch, writing to the table,
// and reading it back.
func TestFreezerBasics(t *testing.T) {
	t.Parallel()
	// set cutoff at 50 bytes
	f, err := newTable(os.TempDir(),
		fmt.Sprintf("unittest-%d", rand.Uint64()),
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	// Write 15 bytes 255 times, results in 85 files
	writeChunks(t, f, 255, 15)

	//print(t, f, 0)
	//print(t, f, 1)
	//print(t, f, 2)
	//
	//db[0] = 000000000000000000000000000000
	//db[1] = 010101010101010101010101010101
	//db[2] = 020202020202020202020202020202

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatalf("reading item %d: %v", y, err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
	}
	// Check that we cannot read too far
	_, err = f.Retrieve(uint64(255))
	if err != errOutOfBounds {
		t.Fatal(err)
	}
}
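// A note on the "85 files" arithmetic above: the tests in this file use the
// raw (".ridx"/".rdat") table flavour, so each 15-byte item occupies exactly
// 15 bytes on disk and three items fit under the 50-byte cutoff before a new
// data file is started. A minimal sketch of that accounting (illustrative
// helper under those assumptions, not part of the freezer implementation):
func chunkFilesNeeded(items, itemSize, cutoff int) int {
	perFile := cutoff / itemSize           // full items that fit under the cutoff
	return (items + perFile - 1) / perFile // e.g. 255 items / 3 per file = 85 files
}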
// TestFreezerBasicsClosing tests the same as TestFreezerBasics, but also closes
// and reopens the table between every operation
func TestFreezerBasicsClosing(t *testing.T) {
	t.Parallel()
	// set cutoff at 50 bytes
	var (
		fname      = fmt.Sprintf("basics-close-%d", rand.Uint64())
		rm, wm, sg = metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
		f          *freezerTable
		err        error
	)
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 15 bytes 255 times, results in 85 files.
	// In-between writes, the table is closed and re-opened.
	for x := 0; x < 255; x++ {
		data := getChunk(15, x)
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(uint64(x), data))
		require.NoError(t, batch.commit())
		f.Close()

		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
	defer f.Close()

	for y := 0; y < 255; y++ {
		exp := getChunk(15, y)
		got, err := f.Retrieve(uint64(y))
		if err != nil {
			t.Fatal(err)
		}
		if !bytes.Equal(got, exp) {
			t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
		}
		f.Close()
		f, err = newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
	}
}

// TestFreezerRepairDanglingHead tests that we can recover if index entries are removed
func TestFreezerRepairDanglingHead(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)

		// The last item should be there
		if _, err = f.Retrieve(0xfe); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove 4 bytes
	stat, err := idxFile.Stat()
	if err != nil {
		t.Fatalf("Failed to stat index file: %v", err)
	}
	idxFile.Truncate(stat.Size() - 4)
	idxFile.Close()

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// The last item (0xfe) should be missing
		if _, err = f.Retrieve(0xfe); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// The one before should still be there
		if _, err = f.Retrieve(0xfd); err != nil {
			t.Fatalf("Expected no error, got %v", err)
		}
	}
}
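// Both dangling-head tests rely on the on-disk index format: the ".ridx"
// file is a flat array of indexEntrySize-byte records (6 bytes in this
// version: a 2-byte data-file number followed by a 4-byte offset), where
// entry zero describes the tail of the table and entry i+1 marks the end of
// item i. Chopping off 4 bytes therefore leaves a partial trailing record,
// and opening the table is expected to rewind the head past the last
// complete entry. A sketch of the assumed layout:
//
//	entry 0: [filenum][offset] // tail marker: first file, deleted item count
//	entry 1: [filenum][offset] // end of item 0 in that file
//	entry 2: [filenum][offset] // end of item 1, and so on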
// TestFreezerRepairDanglingHeadLarge tests that we can recover if very many index entries are removed
func TestFreezerRepairDanglingHeadLarge(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_headtest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// open the index
	idxFile, err := os.OpenFile(filepath.Join(os.TempDir(), fmt.Sprintf("%s.ridx", fname)), os.O_RDWR, 0644)
	if err != nil {
		t.Fatalf("Failed to open index file: %v", err)
	}
	// Remove everything but the first item, and leave data unaligned
	// 0-indexEntry, 1-indexEntry, corrupt-indexEntry
	idxFile.Truncate(2*indexEntrySize + indexEntrySize/2)
	idxFile.Close()

	// Now open it again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// The first item should be there
		if _, err = f.Retrieve(0); err != nil {
			t.Fatal(err)
		}
		// The second item should be missing
		if _, err = f.Retrieve(1); err == nil {
			t.Errorf("Expected error for missing index entry")
		}
		// We should now be able to store items again, from item = 1
		batch := f.newBatch()
		for x := 1; x < 0xff; x++ {
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}

	// And if we open it, we should now be able to read all of them (new values)
	{
		f, _ := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		for y := 1; y < 255; y++ {
			exp := getChunk(15, ^y)
			got, err := f.Retrieve(uint64(y))
			if err != nil {
				t.Fatal(err)
			}
			if !bytes.Equal(got, exp) {
				t.Fatalf("test %d, got \n%x != \n%x", y, got, exp)
			}
		}
	}
}

// TestSnappyDetection tests that a table written in one compression mode
// cannot be opened in the other mode, and vice versa
func TestSnappyDetection(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("snappytest-%d", rand.Uint64())

	// Open in raw mode (compression disabled)
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 255 times
		writeChunks(t, f, 255, 15)
		f.Close()
	}

	// Open with snappy
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, false, false)
		if err != nil {
			t.Fatal(err)
		}
		if _, err = f.Retrieve(0); err == nil {
			f.Close()
			t.Fatalf("expected empty table")
		}
	}

	// Open in raw mode again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// There should be 255 items
		if _, err = f.Retrieve(0xfe); err != nil {
			f.Close()
			t.Fatalf("expected no error, got %v", err)
		}
	}
}

func assertFileSize(f string, size int64) error {
	stat, err := os.Stat(f)
	if err != nil {
		return err
	}
	if stat.Size() != size {
		return fmt.Errorf("error, expected size %d, got %d", size, stat.Size())
	}
	return nil
}

// TestFreezerRepairDanglingIndex checks that if the index has more entries
// than there is data, the index is repaired
func TestFreezerRepairDanglingIndex(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("dangling_indextest-%d", rand.Uint64())

	// Fill a table and close it
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 9 times : 135 bytes
		writeChunks(t, f, 9, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
			f.Close()
			t.Fatal(err)
		}
		f.Close()
		// File sizes should be 45, 45, 45 : items [3, 3, 3]
	}

	// Crop third file
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0002.rdat", fname))
	// Truncate third file: 45, 45, 20
	{
		if err := assertFileSize(fileToCrop, 45); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}

	// Open the db again.
	// It should restore the file(s) to
	// 45, 45, 15
	// with 3+3+1 items
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		if f.items.Load() != 7 {
			t.Fatalf("expected %d items, got %d", 7, f.items.Load())
		}
		if err := assertFileSize(fileToCrop, 15); err != nil {
			t.Fatal(err)
		}
	}
}

func TestFreezerTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncation-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Reopen, truncate
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		f.truncateHead(10) // 150 bytes
		if f.items.Load() != 10 {
			t.Fatalf("expected %d items, got %d", 10, f.items.Load())
		}
		// 45, 45, 45, 15 -- bytes should be 15
		if f.headBytes != 15 {
			t.Fatalf("expected %d bytes, got %d", 15, f.headBytes)
		}
	}
}

// TestFreezerRepairFirstFile tests a head file with the very first item only half-written.
// That will rewind the index, and _should_ truncate the head file
func TestFreezerRepairFirstFile(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncationfirst-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 80 bytes, splitting out into two files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(40, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xEE)))
		require.NoError(t, batch.commit())

		// The last item should be there
		if _, err = f.Retrieve(1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Truncate the file in half
	fileToCrop := filepath.Join(os.TempDir(), fmt.Sprintf("%s.0001.rdat", fname))
	{
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
		file, err := os.OpenFile(fileToCrop, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		file.Truncate(20)
		file.Close()
	}

	// Reopen
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		if f.items.Load() != 1 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 1, f.items.Load())
		}

		// Write 40 bytes
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(1, getChunk(40, 0xDD)))
		require.NoError(t, batch.commit())

		f.Close()

		// Should have been truncated down to zero and then 40 written
		if err := assertFileSize(fileToCrop, 40); err != nil {
			t.Fatal(err)
		}
	}
}
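// The repair rule exercised by the two tests above appears to be: on open,
// the table cross-checks the last index entry against the size of the data
// file it points into, and any trailing item that is not fully present is
// dropped from both the index and the data file. Writes can then resume
// from the last verified item, which is why item 1 can be re-appended after
// its half-written copy was discarded.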
// TestFreezerReadAndTruncate tests:
//   - we have a table open
//   - do some reads, so files are open in readonly
//   - truncate so those files are 'removed'
//   - check that we did not keep the rdonly file descriptors
func TestFreezerReadAndTruncate(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("read_truncate-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)

		// The last item should be there
		if _, err = f.Retrieve(f.items.Load() - 1); err != nil {
			t.Fatal(err)
		}
		f.Close()
	}

	// Reopen and read all files
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		if f.items.Load() != 30 {
			f.Close()
			t.Fatalf("expected %d items, got %d", 30, f.items.Load())
		}
		for y := byte(0); y < 30; y++ {
			f.Retrieve(uint64(y))
		}

		// Now, truncate back to zero
		f.truncateHead(0)

		// Write the data again
		batch := f.newBatch()
		for x := 0; x < 30; x++ {
			require.NoError(t, batch.AppendRaw(uint64(x), getChunk(15, ^x)))
		}
		require.NoError(t, batch.commit())
		f.Close()
	}
}

func TestFreezerOffset(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("offset-%d", rand.Uint64())

	// Fill table
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}

		// Write 6 x 20 bytes, splitting out into three files
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
		require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))

		require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
		require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))

		require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
		require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
		require.NoError(t, batch.commit())

		t.Log(f.dumpIndexString(0, 100))
		f.Close()
	}

	// Now crop it.
	{
		// delete files 0 and 1
		for i := 0; i < 2; i++ {
			p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.%04d.rdat", fname, i))
			if err := os.Remove(p); err != nil {
				t.Fatal(err)
			}
		}
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 7*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 4 ] at index zero

		zeroIndex := indexEntry{
			filenum: uint32(2), // First file is 2
			offset:  uint32(4), // We have removed four items
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)

		// Remove the four next indices by overwriting
		copy(indexBuf[indexEntrySize:], indexBuf[indexEntrySize*5:])
		indexFile.WriteAt(indexBuf, 0)

		// Need to truncate the moved index items
		indexFile.Truncate(indexEntrySize * (1 + 2))
		indexFile.Close()
	}
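	// At this point the surgery above has left three index entries: the
	// rewritten tail marker (first file = 2, four items deleted) plus the
	// two entries that end items 4 and 5. If the bookkeeping holds, the
	// reopened table should report items 4 and 5 as its visible range and
	// accept item 6 as the next append.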
	// Now open again
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		// It should allow writing item 6.
		batch := f.newBatch()
		require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x99)))
		require.NoError(t, batch.commit())

		checkRetrieveError(t, f, map[uint64]error{
			0: errOutOfBounds,
			1: errOutOfBounds,
			2: errOutOfBounds,
			3: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			4: getChunk(20, 0xbb),
			5: getChunk(20, 0xaa),
			6: getChunk(20, 0x99),
		})
	}

	// Edit the index again, with a much larger initial offset of 1M.
	{
		// Read the index file
		p := filepath.Join(os.TempDir(), fmt.Sprintf("%v.ridx", fname))
		indexFile, err := os.OpenFile(p, os.O_RDWR, 0644)
		if err != nil {
			t.Fatal(err)
		}
		indexBuf := make([]byte, 3*indexEntrySize)
		indexFile.Read(indexBuf)

		// Update the index file, so that we store
		// [ file = 2, offset = 1M ] at index zero

		zeroIndex := indexEntry{
			offset:  uint32(1000000), // We have removed 1M items
			filenum: uint32(2),       // First file is 2
		}
		buf := zeroIndex.append(nil)

		// Overwrite index zero
		copy(indexBuf, buf)
		indexFile.WriteAt(indexBuf, 0)
		indexFile.Close()
	}

	// Check that existing items have been moved to index 1M.
	{
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		defer f.Close()
		t.Log(f.dumpIndexString(0, 100))

		checkRetrieveError(t, f, map[uint64]error{
			0:      errOutOfBounds,
			1:      errOutOfBounds,
			2:      errOutOfBounds,
			3:      errOutOfBounds,
			999999: errOutOfBounds,
		})
		checkRetrieve(t, f, map[uint64][]byte{
			1000000: getChunk(20, 0xbb),
			1000001: getChunk(20, 0xaa),
		})
	}
}

func assertTableSize(t *testing.T, f *freezerTable, size int) {
	t.Helper()
	if got, err := f.size(); got != uint64(size) {
		t.Fatalf("expected size of %d bytes, got %d, err: %v", size, got, err)
	}
}

func TestTruncateTail(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncate-tail-%d", rand.Uint64())

	// Fill table
	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 7 x 20 bytes, splitting out into four files
	batch := f.newBatch()
	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())
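	// The size assertions below follow the accounting sketched in the inline
	// comment further down: full data files count at maxFileSize (40), the
	// head file at its actual length, the index file at one indexEntrySize
	// record per item plus the tail marker, minus any bytes merely hidden by
	// tail deletion. With seven 20-byte items in four files, that is
	// 40*3 + 20 + (7+1)*6 = 20*7 + 48 bytes.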
	// nothing to do, all the items should still be there.
	f.truncateTail(0)
	fmt.Println(f.dumpIndexString(0, 1000))
	checkRetrieve(t, f, map[uint64][]byte{
		0: getChunk(20, 0xFF),
		1: getChunk(20, 0xEE),
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})
	// maxFileSize*fileCount + headBytes + indexFileSize - hiddenBytes
	expected := 20*7 + 48 - 0
	assertTableSize(t, f, expected)

	// truncate a single element (item 0); deletion is only supported at file level,
	// so the item is merely hidden
	f.truncateTail(1)
	fmt.Println(f.dumpIndexString(0, 1000))
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		1: getChunk(20, 0xEE),
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})
	expected = 20*7 + 48 - 20
	assertTableSize(t, f, expected)

	// Reopen the table, the deletion information should be persisted as well
	f.Close()
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		1: getChunk(20, 0xEE),
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// truncate two elements (items 0 and 1); file 0 should be deleted
	f.truncateTail(2)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})
	expected = 20*5 + 36 - 0
	assertTableSize(t, f, expected)

	// Reopen the table, the above testing should still pass
	f.Close()
	f, err = newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}
	defer f.Close()

	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		2: getChunk(20, 0xdd),
		3: getChunk(20, 0xcc),
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})

	// truncate 3 more elements (items 2, 3, 4); file 1 should be deleted, and
	// of file 2 only item 5 remains visible (item 4 is hidden)
	f.truncateTail(5)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
		2: errOutOfBounds,
		3: errOutOfBounds,
		4: errOutOfBounds,
	})
	checkRetrieve(t, f, map[uint64][]byte{
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})
	expected = 20*3 + 24 - 20
	assertTableSize(t, f, expected)

	// truncate all, the entire freezer should be deleted
	f.truncateTail(7)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds,
		1: errOutOfBounds,
		2: errOutOfBounds,
		3: errOutOfBounds,
		4: errOutOfBounds,
		5: errOutOfBounds,
		6: errOutOfBounds,
	})
	expected = 12
	assertTableSize(t, f, expected)
}
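// A note on the head/tail interplay tested below: the visible items of a
// table form the half-open range [itemHidden, items), with tail truncation
// raising the lower bound and head truncation lowering the upper one.
// Truncating the head to a position at or below the current tail should
// therefore leave an empty table, which is exactly what TestTruncateHead
// exercises.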
func TestTruncateHead(t *testing.T) {
	t.Parallel()
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("truncate-head-blow-tail-%d", rand.Uint64())

	// Fill table
	f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
	if err != nil {
		t.Fatal(err)
	}

	// Write 7 x 20 bytes, splitting out into four files
	batch := f.newBatch()
	require.NoError(t, batch.AppendRaw(0, getChunk(20, 0xFF)))
	require.NoError(t, batch.AppendRaw(1, getChunk(20, 0xEE)))
	require.NoError(t, batch.AppendRaw(2, getChunk(20, 0xdd)))
	require.NoError(t, batch.AppendRaw(3, getChunk(20, 0xcc)))
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())

	f.truncateTail(4) // Tail = 4

	// The new head (4) does not exceed the tail (4), so the entire table should be emptied
	f.truncateHead(4)
	checkRetrieveError(t, f, map[uint64]error{
		0: errOutOfBounds, // Deleted by tail
		1: errOutOfBounds, // Deleted by tail
		2: errOutOfBounds, // Deleted by tail
		3: errOutOfBounds, // Deleted by tail
		4: errOutOfBounds, // Deleted by head
		5: errOutOfBounds, // Deleted by head
		6: errOutOfBounds, // Deleted by head
	})

	// Append new items
	batch = f.newBatch()
	require.NoError(t, batch.AppendRaw(4, getChunk(20, 0xbb)))
	require.NoError(t, batch.AppendRaw(5, getChunk(20, 0xaa)))
	require.NoError(t, batch.AppendRaw(6, getChunk(20, 0x11)))
	require.NoError(t, batch.commit())

	checkRetrieve(t, f, map[uint64][]byte{
		4: getChunk(20, 0xbb),
		5: getChunk(20, 0xaa),
		6: getChunk(20, 0x11),
	})
}

func checkRetrieve(t *testing.T, f *freezerTable, items map[uint64][]byte) {
	t.Helper()

	for item, wantBytes := range items {
		value, err := f.Retrieve(item)
		if err != nil {
			t.Fatalf("can't get expected item %d: %v", item, err)
		}
		if !bytes.Equal(value, wantBytes) {
			t.Fatalf("item %d has wrong value %x (want %x)", item, value, wantBytes)
		}
	}
}

func checkRetrieveError(t *testing.T, f *freezerTable, items map[uint64]error) {
	t.Helper()

	for item, wantError := range items {
		value, err := f.Retrieve(item)
		if err == nil {
			t.Fatalf("unexpected value %x for item %d, want error %v", item, value, wantError)
		}
		if err != wantError {
			t.Fatalf("wrong error for item %d: %v", item, err)
		}
	}
}

// getChunk returns a chunk of data of the given size, filled with the byte b
func getChunk(size int, b int) []byte {
	data := make([]byte, size)
	for i := range data {
		data[i] = byte(b)
	}
	return data
}

// TODO (?)
// - test that if we remove several head-files, as well as the last data-file,
//   the index is truncated accordingly
// Right now, the freezer would fail on these conditions:
// 1. have data files d0, d1, d2, d3
// 2. remove d2,d3
//
// However, all 'normal' failure modes arising due to failing to sync() or save a file
// should be handled already, and the case described above can only (?) happen if an
// external process/user deletes files from the filesystem.

func writeChunks(t *testing.T, ft *freezerTable, n int, length int) {
	t.Helper()

	batch := ft.newBatch()
	for i := 0; i < n; i++ {
		if err := batch.AppendRaw(uint64(i), getChunk(length, i)); err != nil {
			t.Fatalf("AppendRaw(%d, ...) returned error: %v", i, err)
		}
	}
	if err := batch.commit(); err != nil {
		t.Fatalf("Commit returned error: %v", err)
	}
}
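// The batch-read tests below poke at RetrieveItems(start, count, maxBytes).
// The contract, as exercised here: return consecutive items from start,
// stopping at count items, at the end of the table, or before the
// accumulated size would exceed maxBytes. At least one item is always
// returned if it exists, even when that single item alone is larger than
// the byte budget.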
// TestSequentialRead does some basic tests on RetrieveItems.
func TestSequentialRead(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 15 bytes 30 times
		writeChunks(t, f, 30, 15)
		f.dumpIndexStdout(0, 30)
		f.Close()
	}
	{ // Open it, iterate, verify iteration
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 50, true, false)
		if err != nil {
			t.Fatal(err)
		}
		items, err := f.RetrieveItems(0, 10000, 100000)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 30; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
	{ // Open it, iterate, verify byte limit. The byte limit is less than the
		// item size, so each lookup should only return one item
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 40, true, false)
		if err != nil {
			t.Fatal(err)
		}
		items, err := f.RetrieveItems(0, 10000, 10)
		if err != nil {
			t.Fatal(err)
		}
		if have, want := len(items), 1; have != want {
			t.Fatalf("want %d items, have %d ", want, have)
		}
		for i, have := range items {
			want := getChunk(15, i)
			if !bytes.Equal(want, have) {
				t.Fatalf("data corruption: have\n%x\n, want \n%x\n", have, want)
			}
		}
		f.Close()
	}
}

// TestSequentialReadByteLimit does some more advanced tests on batch reads.
// These tests check that when the byte limit hits, we correctly abort in time,
// but also properly do all the deferred reads for the previous data, regardless
// of whether the data crosses a file boundary or not.
func TestSequentialReadByteLimit(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-2-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 10 bytes 30 times,
		// splitting it at every 100 bytes (10 items)
		writeChunks(t, f, 30, 10)
		f.Close()
	}
	for i, tc := range []struct {
		items uint64
		limit uint64
		want  int
	}{
		{9, 89, 8},
		{10, 99, 9},
		{11, 109, 10},
		{100, 89, 8},
		{100, 99, 9},
		{100, 109, 10},
	} {
		{
			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
			if err != nil {
				t.Fatal(err)
			}
			items, err := f.RetrieveItems(0, tc.items, tc.limit)
			if err != nil {
				t.Fatal(err)
			}
			if have, want := len(items), tc.want; have != want {
				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
			}
			for ii, have := range items {
				want := getChunk(10, ii)
				if !bytes.Equal(want, have) {
					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
				}
			}
			f.Close()
		}
	}
}
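// The case table above works out as follows, given 10-byte items: a limit
// of 89 admits 8 items (80 bytes; a ninth would reach 90), 99 admits 9, and
// 109 admits 10. The {100, ...} rows request more items than the byte limit
// allows, so the budget governs the count, and the 10-item rows additionally
// read right up to the 10-items-per-file boundary.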
// TestSequentialReadNoByteLimit tests the batch-read if maxBytes is not specified.
// The freezer should return the requested items regardless of the size limitation.
func TestSequentialReadNoByteLimit(t *testing.T) {
	rm, wm, sg := metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge()
	fname := fmt.Sprintf("batchread-3-%d", rand.Uint64())
	{ // Fill table
		f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
		if err != nil {
			t.Fatal(err)
		}
		// Write 10 bytes 30 times,
		// splitting it at every 100 bytes (10 items)
		writeChunks(t, f, 30, 10)
		f.Close()
	}
	for i, tc := range []struct {
		items uint64
		want  int
	}{
		{1, 1},
		{30, 30},
		{31, 30},
	} {
		{
			f, err := newTable(os.TempDir(), fname, rm, wm, sg, 100, true, false)
			if err != nil {
				t.Fatal(err)
			}
			items, err := f.RetrieveItems(0, tc.items, 0)
			if err != nil {
				t.Fatal(err)
			}
			if have, want := len(items), tc.want; have != want {
				t.Fatalf("test %d: want %d items, have %d ", i, want, have)
			}
			for ii, have := range items {
				want := getChunk(10, ii)
				if !bytes.Equal(want, have) {
					t.Fatalf("test %d: data corruption item %d: have\n%x\n, want \n%x\n", i, ii, have, want)
				}
			}
			f.Close()
		}
	}
}

func TestFreezerReadonly(t *testing.T) {
	tmpdir := os.TempDir()
	// Case 1: Check it fails on non-existent file.
	_, err := newTable(tmpdir,
		fmt.Sprintf("readonlytest-%d", rand.Uint64()),
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err == nil {
		t.Fatal("readonly table instantiation should fail for non-existent table")
	}

	// Case 2: Check that it fails on invalid index length.
	fname := fmt.Sprintf("readonlytest-%d", rand.Uint64())
	idxFile, err := openFreezerFileForAppend(filepath.Join(tmpdir, fmt.Sprintf("%s.ridx", fname)))
	if err != nil {
		t.Errorf("Failed to open index file: %v", err)
	}
	// The size should not be a multiple of indexEntrySize.
	idxFile.Write(make([]byte, 17))
	idxFile.Close()
	_, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err == nil {
		t.Errorf("readonly table instantiation should fail for invalid index size")
	}

	// Case 3: Open a non-readonly table to write some data.
	// Then corrupt the head file and make sure opening the table
	// again in readonly triggers an error.
	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
	f, err := newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		t.Fatalf("failed to instantiate table: %v", err)
	}
	writeChunks(t, f, 8, 32)
	// Corrupt table file
	if _, err := f.head.Write([]byte{1, 1}); err != nil {
		t.Fatal(err)
	}
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
	_, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err == nil {
		t.Errorf("readonly table instantiation should fail for corrupt table file")
	}

	// Case 4: Write some data to a table and later re-open it as readonly.
	// Should be successful.
	fname = fmt.Sprintf("readonlytest-%d", rand.Uint64())
	f, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		t.Fatalf("failed to instantiate table: %v", err)
	}
	writeChunks(t, f, 32, 128)
	if err := f.Close(); err != nil {
		t.Fatal(err)
	}
	f, err = newTable(tmpdir, fname,
		metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, true)
	if err != nil {
		t.Fatal(err)
	}
	v, err := f.Retrieve(10)
	if err != nil {
		t.Fatal(err)
	}
	exp := getChunk(128, 10)
	if !bytes.Equal(v, exp) {
		t.Errorf("retrieved value is incorrect")
	}

	// Case 5: Now write some data via a batch.
	// This should fail either during AppendRaw or commit
	batch := f.newBatch()
	writeErr := batch.AppendRaw(32, make([]byte, 1))
	if writeErr == nil {
		writeErr = batch.commit()
	}
	if writeErr == nil {
		t.Fatalf("Writing to readonly table should fail")
	}
}

// randTest performs random freezer table operations.
// Instances of this test are created by Generate.
type randTest []randTestStep

type randTestStep struct {
	op     int
	items  []uint64 // for append and retrieve
	blobs  [][]byte // for append
	target uint64   // for truncate(head/tail)
	err    error    // for debugging
}

const (
	opReload = iota
	opAppend
	opRetrieve
	opTruncateHead
	opTruncateHeadAll
	opTruncateTail
	opTruncateTailAll
	opCheckAll
	opMax // boundary value, not an actual op
)

func getVals(first uint64, n int) [][]byte {
	var ret [][]byte
	for i := 0; i < n; i++ {
		val := make([]byte, 8)
		binary.BigEndian.PutUint64(val, first+uint64(i))
		ret = append(ret, val)
	}
	return ret
}
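// Generate below builds a random op sequence while maintaining a shadow
// model of the table: `deleted` mirrors the number of items truncated off
// the tail (the table's itemHidden counter) and `items` holds the indices
// still live, mirroring the visible range [itemHidden, items). Each
// truncation op updates the model the same way the real operation is
// expected to update the table, which is what lets runRandTest compare the
// two afterwards.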
func (randTest) Generate(r *rand.Rand, size int) reflect.Value {
	var (
		deleted uint64   // The number of deleted items from tail
		items   []uint64 // The index of entries in table

		// getItems retrieves up to n indices of consecutive live items,
		// starting at a random position in the table.
		getItems = func(n int) []uint64 {
			length := len(items)
			if length == 0 {
				return nil
			}
			var ret []uint64
			index := rand.Intn(length)
			for i := index; len(ret) < n && i < length; i++ {
				ret = append(ret, items[i])
			}
			return ret
		}

		// addItems appends n new items to the table model and returns their indices.
		addItems = func(n int) []uint64 {
			var first = deleted
			if len(items) != 0 {
				first = items[len(items)-1] + 1
			}
			var ret []uint64
			for i := 0; i < n; i++ {
				ret = append(ret, first+uint64(i))
			}
			items = append(items, ret...)
			return ret
		}
	)

	var steps randTest
	for i := 0; i < size; i++ {
		step := randTestStep{op: r.Intn(opMax)}
		switch step.op {
		case opReload, opCheckAll:
		case opAppend:
			num := r.Intn(3)
			step.items = addItems(num)
			if len(step.items) == 0 {
				step.blobs = nil
			} else {
				step.blobs = getVals(step.items[0], num)
			}
		case opRetrieve:
			step.items = getItems(r.Intn(3))
		case opTruncateHead:
			if len(items) == 0 {
				step.target = deleted
			} else {
				index := r.Intn(len(items))
				items = items[:index]
				step.target = deleted + uint64(index)
			}
		case opTruncateHeadAll:
			step.target = deleted
			items = items[:0]
		case opTruncateTail:
			if len(items) == 0 {
				step.target = deleted
			} else {
				index := r.Intn(len(items))
				items = items[index:]
				deleted += uint64(index)
				step.target = deleted
			}
		case opTruncateTailAll:
			step.target = deleted + uint64(len(items))
			items = items[:0]
			deleted = step.target
		}
		steps = append(steps, step)
	}
	return reflect.ValueOf(steps)
}

func runRandTest(rt randTest) bool {
	fname := fmt.Sprintf("randtest-%d", rand.Uint64())
	f, err := newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
	if err != nil {
		panic("failed to initialize table")
	}
	var values [][]byte
	for i, step := range rt {
		switch step.op {
		case opReload:
			f.Close()
			f, err = newTable(os.TempDir(), fname, metrics.NewMeter(), metrics.NewMeter(), metrics.NewGauge(), 50, true, false)
			if err != nil {
				rt[i].err = fmt.Errorf("failed to reload table %v", err)
			}
		case opCheckAll:
			tail := f.itemHidden.Load()
			head := f.items.Load()

			if tail == head {
				continue
			}
			got, err := f.RetrieveItems(f.itemHidden.Load(), head-tail, 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, values) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v", got, values)
				}
			}

		case opAppend:
			batch := f.newBatch()
			for i := 0; i < len(step.items); i++ {
				batch.AppendRaw(step.items[i], step.blobs[i])
			}
			batch.commit()
			values = append(values, step.blobs...)

		case opRetrieve:
			var blobs [][]byte
			if len(step.items) == 0 {
				continue
			}
			tail := f.itemHidden.Load()
			for i := 0; i < len(step.items); i++ {
				blobs = append(blobs, values[step.items[i]-tail])
			}
			got, err := f.RetrieveItems(step.items[0], uint64(len(step.items)), 100000)
			if err != nil {
				rt[i].err = err
			} else {
				if !reflect.DeepEqual(got, blobs) {
					rt[i].err = fmt.Errorf("mismatch on retrieved values %v %v %v", got, blobs, step.items)
				}
			}

		case opTruncateHead:
			f.truncateHead(step.target)

			length := f.items.Load() - f.itemHidden.Load()
			values = values[:length]

		case opTruncateHeadAll:
			f.truncateHead(step.target)
			values = nil

		case opTruncateTail:
			prev := f.itemHidden.Load()
			f.truncateTail(step.target)

			truncated := f.itemHidden.Load() - prev
			values = values[truncated:]

		case opTruncateTailAll:
			f.truncateTail(step.target)
			values = nil
		}
		// Abort the test on error.
		if rt[i].err != nil {
			return false
		}
	}
	f.Close()
	return true
}

func TestRandom(t *testing.T) {
	if err := quick.Check(runRandTest, nil); err != nil {
		if cerr, ok := err.(*quick.CheckError); ok {
			t.Fatalf("random test iteration %d failed: %s", cerr.Count, spew.Sdump(cerr.In))
		}
		t.Fatal(err)
	}
}
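// Passing nil above runs quick.Check with its defaults (100 generated
// sequences). A hypothetical tweak for longer local fuzzing runs, using the
// standard testing/quick configuration knob:
//
//	if err := quick.Check(runRandTest, &quick.Config{MaxCount: 1000}); err != nil {
//		t.Fatal(err)
//	}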