/*
Copyright IBM Corp. 2016 All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package blkstorage

import (
	"fmt"
	"io/ioutil"
	"os"
	"testing"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/peer"
	"github.com/osdi23p228/fabric/common/ledger/testutil"
	"github.com/osdi23p228/fabric/internal/pkg/txflags"
	"github.com/osdi23p228/fabric/protoutil"
	"github.com/stretchr/testify/require"
)

func TestBlockfileMgrBlockReadWrite(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	blkfileMgrWrapper.testGetBlockByHash(blocks, nil)
	blkfileMgrWrapper.testGetBlockByNumber(blocks, 0, nil)
}

func TestAddBlockWithWrongHash(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks[0:9])
	lastBlock := blocks[9]
	lastBlock.Header.PreviousHash = []byte("someJunkHash") // set the hash to something unexpected
	err := blkfileMgrWrapper.blockfileMgr.addBlock(lastBlock)
	require.Error(t, err, "An error is expected when adding a block with some unexpected hash")
	require.Contains(t, err.Error(), "unexpected Previous block hash. Expected PreviousHash")
	t.Logf("err = %s", err)
}

// TestBlockfileMgrCrashDuringWriting exercises crash recovery for different combinations of
// blocks added before/after the last save of blockfilesInfo, sizes of the partially written
// trailing bytes, and whether the persisted blockfilesInfo record is deleted.
func TestBlockfileMgrCrashDuringWriting(t *testing.T) {
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 10, false)
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 1, false)
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 0, false)
	testBlockfileMgrCrashDuringWriting(t, 0, 0, 1000, 10, false)
	testBlockfileMgrCrashDuringWriting(t, 0, 5, 1000, 10, false)

	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 10, true)
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 1, true)
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 0, true)
	testBlockfileMgrCrashDuringWriting(t, 0, 0, 1000, 10, true)
	testBlockfileMgrCrashDuringWriting(t, 0, 5, 1000, 10, true)
}

// testBlockfileMgrCrashDuringWriting adds blocks around a saved blockfilesInfo record, appends a
// partially written block entry to simulate a crash, optionally deletes the persisted
// blockfilesInfo (or re-saves a stale copy), and verifies that the block store recovers the
// correct state on reopen and accepts fresh blocks afterwards.
func testBlockfileMgrCrashDuringWriting(t *testing.T, numBlksBeforeSavingBlkfilesInfo int,
	numBlksAfterSavingBlkfilesInfo int, numLastBlockBytes int, numPartialBytesToWrite int,
	deleteBFInfo bool) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	ledgerid := "testLedger"
	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
	bg, gb := testutil.NewBlockGenerator(t, ledgerid, false)

	// create all necessary blocks
	totalBlocks := numBlksBeforeSavingBlkfilesInfo + numBlksAfterSavingBlkfilesInfo
	allBlocks := []*common.Block{gb}
	allBlocks = append(allBlocks, bg.NextTestBlocks(totalBlocks+1)...)

	// identify the blocks that are to be added before saving blockfilesInfo, after saving it, and after restart
	blocksBeforeSavingBlkfilesInfo := []*common.Block{}
	blocksAfterSavingBlkfilesInfo := []*common.Block{}
	if numBlksBeforeSavingBlkfilesInfo != 0 {
		blocksBeforeSavingBlkfilesInfo = allBlocks[0:numBlksBeforeSavingBlkfilesInfo]
	}
	if numBlksAfterSavingBlkfilesInfo != 0 {
		blocksAfterSavingBlkfilesInfo = allBlocks[numBlksBeforeSavingBlkfilesInfo : numBlksBeforeSavingBlkfilesInfo+numBlksAfterSavingBlkfilesInfo]
	}
	blocksAfterRestart := allBlocks[numBlksBeforeSavingBlkfilesInfo+numBlksAfterSavingBlkfilesInfo:]

	// add blocks before saving blockfilesInfo
	blkfileMgrWrapper.addBlocks(blocksBeforeSavingBlkfilesInfo)
	currentBlkfilesInfo := blkfileMgrWrapper.blockfileMgr.blockfilesInfo
	blkfilesInfo1 := &blockfilesInfo{
		latestFileNumber:   currentBlkfilesInfo.latestFileNumber,
		latestFileSize:     currentBlkfilesInfo.latestFileSize,
		noBlockFiles:       currentBlkfilesInfo.noBlockFiles,
		lastPersistedBlock: currentBlkfilesInfo.lastPersistedBlock,
	}

	// add blocks after saving blockfilesInfo
	blkfileMgrWrapper.addBlocks(blocksAfterSavingBlkfilesInfo)
	blkfilesInfo2 := blkfileMgrWrapper.blockfileMgr.blockfilesInfo

	// simulate a crash scenario
	lastBlockBytes := []byte{}
	encodedLen := proto.EncodeVarint(uint64(numLastBlockBytes))
	randomBytes := testutil.ConstructRandomBytes(t, numLastBlockBytes)
	lastBlockBytes = append(lastBlockBytes, encodedLen...)
	lastBlockBytes = append(lastBlockBytes, randomBytes...)
	partialBytes := lastBlockBytes[:numPartialBytesToWrite]
	blkfileMgrWrapper.blockfileMgr.currentFileWriter.append(partialBytes, true)
	if deleteBFInfo {
		err := blkfileMgrWrapper.blockfileMgr.db.Delete(blkMgrInfoKey, true)
		require.NoError(t, err)
	} else {
		blkfileMgrWrapper.blockfileMgr.saveBlkfilesInfo(blkfilesInfo1, true)
	}
	blkfileMgrWrapper.close()

	// simulate a start after a crash
	blkfileMgrWrapper = newTestBlockfileWrapper(env, ledgerid)
	defer blkfileMgrWrapper.close()
	blkfilesInfo3 := blkfileMgrWrapper.blockfileMgr.blockfilesInfo
	require.Equal(t, blkfilesInfo2, blkfilesInfo3)

	// add fresh blocks after restart
	blkfileMgrWrapper.addBlocks(blocksAfterRestart)
	testBlockfileMgrBlockIterator(t, blkfileMgrWrapper.blockfileMgr, 0, len(allBlocks)-1, allBlocks)
}
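
// The sketch below is an editorial addition, not part of the original test suite: it spells out
// the varint-length-prefix framing that the crash simulation above hand-crafts, using only
// identifiers already referenced in this package (serializeBlock, proto.EncodeVarint/DecodeVarint,
// testutil.ConstructTestBlocks). The test name is made up for illustration.
func TestBlockEntryFramingSketch(t *testing.T) {
	block := testutil.ConstructTestBlocks(t, 1)[0]
	blockBytes, _, err := serializeBlock(block)
	require.NoError(t, err)

	// a block entry on disk is varint(len(blockBytes)) followed by blockBytes
	lenPrefix := proto.EncodeVarint(uint64(len(blockBytes)))
	entry := append(append([]byte{}, lenPrefix...), blockBytes...)

	// decoding the prefix recovers the payload length and the prefix width
	decodedLen, prefixWidth := proto.DecodeVarint(entry)
	require.Equal(t, uint64(len(blockBytes)), decodedLen)
	require.Equal(t, len(lenPrefix), prefixWidth)
	require.Equal(t, blockBytes, entry[prefixWidth:])
}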

func TestBlockfileMgrBlockIterator(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	testBlockfileMgrBlockIterator(t, blkfileMgrWrapper.blockfileMgr, 0, 7, blocks[0:8])
}

func testBlockfileMgrBlockIterator(t *testing.T, blockfileMgr *blockfileMgr,
	firstBlockNum int, lastBlockNum int, expectedBlocks []*common.Block) {
	itr, err := blockfileMgr.retrieveBlocks(uint64(firstBlockNum))
	require.NoError(t, err, "Error while getting blocks iterator")
	defer itr.Close()
	numBlocksIterated := 0
	for {
		block, err := itr.Next()
		require.NoError(t, err, "Error while getting block number [%d] from iterator", numBlocksIterated)
		require.Equal(t, expectedBlocks[numBlocksIterated], block)
		numBlocksIterated++
		if numBlocksIterated == lastBlockNum-firstBlockNum+1 {
			break
		}
	}
	require.Equal(t, lastBlockNum-firstBlockNum+1, numBlocksIterated)
}

func TestBlockfileMgrBlockchainInfo(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()

	bcInfo := blkfileMgrWrapper.blockfileMgr.getBlockchainInfo()
	require.Equal(t, &common.BlockchainInfo{Height: 0, CurrentBlockHash: nil, PreviousBlockHash: nil}, bcInfo)

	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	bcInfo = blkfileMgrWrapper.blockfileMgr.getBlockchainInfo()
	require.Equal(t, uint64(10), bcInfo.Height)
}

func TestBlockfileMgrGetTxById(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 2)
	blkfileMgrWrapper.addBlocks(blocks)
	for _, blk := range blocks {
		for j, txEnvelopeBytes := range blk.Data.Data {
			// blockNum starts with 0
			txID, err := protoutil.GetOrComputeTxIDFromEnvelope(blk.Data.Data[j])
			require.NoError(t, err)
			txEnvelopeFromFileMgr, err := blkfileMgrWrapper.blockfileMgr.retrieveTransactionByID(txID)
			require.NoError(t, err, "Error while retrieving tx from blkfileMgr")
			txEnvelope, err := protoutil.GetEnvelopeFromBlock(txEnvelopeBytes)
			require.NoError(t, err, "Error while unmarshalling tx")
			require.Equal(t, txEnvelope, txEnvelopeFromFileMgr)
		}
	}
}

// TestBlockfileMgrGetTxByIdDuplicateTxid tests that a transaction with an already-indexed txid
// (within the same block or a different block) does not overwrite the existing by-txid index
// entry (FAB-8557).
func TestBlockfileMgrGetTxByIdDuplicateTxid(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkStore, err := env.provider.Open("testLedger")
	require.NoError(env.t, err)
	blkFileMgr := blkStore.fileMgr
	bg, gb := testutil.NewBlockGenerator(t, "testLedger", false)
	require.NoError(t, blkFileMgr.addBlock(gb))

	block1 := bg.NextBlockWithTxid(
		[][]byte{
			[]byte("tx with id=txid-1"),
			[]byte("tx with id=txid-2"),
			[]byte("another tx with existing id=txid-1"),
		},
		[]string{"txid-1", "txid-2", "txid-1"},
	)
	txValidationFlags := txflags.New(3)
	txValidationFlags.SetFlag(0, peer.TxValidationCode_VALID)
	txValidationFlags.SetFlag(1, peer.TxValidationCode_INVALID_OTHER_REASON)
	txValidationFlags.SetFlag(2, peer.TxValidationCode_DUPLICATE_TXID)
	block1.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txValidationFlags
	require.NoError(t, blkFileMgr.addBlock(block1))

	block2 := bg.NextBlockWithTxid(
		[][]byte{
			[]byte("tx with id=txid-3"),
			[]byte("yet another tx with existing id=txid-1"),
		},
		[]string{"txid-3", "txid-1"},
	)
	txValidationFlags = txflags.New(2)
	txValidationFlags.SetFlag(0, peer.TxValidationCode_VALID)
	txValidationFlags.SetFlag(1, peer.TxValidationCode_DUPLICATE_TXID)
	block2.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txValidationFlags
	require.NoError(t, blkFileMgr.addBlock(block2))

	txenvp1, err := protoutil.GetEnvelopeFromBlock(block1.Data.Data[0])
	require.NoError(t, err)
	txenvp2, err := protoutil.GetEnvelopeFromBlock(block1.Data.Data[1])
	require.NoError(t, err)
	txenvp3, err := protoutil.GetEnvelopeFromBlock(block2.Data.Data[0])
	require.NoError(t, err)

	indexedTxenvp, _ := blkFileMgr.retrieveTransactionByID("txid-1")
	require.Equal(t, txenvp1, indexedTxenvp)
	indexedTxenvp, _ = blkFileMgr.retrieveTransactionByID("txid-2")
	require.Equal(t, txenvp2, indexedTxenvp)
	indexedTxenvp, _ = blkFileMgr.retrieveTransactionByID("txid-3")
	require.Equal(t, txenvp3, indexedTxenvp)

	blk, _ := blkFileMgr.retrieveBlockByTxID("txid-1")
	require.Equal(t, block1, blk)
	blk, _ = blkFileMgr.retrieveBlockByTxID("txid-2")
	require.Equal(t, block1, blk)
	blk, _ = blkFileMgr.retrieveBlockByTxID("txid-3")
	require.Equal(t, block2, blk)

	validationCode, _ := blkFileMgr.retrieveTxValidationCodeByTxID("txid-1")
	require.Equal(t, peer.TxValidationCode_VALID, validationCode)
	validationCode, _ = blkFileMgr.retrieveTxValidationCodeByTxID("txid-2")
	require.Equal(t, peer.TxValidationCode_INVALID_OTHER_REASON, validationCode)
	validationCode, _ = blkFileMgr.retrieveTxValidationCodeByTxID("txid-3")
	require.Equal(t, peer.TxValidationCode_VALID, validationCode)

	// Although we do not currently expose an API for retrieving all the transactions that share a
	// txid, the data is persisted to support this in the future. The code below tests this
	// behavior through the package-internal API.
	w := &testBlockfileMgrWrapper{
		t:            t,
		blockfileMgr: blkFileMgr,
	}
	w.testGetMultipleDataByTxID(
		"txid-1",
		[]*expectedBlkTxValidationCode{
			{
				blk:            block1,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block1, 0),
				validationCode: peer.TxValidationCode_VALID,
			},
			{
				blk:            block1,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block1, 2),
				validationCode: peer.TxValidationCode_DUPLICATE_TXID,
			},
			{
				blk:            block2,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block2, 1),
				validationCode: peer.TxValidationCode_DUPLICATE_TXID,
			},
		},
	)

	w.testGetMultipleDataByTxID(
		"txid-2",
		[]*expectedBlkTxValidationCode{
			{
				blk:            block1,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block1, 1),
				validationCode: peer.TxValidationCode_INVALID_OTHER_REASON,
			},
		},
	)

	w.testGetMultipleDataByTxID(
		"txid-3",
		[]*expectedBlkTxValidationCode{
			{
				blk:            block2,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block2, 0),
				validationCode: peer.TxValidationCode_VALID,
			},
		},
	)
}
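
// Editorial sketch, not part of the original suite: round-trips the validation flags that the
// duplicate-txid test stores in the block metadata. It assumes the read-side helpers Flag() and
// IsValid() on txflags.ValidationFlags, which mirror the SetFlag() calls used above; the test
// name is made up for illustration.
func TestTxValidationFlagsRoundTripSketch(t *testing.T) {
	flags := txflags.New(2)
	flags.SetFlag(0, peer.TxValidationCode_VALID)
	flags.SetFlag(1, peer.TxValidationCode_DUPLICATE_TXID)

	// the flags slice is what gets stored under BlockMetadataIndex_TRANSACTIONS_FILTER
	require.True(t, flags.IsValid(0))
	require.False(t, flags.IsValid(1))
	require.Equal(t, peer.TxValidationCode_DUPLICATE_TXID, flags.Flag(1))
}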

func TestBlockfileMgrGetTxByBlockNumTranNum(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	for blockIndex, blk := range blocks {
		for tranIndex, txEnvelopeBytes := range blk.Data.Data {
			// blockNum and tranNum both start with 0
			txEnvelopeFromFileMgr, err := blkfileMgrWrapper.blockfileMgr.retrieveTransactionByBlockNumTranNum(uint64(blockIndex), uint64(tranIndex))
			require.NoError(t, err, "Error while retrieving tx from blkfileMgr")
			txEnvelope, err := protoutil.GetEnvelopeFromBlock(txEnvelopeBytes)
			require.NoError(t, err, "Error while unmarshalling tx")
			require.Equal(t, txEnvelope, txEnvelopeFromFileMgr)
		}
	}
}

func TestBlockfileMgrRestart(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	ledgerid := "testLedger"
	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	expectedHeight := uint64(10)
	require.Equal(t, expectedHeight, blkfileMgrWrapper.blockfileMgr.getBlockchainInfo().Height)
	blkfileMgrWrapper.close()

	blkfileMgrWrapper = newTestBlockfileWrapper(env, ledgerid)
	defer blkfileMgrWrapper.close()
	require.Equal(t, 9, int(blkfileMgrWrapper.blockfileMgr.blockfilesInfo.lastPersistedBlock))
	blkfileMgrWrapper.testGetBlockByHash(blocks, nil)
	require.Equal(t, expectedHeight, blkfileMgrWrapper.blockfileMgr.getBlockchainInfo().Height)
}

func TestBlockfileMgrFileRolling(t *testing.T) {
	blocks := testutil.ConstructTestBlocks(t, 200)
	size := 0
	for _, block := range blocks[:100] {
		by, _, err := serializeBlock(block)
		require.NoError(t, err, "Error while serializing block")
		blockBytesSize := len(by)
		encodedLen := proto.EncodeVarint(uint64(blockBytesSize))
		size += blockBytesSize + len(encodedLen)
	}

	// set the max file size to 75% of the space needed by the first 100 blocks so that they
	// roll over into a second block file
	maxFileSize := int(0.75 * float64(size))
	env := newTestEnv(t, NewConf(testPath(), maxFileSize))
	defer env.Cleanup()
	ledgerid := "testLedger"
	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
	blkfileMgrWrapper.addBlocks(blocks[:100])
	require.Equal(t, 1, blkfileMgrWrapper.blockfileMgr.blockfilesInfo.latestFileNumber)
	blkfileMgrWrapper.testGetBlockByHash(blocks[:100], nil)
	blkfileMgrWrapper.close()

	blkfileMgrWrapper = newTestBlockfileWrapper(env, ledgerid)
	defer blkfileMgrWrapper.close()
	blkfileMgrWrapper.addBlocks(blocks[100:])
	require.Equal(t, 2, blkfileMgrWrapper.blockfileMgr.blockfilesInfo.latestFileNumber)
	blkfileMgrWrapper.testGetBlockByHash(blocks[100:], nil)
}

func TestBlockfileMgrGetBlockByTxID(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	for _, blk := range blocks {
		for j := range blk.Data.Data {
			// blockNum starts with 0
			txID, err := protoutil.GetOrComputeTxIDFromEnvelope(blk.Data.Data[j])
			require.NoError(t, err)

			blockFromFileMgr, err := blkfileMgrWrapper.blockfileMgr.retrieveBlockByTxID(txID)
			require.NoError(t, err, "Error while retrieving block from blkfileMgr")
			require.Equal(t, blk, blockFromFileMgr)
		}
	}
}

func TestBlockfileMgrSimulateCrashAtFirstBlockInFile(t *testing.T) {
	t.Run("blockfilesInfo persisted", func(t *testing.T) {
		testBlockfileMgrSimulateCrashAtFirstBlockInFile(t, false)
	})

	t.Run("blockfilesInfo to be computed from block files", func(t *testing.T) {
		testBlockfileMgrSimulateCrashAtFirstBlockInFile(t, true)
	})
}

func testBlockfileMgrSimulateCrashAtFirstBlockInFile(t *testing.T, deleteBlkfilesInfo bool) {
	// open blockfileMgr and add 5 blocks
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()

	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	blockfileMgr := blkfileMgrWrapper.blockfileMgr
	blocks := testutil.ConstructTestBlocks(t, 10)
	for i := 0; i < 10; i++ {
		fmt.Printf("blocks[i].Header.Number = %d\n", blocks[i].Header.Number)
	}
	blkfileMgrWrapper.addBlocks(blocks[:5])
	firstFilePath := blockfileMgr.currentFileWriter.filePath
	firstBlkFileSize := testutilGetFileSize(t, firstFilePath)

	// move to next file and simulate crash scenario while writing the first block
	blockfileMgr.moveToNextFile()
	partialBytesForNextBlock := append(
		proto.EncodeVarint(uint64(10000)),
		[]byte("partialBytesForNextBlock depicting a crash during first block in file")...,
	)
	blockfileMgr.currentFileWriter.append(partialBytesForNextBlock, true)
	if deleteBlkfilesInfo {
		err := blockfileMgr.db.Delete(blkMgrInfoKey, true)
		require.NoError(t, err)
	}
	blkfileMgrWrapper.close()

	// verify that the block file number 1 has been created with partial bytes as a side-effect of crash
	lastFilePath := blockfileMgr.currentFileWriter.filePath
	lastFileContent, err := ioutil.ReadFile(lastFilePath)
	require.NoError(t, err)
	require.Equal(t, lastFileContent, partialBytesForNextBlock)

	// simulate reopen after crash
	blkfileMgrWrapper = newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()

	// last block file (block file number 1) should have been truncated to zero length and concluded as the next file to append to
	require.Equal(t, 0, testutilGetFileSize(t, lastFilePath))
	require.Equal(t,
		&blockfilesInfo{
			latestFileNumber:   1,
			latestFileSize:     0,
			lastPersistedBlock: 4,
			noBlockFiles:       false,
		},
		blkfileMgrWrapper.blockfileMgr.blockfilesInfo,
	)

	// Add 5 more blocks and assert that they are added to last file (block file number 1) and full scanning across two files works as expected
	blkfileMgrWrapper.addBlocks(blocks[5:])
	require.True(t, testutilGetFileSize(t, lastFilePath) > 0)
	require.Equal(t, firstBlkFileSize, testutilGetFileSize(t, firstFilePath))
	blkfileMgrWrapper.testGetBlockByNumber(blocks, 0, nil)
	testBlockfileMgrBlockIterator(t, blkfileMgrWrapper.blockfileMgr, 0, len(blocks)-1, blocks)
}

func testutilGetFileSize(t *testing.T, path string) int {
	fi, err := os.Stat(path)
	require.NoError(t, err)
	return int(fi.Size())
}
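
// Editorial sketch, not part of the original suite: an end-to-end check that getBlockchainInfo
// tracks the last added block. It assumes CurrentBlockHash is the header hash of the last
// persisted block (computed with protoutil.BlockHeaderHash) and PreviousBlockHash mirrors that
// block's header, which matches upstream Fabric behavior but is not asserted elsewhere in this
// file; the test name is made up for illustration.
func TestBlockchainInfoTracksLastBlockSketch(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()

	blocks := testutil.ConstructTestBlocks(t, 3)
	blkfileMgrWrapper.addBlocks(blocks)

	bcInfo := blkfileMgrWrapper.blockfileMgr.getBlockchainInfo()
	require.Equal(t, uint64(3), bcInfo.Height)
	require.Equal(t, protoutil.BlockHeaderHash(blocks[2].Header), bcInfo.CurrentBlockHash)
	require.Equal(t, blocks[2].Header.PreviousHash, bcInfo.PreviousBlockHash)
}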