github.com/sykesm/fabric@v1.1.0-preview.0.20200129034918-2aa12b1a0181/common/ledger/blkstorage/fsblkstorage/blockfile_mgr_test.go

/*
Copyright IBM Corp. 2016 All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package fsblkstorage

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/peer"
	"github.com/hyperledger/fabric/common/ledger/testutil"
	ledgerutil "github.com/hyperledger/fabric/core/ledger/util"
	"github.com/hyperledger/fabric/protoutil"
	"github.com/stretchr/testify/assert"
)

func TestBlockfileMgrBlockReadWrite(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	blkfileMgrWrapper.testGetBlockByHash(blocks, nil)
	blkfileMgrWrapper.testGetBlockByNumber(blocks, 0, nil)
}

func TestAddBlockWithWrongHash(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks[0:9])
	lastBlock := blocks[9]
	lastBlock.Header.PreviousHash = []byte("someJunkHash") // set the hash to something unexpected
	err := blkfileMgrWrapper.blockfileMgr.addBlock(lastBlock)
	assert.Error(t, err, "An error is expected when adding a block with some unexpected hash")
	assert.Contains(t, err.Error(), "unexpected Previous block hash. Expected PreviousHash")
	t.Logf("err = %s", err)
}
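
// For contrast with the failure case above: a minimal sketch (not exercised by
// this suite) of how a correctly chained block would be prepared before calling
// addBlock, assuming the standard protoutil helper for hashing a block header:
//
//	prevBlock := blocks[8]
//	nextBlock := blocks[9]
//	nextBlock.Header.PreviousHash = protoutil.BlockHeaderHash(prevBlock.Header)
//	// blkfileMgrWrapper.blockfileMgr.addBlock(nextBlock) is then expected to succeed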

func TestBlockfileMgrCrashDuringWriting(t *testing.T) {
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 10, false)
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 1, false)
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 0, false)
	testBlockfileMgrCrashDuringWriting(t, 0, 0, 1000, 10, false)
	testBlockfileMgrCrashDuringWriting(t, 0, 5, 1000, 10, false)

	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 10, true)
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 1, true)
	testBlockfileMgrCrashDuringWriting(t, 10, 2, 1000, 0, true)
	testBlockfileMgrCrashDuringWriting(t, 0, 0, 1000, 10, true)
	testBlockfileMgrCrashDuringWriting(t, 0, 5, 1000, 10, true)
}

func testBlockfileMgrCrashDuringWriting(t *testing.T, numBlocksBeforeCheckpoint int,
	numBlocksAfterCheckpoint int, numLastBlockBytes int, numPartialBytesToWrite int,
	deleteCPInfo bool) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	ledgerid := "testLedger"
	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
	bg, gb := testutil.NewBlockGenerator(t, ledgerid, false)

	// create all necessary blocks
	totalBlocks := numBlocksBeforeCheckpoint + numBlocksAfterCheckpoint
	allBlocks := []*common.Block{gb}
	allBlocks = append(allBlocks, bg.NextTestBlocks(totalBlocks+1)...)

	// identify the blocks that are to be added beforeCP, afterCP, and after restart
	blocksBeforeCP := []*common.Block{}
	blocksAfterCP := []*common.Block{}
	if numBlocksBeforeCheckpoint != 0 {
		blocksBeforeCP = allBlocks[0:numBlocksBeforeCheckpoint]
	}
	if numBlocksAfterCheckpoint != 0 {
		blocksAfterCP = allBlocks[numBlocksBeforeCheckpoint : numBlocksBeforeCheckpoint+numBlocksAfterCheckpoint]
	}
	blocksAfterRestart := allBlocks[numBlocksBeforeCheckpoint+numBlocksAfterCheckpoint:]

	// add blocks before cp
	blkfileMgrWrapper.addBlocks(blocksBeforeCP)
	currentCPInfo := blkfileMgrWrapper.blockfileMgr.cpInfo
	cpInfo1 := &checkpointInfo{
		latestFileChunkSuffixNum: currentCPInfo.latestFileChunkSuffixNum,
		latestFileChunksize:      currentCPInfo.latestFileChunksize,
		isChainEmpty:             currentCPInfo.isChainEmpty,
		lastBlockNumber:          currentCPInfo.lastBlockNumber,
	}

	// add blocks after cp
	blkfileMgrWrapper.addBlocks(blocksAfterCP)
	cpInfo2 := blkfileMgrWrapper.blockfileMgr.cpInfo

	// simulate a crash scenario
	lastBlockBytes := []byte{}
	encodedLen := proto.EncodeVarint(uint64(numLastBlockBytes))
	randomBytes := testutil.ConstructRandomBytes(t, numLastBlockBytes)
	lastBlockBytes = append(lastBlockBytes, encodedLen...)
	lastBlockBytes = append(lastBlockBytes, randomBytes...)
	partialBytes := lastBlockBytes[:numPartialBytesToWrite]
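	// write only a prefix of the final record, mimicking a crash part-way
	// through appending a block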
	blkfileMgrWrapper.blockfileMgr.currentFileWriter.append(partialBytes, true)
	if deleteCPInfo {
		err := blkfileMgrWrapper.blockfileMgr.db.Delete(blkMgrInfoKey, true)
		assert.NoError(t, err)
	} else {
		blkfileMgrWrapper.blockfileMgr.saveCurrentInfo(cpInfo1, true)
	}
	blkfileMgrWrapper.close()

	// simulate a start after a crash
	blkfileMgrWrapper = newTestBlockfileWrapper(env, ledgerid)
	defer blkfileMgrWrapper.close()
	cpInfo3 := blkfileMgrWrapper.blockfileMgr.cpInfo
	assert.Equal(t, cpInfo2, cpInfo3)

	// add fresh blocks after restart
	blkfileMgrWrapper.addBlocks(blocksAfterRestart)
	testBlockfileMgrBlockIterator(t, blkfileMgrWrapper.blockfileMgr, 0, len(allBlocks)-1, allBlocks)
}
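
// On disk, each block is stored as a single record: a proto varint carrying the
// length of the serialized block, immediately followed by the block bytes. The
// crash simulation above writes only a prefix of such a record. For reference,
// a hedged sketch of how a complete record would be framed:
//
//	blockBytes, _, err := serializeBlock(block)
//	// handle err ...
//	record := append(proto.EncodeVarint(uint64(len(blockBytes))), blockBytes...)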

func TestBlockfileMgrBlockIterator(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	testBlockfileMgrBlockIterator(t, blkfileMgrWrapper.blockfileMgr, 0, 7, blocks[0:8])
}

func testBlockfileMgrBlockIterator(t *testing.T, blockfileMgr *blockfileMgr,
	firstBlockNum int, lastBlockNum int, expectedBlocks []*common.Block) {
	itr, err := blockfileMgr.retrieveBlocks(uint64(firstBlockNum))
	assert.NoError(t, err, "Error while getting blocks iterator")
	defer itr.Close()
	numBlocksIterated := 0
	for {
		block, err := itr.Next()
		assert.NoError(t, err, "Error while getting block number [%d] from iterator", numBlocksIterated)
		assert.Equal(t, expectedBlocks[numBlocksIterated], block)
		numBlocksIterated++
		if numBlocksIterated == lastBlockNum-firstBlockNum+1 {
			break
		}
	}
	assert.Equal(t, lastBlockNum-firstBlockNum+1, numBlocksIterated)
}

func TestBlockfileMgrBlockchainInfo(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()

	bcInfo := blkfileMgrWrapper.blockfileMgr.getBlockchainInfo()
	assert.Equal(t, &common.BlockchainInfo{Height: 0, CurrentBlockHash: nil, PreviousBlockHash: nil}, bcInfo)

	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	bcInfo = blkfileMgrWrapper.blockfileMgr.getBlockchainInfo()
	assert.Equal(t, uint64(10), bcInfo.Height)
}
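
// The test above only asserts Height once the blocks are added. A hedged sketch
// of the stronger assertions that could be made, assuming the store derives
// block hashes with the standard protoutil header hash:
//
//	assert.Equal(t, protoutil.BlockHeaderHash(blocks[9].Header), bcInfo.CurrentBlockHash)
//	assert.Equal(t, blocks[9].Header.PreviousHash, bcInfo.PreviousBlockHash)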

func TestBlockfileMgrGetTxById(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 2)
	blkfileMgrWrapper.addBlocks(blocks)
	for _, blk := range blocks {
		for j, txEnvelopeBytes := range blk.Data.Data {
			// blockNum starts with 0
			txID, err := protoutil.GetOrComputeTxIDFromEnvelope(blk.Data.Data[j])
			assert.NoError(t, err)
			txEnvelopeFromFileMgr, err := blkfileMgrWrapper.blockfileMgr.retrieveTransactionByID(txID)
			assert.NoError(t, err, "Error while retrieving tx from blkfileMgr")
			txEnvelope, err := protoutil.GetEnvelopeFromBlock(txEnvelopeBytes)
			assert.NoError(t, err, "Error while unmarshalling tx")
			assert.Equal(t, txEnvelope, txEnvelopeFromFileMgr)
		}
	}
}

// TestBlockfileMgrGetTxByIdDuplicateTxid verifies that a transaction whose txid
// duplicates one already present (within the same block or a different block)
// does not overwrite the existing by-txid index entry (FAB-8557)
func TestBlockfileMgrGetTxByIdDuplicateTxid(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkStore, err := env.provider.OpenBlockStore("testLedger")
	assert.NoError(env.t, err)
	blkFileMgr := blkStore.(*fsBlockStore).fileMgr
	bg, gb := testutil.NewBlockGenerator(t, "testLedger", false)
	assert.NoError(t, blkFileMgr.addBlock(gb))

	block1 := bg.NextBlockWithTxid(
		[][]byte{
			[]byte("tx with id=txid-1"),
			[]byte("tx with id=txid-2"),
			[]byte("another tx with existing id=txid-1"),
		},
		[]string{"txid-1", "txid-2", "txid-1"},
	)
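	// TxValidationFlags carries one validation-code byte per transaction, in
	// transaction order; the first occurrence of txid-1 is marked VALID and its
	// in-block duplicate DUPLICATE_TXID, so only the first should be indexed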
	txValidationFlags := ledgerutil.NewTxValidationFlags(3)
	txValidationFlags.SetFlag(0, peer.TxValidationCode_VALID)
	txValidationFlags.SetFlag(1, peer.TxValidationCode_INVALID_OTHER_REASON)
	txValidationFlags.SetFlag(2, peer.TxValidationCode_DUPLICATE_TXID)
	block1.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txValidationFlags
	assert.NoError(t, blkFileMgr.addBlock(block1))

	block2 := bg.NextBlockWithTxid(
		[][]byte{
			[]byte("tx with id=txid-3"),
			[]byte("yet another tx with existing id=txid-1"),
		},
		[]string{"txid-3", "txid-1"},
	)
	txValidationFlags = ledgerutil.NewTxValidationFlags(2)
	txValidationFlags.SetFlag(0, peer.TxValidationCode_VALID)
	txValidationFlags.SetFlag(1, peer.TxValidationCode_DUPLICATE_TXID)
	block2.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txValidationFlags
	assert.NoError(t, blkFileMgr.addBlock(block2))

	txenvp1, err := protoutil.GetEnvelopeFromBlock(block1.Data.Data[0])
	assert.NoError(t, err)
	txenvp2, err := protoutil.GetEnvelopeFromBlock(block1.Data.Data[1])
	assert.NoError(t, err)
	txenvp3, err := protoutil.GetEnvelopeFromBlock(block2.Data.Data[0])
	assert.NoError(t, err)

	indexedTxenvp, _ := blkFileMgr.retrieveTransactionByID("txid-1")
	assert.Equal(t, txenvp1, indexedTxenvp)
	indexedTxenvp, _ = blkFileMgr.retrieveTransactionByID("txid-2")
	assert.Equal(t, txenvp2, indexedTxenvp)
	indexedTxenvp, _ = blkFileMgr.retrieveTransactionByID("txid-3")
	assert.Equal(t, txenvp3, indexedTxenvp)

	blk, _ := blkFileMgr.retrieveBlockByTxID("txid-1")
	assert.Equal(t, block1, blk)
	blk, _ = blkFileMgr.retrieveBlockByTxID("txid-2")
	assert.Equal(t, block1, blk)
	blk, _ = blkFileMgr.retrieveBlockByTxID("txid-3")
	assert.Equal(t, block2, blk)

	validationCode, _ := blkFileMgr.retrieveTxValidationCodeByTxID("txid-1")
	assert.Equal(t, peer.TxValidationCode_VALID, validationCode)
	validationCode, _ = blkFileMgr.retrieveTxValidationCodeByTxID("txid-2")
	assert.Equal(t, peer.TxValidationCode_INVALID_OTHER_REASON, validationCode)
	validationCode, _ = blkFileMgr.retrieveTxValidationCodeByTxID("txid-3")
	assert.Equal(t, peer.TxValidationCode_VALID, validationCode)

	// We do not currently expose an API for retrieving all the txs that share a
	// txid, but we may in the future, and the data is persisted to support this.
	// The code below exercises that behavior via the internal test helper.
	w := &testBlockfileMgrWrapper{
		t:            t,
		blockfileMgr: blkFileMgr,
	}
	w.testGetMultipleDataByTxID(
		"txid-1",
		[]*expectedBlkTxValidationCode{
			{
				blk:            block1,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block1, 0),
				validationCode: peer.TxValidationCode_VALID,
			},
			{
				blk:            block1,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block1, 2),
				validationCode: peer.TxValidationCode_DUPLICATE_TXID,
			},
			{
				blk:            block2,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block2, 1),
				validationCode: peer.TxValidationCode_DUPLICATE_TXID,
			},
		},
	)

	w.testGetMultipleDataByTxID(
		"txid-2",
		[]*expectedBlkTxValidationCode{
			{
				blk:            block1,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block1, 1),
				validationCode: peer.TxValidationCode_INVALID_OTHER_REASON,
			},
		},
	)

	w.testGetMultipleDataByTxID(
		"txid-3",
		[]*expectedBlkTxValidationCode{
			{
				blk:            block2,
				txEnv:          protoutil.ExtractEnvelopeOrPanic(block2, 0),
				validationCode: peer.TxValidationCode_VALID,
			},
		},
	)
}

func TestBlockfileMgrGetTxByBlockNumTranNum(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	for blockIndex, blk := range blocks {
		for tranIndex, txEnvelopeBytes := range blk.Data.Data {
			// blockNum and tranNum both start with 0
			txEnvelopeFromFileMgr, err := blkfileMgrWrapper.blockfileMgr.retrieveTransactionByBlockNumTranNum(uint64(blockIndex), uint64(tranIndex))
			assert.NoError(t, err, "Error while retrieving tx from blkfileMgr")
			txEnvelope, err := protoutil.GetEnvelopeFromBlock(txEnvelopeBytes)
			assert.NoError(t, err, "Error while unmarshalling tx")
			assert.Equal(t, txEnvelope, txEnvelopeFromFileMgr)
		}
	}
}

func TestBlockfileMgrRestart(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	ledgerid := "testLedger"
	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	expectedHeight := uint64(10)
	assert.Equal(t, expectedHeight, blkfileMgrWrapper.blockfileMgr.getBlockchainInfo().Height)
	blkfileMgrWrapper.close()

	blkfileMgrWrapper = newTestBlockfileWrapper(env, ledgerid)
	defer blkfileMgrWrapper.close()
	assert.Equal(t, 9, int(blkfileMgrWrapper.blockfileMgr.cpInfo.lastBlockNumber))
	blkfileMgrWrapper.testGetBlockByHash(blocks, nil)
	assert.Equal(t, expectedHeight, blkfileMgrWrapper.blockfileMgr.getBlockchainInfo().Height)
}

func TestBlockfileMgrFileRolling(t *testing.T) {
	blocks := testutil.ConstructTestBlocks(t, 200)
	size := 0
	for _, block := range blocks[:100] {
		by, _, err := serializeBlock(block)
		assert.NoError(t, err, "Error while serializing block")
		blockBytesSize := len(by)
		encodedLen := proto.EncodeVarint(uint64(blockBytesSize))
		size += blockBytesSize + len(encodedLen)
	}

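	// cap each block file at 75% of the serialized size of the first 100 blocks,
	// so that writing 100 blocks is expected to roll over into a second file
	// (suffix 1) and writing the remaining 100 into a third (suffix 2)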
	maxFileSize := int(0.75 * float64(size))
	env := newTestEnv(t, NewConf(testPath(), maxFileSize))
	defer env.Cleanup()
	ledgerid := "testLedger"
	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
	blkfileMgrWrapper.addBlocks(blocks[:100])
	assert.Equal(t, 1, blkfileMgrWrapper.blockfileMgr.cpInfo.latestFileChunkSuffixNum)
	blkfileMgrWrapper.testGetBlockByHash(blocks[:100], nil)
	blkfileMgrWrapper.close()

	blkfileMgrWrapper = newTestBlockfileWrapper(env, ledgerid)
	defer blkfileMgrWrapper.close()
	blkfileMgrWrapper.addBlocks(blocks[100:])
	assert.Equal(t, 2, blkfileMgrWrapper.blockfileMgr.cpInfo.latestFileChunkSuffixNum)
	blkfileMgrWrapper.testGetBlockByHash(blocks[100:], nil)
}

func TestBlockfileMgrGetBlockByTxID(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()
	blocks := testutil.ConstructTestBlocks(t, 10)
	blkfileMgrWrapper.addBlocks(blocks)
	for _, blk := range blocks {
		for j := range blk.Data.Data {
			// blockNum starts with 0
			txID, err := protoutil.GetOrComputeTxIDFromEnvelope(blk.Data.Data[j])
			assert.NoError(t, err)

			blockFromFileMgr, err := blkfileMgrWrapper.blockfileMgr.retrieveBlockByTxID(txID)
			assert.NoError(t, err, "Error while retrieving block from blkfileMgr")
			assert.Equal(t, blk, blockFromFileMgr)
		}
	}
}

func TestBlockfileMgrSimulateCrashAtFirstBlockInFile(t *testing.T) {
	t.Run("CPInfo persisted", func(t *testing.T) {
		testBlockfileMgrSimulateCrashAtFirstBlockInFile(t, false)
	})

	t.Run("CPInfo to be computed from block files", func(t *testing.T) {
		testBlockfileMgrSimulateCrashAtFirstBlockInFile(t, true)
	})
}

func testBlockfileMgrSimulateCrashAtFirstBlockInFile(t *testing.T, deleteCPInfo bool) {
	// open blockfileMgr and add 5 blocks
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()

	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testLedger")
	blockfileMgr := blkfileMgrWrapper.blockfileMgr
	blocks := testutil.ConstructTestBlocks(t, 10)
	for i := 0; i < 10; i++ {
		t.Logf("blocks[%d].Header.Number = %d", i, blocks[i].Header.Number)
	}
	blkfileMgrWrapper.addBlocks(blocks[:5])
	firstFilePath := blockfileMgr.currentFileWriter.filePath
	firstBlkFileSize := testutilGetFileSize(t, firstFilePath)

	// move to next file and simulate crash scenario while writing the first block
	blockfileMgr.moveToNextFile()
	partialBytesForNextBlock := append(
		proto.EncodeVarint(uint64(10000)),
		[]byte("partialBytesForNextBlock depicting a crash during first block in file")...,
	)
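	// the varint header above claims a 10000-byte block, but only a short
	// fragment follows it, so the startup scan should detect the torn record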
	blockfileMgr.currentFileWriter.append(partialBytesForNextBlock, true)
	if deleteCPInfo {
		err := blockfileMgr.db.Delete(blkMgrInfoKey, true)
		assert.NoError(t, err)
	}
	blkfileMgrWrapper.close()

	// verify that block file number 1 has been created with the partial bytes as a side effect of the crash
	lastFilePath := blockfileMgr.currentFileWriter.filePath
	lastFileContent, err := ioutil.ReadFile(lastFilePath)
	assert.NoError(t, err)
	assert.Equal(t, partialBytesForNextBlock, lastFileContent)

	// simulate reopen after crash
	blkfileMgrWrapper = newTestBlockfileWrapper(env, "testLedger")
	defer blkfileMgrWrapper.close()

	// the last block file (block file number 1) should have been truncated to zero length
	// and treated as the next file to append to
	assert.Equal(t, 0, testutilGetFileSize(t, lastFilePath))
	assert.Equal(t,
		&checkpointInfo{
			latestFileChunkSuffixNum: 1,
			latestFileChunksize:      0,
			lastBlockNumber:          4,
			isChainEmpty:             false,
		},
		blkfileMgrWrapper.blockfileMgr.cpInfo,
	)

	// add 5 more blocks and assert that they are appended to the last file (block file number 1)
	// and that a full scan across the two files works as expected
	blkfileMgrWrapper.addBlocks(blocks[5:])
	assert.True(t, testutilGetFileSize(t, lastFilePath) > 0)
	assert.Equal(t, firstBlkFileSize, testutilGetFileSize(t, firstFilePath))
	blkfileMgrWrapper.testGetBlockByNumber(blocks, 0, nil)
	testBlockfileMgrBlockIterator(t, blkfileMgrWrapper.blockfileMgr, 0, len(blocks)-1, blocks)
}

func testutilGetFileSize(t *testing.T, path string) int {
	fi, err := os.Stat(path)
	assert.NoError(t, err)
	return int(fi.Size())
}