github.com/sykesm/fabric@v1.1.0-preview.0.20200129034918-2aa12b1a0181/common/ledger/blkstorage/fsblkstorage/blockfile_mgr.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package fsblkstorage
     8  
     9  import (
    10  	"bytes"
    11  	"fmt"
    12  	"math"
    13  	"sync"
    14  	"sync/atomic"
    15  
    16  	"github.com/davecgh/go-spew/spew"
    17  	"github.com/golang/protobuf/proto"
    18  	"github.com/hyperledger/fabric-protos-go/common"
    19  	"github.com/hyperledger/fabric-protos-go/peer"
    20  	"github.com/hyperledger/fabric/common/flogging"
    21  	"github.com/hyperledger/fabric/common/ledger/blkstorage"
    22  	"github.com/hyperledger/fabric/common/ledger/util"
    23  	"github.com/hyperledger/fabric/common/ledger/util/leveldbhelper"
    24  	"github.com/hyperledger/fabric/protoutil"
    25  	"github.com/pkg/errors"
    26  )
    27  
    28  var logger = flogging.MustGetLogger("fsblkstorage")
    29  
    30  const (
    31  	blockfilePrefix = "blockfile_"
    32  )
    33  
    34  var (
    35  	blkMgrInfoKey = []byte("blkMgrInfo")
    36  )
    37  
    38  type blockfileMgr struct {
    39  	rootDir           string
    40  	conf              *Conf
    41  	db                *leveldbhelper.DBHandle
    42  	index             index
    43  	cpInfo            *checkpointInfo
    44  	cpInfoCond        *sync.Cond
    45  	currentFileWriter *blockfileWriter
    46  	bcInfo            atomic.Value
    47  }
    48  
    49  /*
    50  Creates a new manager that will manage the files used for block persistence.
    51  This manager manages the file system, including
    52    -- the directory where the files are stored
    53    -- the individual files where the blocks are stored
    54    -- the checkpoint which tracks the latest file being persisted to
    55    -- the index which tracks what block and transaction is in what file
    56  When a new blockfile manager is started (i.e. only on start-up), it checks
    57  whether this start-up is the first time the system is coming up or a restart
    58  of the system.
    59  
    60  The blockfile manager stores blocks of data into a file system.  That file
    61  storage is done by creating sequentially numbered files of a configured size,
    62  i.e. blockfile_000000, blockfile_000001, etc.
    63  
    64  Each transaction in a block is stored with information about the number of
    65  bytes in that transaction
    66   Adding txLoc [fileSuffixNum=0, offset=3, bytesLength=104] for tx [1:0] to index
    67   Adding txLoc [fileSuffixNum=0, offset=107, bytesLength=104] for tx [1:1] to index
    68  Each block is stored with the total encoded length of that block as well as the
    69  tx location offsets.
    70  
    71  Remember that the following steps are performed only once, at start-up of the system.
    72  At start up a new manager:
    73    *) Checks if the directory for storing files exists, if not creates the dir
    74    *) Checks if the key value database exists, if not creates one
    75         (will create a db dir)
    76    *) Determines the checkpoint information (cpinfo) used for storage
    77  		-- Loads it from the db if it exists; if not, instantiates a new cpinfo
    78  		-- If cpinfo was loaded from db, compares to FS
    79  		-- If cpinfo and file system are not in sync, syncs cpInfo from FS
    80    *) Starts a new file writer
    81  		-- truncates file per cpinfo to remove any excess past last block
    82    *) Determines the index information used to find tx and blocks in
    83    the file blkstorage
    84  		-- Instantiates a new blockIdxInfo
    85  		-- Loads the index from the db if exists
    86  		-- syncIndex comparing the last block indexed to what is in the FS
    87  		-- If index and file system are not in sync, syncs index from the FS
    88    *) Updates blockchain info used by the APIs
    89  */
    90  func newBlockfileMgr(id string, conf *Conf, indexConfig *blkstorage.IndexConfig, indexStore *leveldbhelper.DBHandle) *blockfileMgr {
    91  	logger.Debugf("newBlockfileMgr() initializing file-based block storage for ledger: %s ", id)
    92  	//Determine the root directory for the blockfile storage; if it does not exist, create it
    93  	rootDir := conf.getLedgerBlockDir(id)
    94  	_, err := util.CreateDirIfMissing(rootDir)
    95  	if err != nil {
    96  		panic(fmt.Sprintf("Error creating block storage root dir [%s]: %s", rootDir, err))
    97  	}
    98  	// Instantiate the manager, i.e. blockFileMgr structure
    99  	mgr := &blockfileMgr{rootDir: rootDir, conf: conf, db: indexStore}
   100  
   101  	// cp = checkpointInfo: retrieve from the database the suffix (number) of the file where blocks were last stored.
   102  	// It also retrieves the current size of that file and the last block number that was written to that file.
   103  	// At init checkpointInfo:latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0]
   104  	cpInfo, err := mgr.loadCurrentInfo()
   105  	if err != nil {
   106  		panic(fmt.Sprintf("Could not get block file info for current block file from db: %s", err))
   107  	}
   108  	if cpInfo == nil {
   109  		logger.Info(`Getting block information from block storage`)
   110  		if cpInfo, err = constructCheckpointInfoFromBlockFiles(rootDir); err != nil {
   111  			panic(fmt.Sprintf("Could not build checkpoint info from block files: %s", err))
   112  		}
   113  		logger.Debugf("Info constructed by scanning the blocks dir = %s", spew.Sdump(cpInfo))
   114  	} else {
   115  		logger.Debug(`Synching block information from block storage (if needed)`)
   116  		syncCPInfoFromFS(rootDir, cpInfo)
   117  	}
   118  	err = mgr.saveCurrentInfo(cpInfo, true)
   119  	if err != nil {
   120  		panic(fmt.Sprintf("Could not save next block file info to db: %s", err))
   121  	}
   122  
   123  	//Open a writer to the file identified by the number and truncate it to only contain the latest block
   124  	// that was completely saved (file system, index, cpinfo, etc)
   125  	currentFileWriter, err := newBlockfileWriter(deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum))
   126  	if err != nil {
   127  		panic(fmt.Sprintf("Could not open writer to current file: %s", err))
   128  	}
   129  	//Truncate the file to remove excess past last block
   130  	err = currentFileWriter.truncateFile(cpInfo.latestFileChunksize)
   131  	if err != nil {
   132  		panic(fmt.Sprintf("Could not truncate current file to known size in db: %s", err))
   133  	}
   134  
   135  	// Create a new KeyValue store database handler for the blocks index in the keyvalue database
   136  	if mgr.index, err = newBlockIndex(indexConfig, indexStore); err != nil {
   137  		panic(fmt.Sprintf("error in block index: %s", err))
   138  	}
   139  
   140  	// Update the manager with the checkpoint info and the file writer
   141  	mgr.cpInfo = cpInfo
   142  	mgr.currentFileWriter = currentFileWriter
   143  	// Create a checkpoint condition (event) variable for goroutines waiting for
   144  	// or announcing the occurrence of an event.
   145  	mgr.cpInfoCond = sync.NewCond(&sync.Mutex{})
   146  
   147  	// init BlockchainInfo for external APIs
   148  	bcInfo := &common.BlockchainInfo{
   149  		Height:            0,
   150  		CurrentBlockHash:  nil,
   151  		PreviousBlockHash: nil}
   152  
   153  	if !cpInfo.isChainEmpty {
   154  		//If start-up is a restart of an existing storage, sync the index from block storage and update BlockchainInfo for external APIs
   155  		mgr.syncIndex()
   156  		lastBlockHeader, err := mgr.retrieveBlockHeaderByNumber(cpInfo.lastBlockNumber)
   157  		if err != nil {
   158  			panic(fmt.Sprintf("Could not retrieve header of the last block from file: %s", err))
   159  		}
   160  		lastBlockHash := protoutil.BlockHeaderHash(lastBlockHeader)
   161  		previousBlockHash := lastBlockHeader.PreviousHash
   162  		bcInfo = &common.BlockchainInfo{
   163  			Height:            cpInfo.lastBlockNumber + 1,
   164  			CurrentBlockHash:  lastBlockHash,
   165  			PreviousBlockHash: previousBlockHash}
   166  	}
   167  	mgr.bcInfo.Store(bcInfo)
   168  	return mgr
   169  }
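
        // exampleBlockfileMgrUsage is an illustrative sketch, not part of the upstream
        // code: it shows how the manager described above would typically be driven,
        // assuming the caller already has a *Conf, a *blkstorage.IndexConfig, an open
        // *leveldbhelper.DBHandle, and a block to append; "ledger1" is a hypothetical
        // ledger id.
        func exampleBlockfileMgrUsage(conf *Conf, indexConfig *blkstorage.IndexConfig, indexStore *leveldbhelper.DBHandle, block *common.Block) error {
        	mgr := newBlockfileMgr("ledger1", conf, indexConfig, indexStore)
        	defer mgr.close()
        
        	// append a block and read it back by number
        	if err := mgr.addBlock(block); err != nil {
        		return err
        	}
        	_, err := mgr.retrieveBlockByNumber(block.Header.Number)
        	return err
        }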
   170  
   171  //cp = checkpointInfo: holds the suffix and the size of the file where the
   172  // last block was written, as well as the last block number that was written.
   173  // syncCPInfoFromFS brings this info in sync with the file system.  At init
   174  //checkpointInfo:latestFileChunkSuffixNum=[0], latestFileChunksize=[0], lastBlockNumber=[0]
   175  func syncCPInfoFromFS(rootDir string, cpInfo *checkpointInfo) {
   176  	logger.Debugf("Starting checkpoint=%s", cpInfo)
   177  	//Check whether the file identified by the suffix where the last block was written exists
   178  	filePath := deriveBlockfilePath(rootDir, cpInfo.latestFileChunkSuffixNum)
   179  	exists, size, err := util.FileExists(filePath)
   180  	if err != nil {
   181  		panic(fmt.Sprintf("Error in checking whether file [%s] exists: %s", filePath, err))
   182  	}
   183  	logger.Debugf("status of file [%s]: exists=[%t], size=[%d]", filePath, exists, size)
   184  	//The test is !exists because when a file number is first used, the file does not exist yet
   185  	//checks that the file exists and that the size of the file is what is stored in cpinfo
   186  	//status of file [/tmp/tests/ledger/blkstorage/fsblkstorage/blocks/blockfile_000000]: exists=[false], size=[0]
   187  	if !exists || int(size) == cpInfo.latestFileChunksize {
   188  		// check point info is in sync with the file on disk
   189  		return
   190  	}
   191  	//Scan the file system to verify that the checkpoint info stored in db is correct
   192  	_, endOffsetLastBlock, numBlocks, err := scanForLastCompleteBlock(
   193  		rootDir, cpInfo.latestFileChunkSuffixNum, int64(cpInfo.latestFileChunksize))
   194  	if err != nil {
   195  		panic(fmt.Sprintf("Could not open current file for detecting last block in the file: %s", err))
   196  	}
   197  	cpInfo.latestFileChunksize = int(endOffsetLastBlock)
   198  	if numBlocks == 0 {
   199  		return
   200  	}
   201  	//Updates the checkpoint info with the actual last block number stored and its end location
   202  	if cpInfo.isChainEmpty {
   203  		cpInfo.lastBlockNumber = uint64(numBlocks - 1)
   204  	} else {
   205  		cpInfo.lastBlockNumber += uint64(numBlocks)
   206  	}
   207  	cpInfo.isChainEmpty = false
   208  	logger.Debugf("Checkpoint after updates by scanning the last file segment:%s", cpInfo)
   209  }
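
        // Illustrative recovery scenario for syncCPInfoFromFS (assumed numbers, not
        // taken from the upstream code): suppose the stored checkpoint says
        // latestFileChunksize=500, but a crash left the current block file 750 bytes
        // long, containing two more complete blocks that end at offset 730 plus a
        // partially written tail. scanForLastCompleteBlock then reports
        // endOffsetLastBlock=730 and numBlocks=2, so latestFileChunksize becomes 730
        // and lastBlockNumber advances by 2; the partial tail is ignored and later
        // truncated by newBlockfileMgr.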
   210  
   211  func deriveBlockfilePath(rootDir string, suffixNum int) string {
   212  	return rootDir + "/" + blockfilePrefix + fmt.Sprintf("%06d", suffixNum)
   213  }
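
        // For example (illustrative values, not from the upstream code):
        //
        //   deriveBlockfilePath("/var/ledger/chains/ch1", 6)
        //
        // returns "/var/ledger/chains/ch1/blockfile_000006", since the suffix is
        // zero-padded to six digits by the "%06d" format.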
   214  
   215  func (mgr *blockfileMgr) close() {
   216  	mgr.currentFileWriter.close()
   217  }
   218  
   219  func (mgr *blockfileMgr) moveToNextFile() {
   220  	cpInfo := &checkpointInfo{
   221  		latestFileChunkSuffixNum: mgr.cpInfo.latestFileChunkSuffixNum + 1,
   222  		latestFileChunksize:      0,
   223  		lastBlockNumber:          mgr.cpInfo.lastBlockNumber}
   224  
   225  	nextFileWriter, err := newBlockfileWriter(
   226  		deriveBlockfilePath(mgr.rootDir, cpInfo.latestFileChunkSuffixNum))
   227  
   228  	if err != nil {
   229  		panic(fmt.Sprintf("Could not open writer to next file: %s", err))
   230  	}
   231  	mgr.currentFileWriter.close()
   232  	err = mgr.saveCurrentInfo(cpInfo, true)
   233  	if err != nil {
   234  		panic(fmt.Sprintf("Could not save next block file info to db: %s", err))
   235  	}
   236  	mgr.currentFileWriter = nextFileWriter
   237  	mgr.updateCheckpoint(cpInfo)
   238  }
   239  
   240  func (mgr *blockfileMgr) addBlock(block *common.Block) error {
   241  	bcInfo := mgr.getBlockchainInfo()
   242  	if block.Header.Number != bcInfo.Height {
   243  		return errors.Errorf(
   244  			"block number should have been %d but was %d",
   245  			mgr.getBlockchainInfo().Height, block.Header.Number,
   246  		)
   247  	}
   248  
   249  	// Add the previous hash check - though not essential, it is not a bad idea to
   250  	// verify the field `block.Header.PreviousHash` present in the block.
   251  	// This check is a simple bytes comparison and hence does not cause any observable performance penalty,
   252  	// and it may help in detecting a rare scenario where there is a bug in the ordering service.
   253  	if !bytes.Equal(block.Header.PreviousHash, bcInfo.CurrentBlockHash) {
   254  		return errors.Errorf(
   255  			"unexpected Previous block hash. Expected PreviousHash = [%x], PreviousHash referred in the latest block= [%x]",
   256  			bcInfo.CurrentBlockHash, block.Header.PreviousHash,
   257  		)
   258  	}
   259  	blockBytes, info, err := serializeBlock(block)
   260  	if err != nil {
   261  		return errors.WithMessage(err, "error serializing block")
   262  	}
   263  	blockHash := protoutil.BlockHeaderHash(block.Header)
   264  	//Get the location / offset where each transaction starts in the block and where the block ends
   265  	txOffsets := info.txOffsets
   266  	currentOffset := mgr.cpInfo.latestFileChunksize
   267  
   268  	blockBytesLen := len(blockBytes)
   269  	blockBytesEncodedLen := proto.EncodeVarint(uint64(blockBytesLen))
   270  	totalBytesToAppend := blockBytesLen + len(blockBytesEncodedLen)
   271  
   272  	//Determine if we need to start a new file since the size of this block
   273  	//exceeds the amount of space left in the current file
   274  	if currentOffset+totalBytesToAppend > mgr.conf.maxBlockfileSize {
   275  		mgr.moveToNextFile()
   276  		currentOffset = 0
   277  	}
   278  	//append blockBytesEncodedLen to the file
   279  	err = mgr.currentFileWriter.append(blockBytesEncodedLen, false)
   280  	if err == nil {
   281  		//append the actual block bytes to the file
   282  		err = mgr.currentFileWriter.append(blockBytes, true)
   283  	}
   284  	if err != nil {
   285  		truncateErr := mgr.currentFileWriter.truncateFile(mgr.cpInfo.latestFileChunksize)
   286  		if truncateErr != nil {
   287  			panic(fmt.Sprintf("Could not truncate current file to known size after an error during block append: %s", err))
   288  		}
   289  		return errors.WithMessage(err, "error appending block to file")
   290  	}
   291  
   292  	//Update the checkpoint info with the results of adding the new block
   293  	currentCPInfo := mgr.cpInfo
   294  	newCPInfo := &checkpointInfo{
   295  		latestFileChunkSuffixNum: currentCPInfo.latestFileChunkSuffixNum,
   296  		latestFileChunksize:      currentCPInfo.latestFileChunksize + totalBytesToAppend,
   297  		isChainEmpty:             false,
   298  		lastBlockNumber:          block.Header.Number}
   299  	//save the checkpoint information in the database
   300  	if err = mgr.saveCurrentInfo(newCPInfo, false); err != nil {
   301  		truncateErr := mgr.currentFileWriter.truncateFile(currentCPInfo.latestFileChunksize)
   302  		if truncateErr != nil {
   303  			panic(fmt.Sprintf("Error in truncating current file to known size after an error in saving checkpoint info: %s", err))
   304  		}
   305  		return errors.WithMessage(err, "error saving current file info to db")
   306  	}
   307  
   308  	//Index block file location pointer updated with the file suffix and offset for the new block
   309  	blockFLP := &fileLocPointer{fileSuffixNum: newCPInfo.latestFileChunkSuffixNum}
   310  	blockFLP.offset = currentOffset
   311  	// shift the tx offsets because we prepend the encoded length before the block bytes
   312  	for _, txOffset := range txOffsets {
   313  		txOffset.loc.offset += len(blockBytesEncodedLen)
   314  	}
   315  	//save the index in the database
   316  	if err = mgr.index.indexBlock(&blockIdxInfo{
   317  		blockNum: block.Header.Number, blockHash: blockHash,
   318  		flp: blockFLP, txOffsets: txOffsets, metadata: block.Metadata}); err != nil {
   319  		return err
   320  	}
   321  
   322  	//update the checkpoint info (for storage) and the blockchain info (for APIs) in the manager
   323  	mgr.updateCheckpoint(newCPInfo)
   324  	mgr.updateBlockchainInfo(blockHash, block)
   325  	return nil
   326  }
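
        // exampleFrameBlockRecord is an illustrative sketch, not part of the upstream
        // code, of the on-disk record layout produced by addBlock above: every block
        // is appended as a proto varint holding the length of the serialized block,
        // immediately followed by the serialized block bytes themselves.
        func exampleFrameBlockRecord(blockBytes []byte) []byte {
        	lenBytes := proto.EncodeVarint(uint64(len(blockBytes)))
        	record := make([]byte, 0, len(lenBytes)+len(blockBytes))
        	record = append(record, lenBytes...)
        	record = append(record, blockBytes...)
        	return record
        }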
   327  
   328  func (mgr *blockfileMgr) syncIndex() error {
   329  	var lastBlockIndexed uint64
   330  	var indexEmpty bool
   331  	var err error
   332  	//from the database, get the last block that was indexed
   333  	if lastBlockIndexed, err = mgr.index.getLastBlockIndexed(); err != nil {
   334  		if err != errIndexEmpty {
   335  			return err
   336  		}
   337  		indexEmpty = true
   338  	}
   339  
   340  	//initialize index to file number:zero, offset:zero and blockNum:0
   341  	startFileNum := 0
   342  	startOffset := 0
   343  	skipFirstBlock := false
   344  	//get the last file that blocks were added to using the checkpoint info
   345  	endFileNum := mgr.cpInfo.latestFileChunkSuffixNum
   346  	startingBlockNum := uint64(0)
   347  
   348  	//if the index stored in the db has a value, update the index information with those values
   349  	if !indexEmpty {
   350  		if lastBlockIndexed == mgr.cpInfo.lastBlockNumber {
   351  			logger.Debug("Both the block files and indices are in sync.")
   352  			return nil
   353  		}
   354  		logger.Debugf("Last block indexed [%d], Last block present in block files [%d]", lastBlockIndexed, mgr.cpInfo.lastBlockNumber)
   355  		var flp *fileLocPointer
   356  		if flp, err = mgr.index.getBlockLocByBlockNum(lastBlockIndexed); err != nil {
   357  			return err
   358  		}
   359  		startFileNum = flp.fileSuffixNum
   360  		startOffset = flp.locPointer.offset
   361  		skipFirstBlock = true
   362  		startingBlockNum = lastBlockIndexed + 1
   363  	} else {
   364  		logger.Debugf("No block indexed, Last block present in block files=[%d]", mgr.cpInfo.lastBlockNumber)
   365  	}
   366  
   367  	logger.Infof("Start building index from block [%d] to last block [%d]", startingBlockNum, mgr.cpInfo.lastBlockNumber)
   368  
   369  	//open a blockstream to the file location that was stored in the index
   370  	var stream *blockStream
   371  	if stream, err = newBlockStream(mgr.rootDir, startFileNum, int64(startOffset), endFileNum); err != nil {
   372  		return err
   373  	}
   374  	var blockBytes []byte
   375  	var blockPlacementInfo *blockPlacementInfo
   376  
   377  	if skipFirstBlock {
   378  		if blockBytes, _, err = stream.nextBlockBytesAndPlacementInfo(); err != nil {
   379  			return err
   380  		}
   381  		if blockBytes == nil {
   382  			return errors.Errorf("block bytes for block num = [%d] should not be nil here. The indexes for the block are already present",
   383  				lastBlockIndexed)
   384  		}
   385  	}
   386  
   387  	//Should be at the last block already, but go ahead and loop looking for next blockBytes.
   388  	//If there is another block, add it to the index.
   389  	//This will ensure block indexes are correct, for example if peer had crashed before indexes got updated.
   390  	blockIdxInfo := &blockIdxInfo{}
   391  	for {
   392  		if blockBytes, blockPlacementInfo, err = stream.nextBlockBytesAndPlacementInfo(); err != nil {
   393  			return err
   394  		}
   395  		if blockBytes == nil {
   396  			break
   397  		}
   398  		info, err := extractSerializedBlockInfo(blockBytes)
   399  		if err != nil {
   400  			return err
   401  		}
   402  
   403  		//The blockStartOffset will get applied to the txOffsets prior to indexing within indexBlock(),
   404  		//therefore just shift by the difference between blockBytesOffset and blockStartOffset
   405  		numBytesToShift := int(blockPlacementInfo.blockBytesOffset - blockPlacementInfo.blockStartOffset)
   406  		for _, offset := range info.txOffsets {
   407  			offset.loc.offset += numBytesToShift
   408  		}
   409  
   410  		//Update the blockIdxInfo with what was actually stored in the file system
   411  		blockIdxInfo.blockHash = protoutil.BlockHeaderHash(info.blockHeader)
   412  		blockIdxInfo.blockNum = info.blockHeader.Number
   413  		blockIdxInfo.flp = &fileLocPointer{fileSuffixNum: blockPlacementInfo.fileNum,
   414  			locPointer: locPointer{offset: int(blockPlacementInfo.blockStartOffset)}}
   415  		blockIdxInfo.txOffsets = info.txOffsets
   416  		blockIdxInfo.metadata = info.metadata
   417  
   418  		logger.Debugf("syncIndex() indexing block [%d]", blockIdxInfo.blockNum)
   419  		if err = mgr.index.indexBlock(blockIdxInfo); err != nil {
   420  			return err
   421  		}
   422  		if blockIdxInfo.blockNum%10000 == 0 {
   423  			logger.Infof("Indexed block number [%d]", blockIdxInfo.blockNum)
   424  		}
   425  	}
   426  	logger.Infof("Finished building index. Last block indexed [%d]", blockIdxInfo.blockNum)
   427  	return nil
   428  }
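
        // Illustrative example of the offset shift performed in syncIndex (assumed
        // numbers, not from the upstream code): if a block record starts at
        // blockStartOffset=100 and the serialized block bytes begin at
        // blockBytesOffset=102 (a two-byte length varint), then numBytesToShift is 2,
        // and every tx offset, which was computed relative to the block bytes, is
        // moved forward by 2 before indexBlock applies the blockStartOffset-based
        // file location pointer.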
   429  
   430  func (mgr *blockfileMgr) getBlockchainInfo() *common.BlockchainInfo {
   431  	return mgr.bcInfo.Load().(*common.BlockchainInfo)
   432  }
   433  
   434  func (mgr *blockfileMgr) updateCheckpoint(cpInfo *checkpointInfo) {
   435  	mgr.cpInfoCond.L.Lock()
   436  	defer mgr.cpInfoCond.L.Unlock()
   437  	mgr.cpInfo = cpInfo
   438  	logger.Debugf("Broadcasting about update checkpointInfo: %s", cpInfo)
   439  	mgr.cpInfoCond.Broadcast()
   440  }
   441  
   442  func (mgr *blockfileMgr) updateBlockchainInfo(latestBlockHash []byte, latestBlock *common.Block) {
   443  	currentBCInfo := mgr.getBlockchainInfo()
   444  	newBCInfo := &common.BlockchainInfo{
   445  		Height:            currentBCInfo.Height + 1,
   446  		CurrentBlockHash:  latestBlockHash,
   447  		PreviousBlockHash: latestBlock.Header.PreviousHash}
   448  
   449  	mgr.bcInfo.Store(newBCInfo)
   450  }
   451  
   452  func (mgr *blockfileMgr) retrieveBlockByHash(blockHash []byte) (*common.Block, error) {
   453  	logger.Debugf("retrieveBlockByHash() - blockHash = [%#v]", blockHash)
   454  	loc, err := mgr.index.getBlockLocByHash(blockHash)
   455  	if err != nil {
   456  		return nil, err
   457  	}
   458  	return mgr.fetchBlock(loc)
   459  }
   460  
   461  func (mgr *blockfileMgr) retrieveBlockByNumber(blockNum uint64) (*common.Block, error) {
   462  	logger.Debugf("retrieveBlockByNumber() - blockNum = [%d]", blockNum)
   463  
   464  	// interpret math.MaxUint64 as a request for last block
   465  	if blockNum == math.MaxUint64 {
   466  		blockNum = mgr.getBlockchainInfo().Height - 1
   467  	}
   468  
   469  	loc, err := mgr.index.getBlockLocByBlockNum(blockNum)
   470  	if err != nil {
   471  		return nil, err
   472  	}
   473  	return mgr.fetchBlock(loc)
   474  }
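
        // For example (illustrative, not from the upstream code): with a chain of
        // height 10, mgr.retrieveBlockByNumber(math.MaxUint64) resolves to block
        // number 9, i.e. the last block in the chain.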
   475  
   476  func (mgr *blockfileMgr) retrieveBlockByTxID(txID string) (*common.Block, error) {
   477  	logger.Debugf("retrieveBlockByTxID() - txID = [%s]", txID)
   478  
   479  	loc, err := mgr.index.getBlockLocByTxID(txID)
   480  
   481  	if err != nil {
   482  		return nil, err
   483  	}
   484  	return mgr.fetchBlock(loc)
   485  }
   486  
   487  func (mgr *blockfileMgr) retrieveTxValidationCodeByTxID(txID string) (peer.TxValidationCode, error) {
   488  	logger.Debugf("retrieveTxValidationCodeByTxID() - txID = [%s]", txID)
   489  	return mgr.index.getTxValidationCodeByTxID(txID)
   490  }
   491  
   492  func (mgr *blockfileMgr) retrieveBlockHeaderByNumber(blockNum uint64) (*common.BlockHeader, error) {
   493  	logger.Debugf("retrieveBlockHeaderByNumber() - blockNum = [%d]", blockNum)
   494  	loc, err := mgr.index.getBlockLocByBlockNum(blockNum)
   495  	if err != nil {
   496  		return nil, err
   497  	}
   498  	blockBytes, err := mgr.fetchBlockBytes(loc)
   499  	if err != nil {
   500  		return nil, err
   501  	}
   502  	info, err := extractSerializedBlockInfo(blockBytes)
   503  	if err != nil {
   504  		return nil, err
   505  	}
   506  	return info.blockHeader, nil
   507  }
   508  
   509  func (mgr *blockfileMgr) retrieveBlocks(startNum uint64) (*blocksItr, error) {
   510  	return newBlockItr(mgr, startNum), nil
   511  }
   512  
   513  func (mgr *blockfileMgr) retrieveTransactionByID(txID string) (*common.Envelope, error) {
   514  	logger.Debugf("retrieveTransactionByID() - txId = [%s]", txID)
   515  	loc, err := mgr.index.getTxLoc(txID)
   516  	if err != nil {
   517  		return nil, err
   518  	}
   519  	return mgr.fetchTransactionEnvelope(loc)
   520  }
   521  
   522  func (mgr *blockfileMgr) retrieveTransactionByBlockNumTranNum(blockNum uint64, tranNum uint64) (*common.Envelope, error) {
   523  	logger.Debugf("retrieveTransactionByBlockNumTranNum() - blockNum = [%d], tranNum = [%d]", blockNum, tranNum)
   524  	loc, err := mgr.index.getTXLocByBlockNumTranNum(blockNum, tranNum)
   525  	if err != nil {
   526  		return nil, err
   527  	}
   528  	return mgr.fetchTransactionEnvelope(loc)
   529  }
   530  
   531  func (mgr *blockfileMgr) fetchBlock(lp *fileLocPointer) (*common.Block, error) {
   532  	blockBytes, err := mgr.fetchBlockBytes(lp)
   533  	if err != nil {
   534  		return nil, err
   535  	}
   536  	block, err := deserializeBlock(blockBytes)
   537  	if err != nil {
   538  		return nil, err
   539  	}
   540  	return block, nil
   541  }
   542  
   543  func (mgr *blockfileMgr) fetchTransactionEnvelope(lp *fileLocPointer) (*common.Envelope, error) {
   544  	logger.Debugf("Entering fetchTransactionEnvelope() %v\n", lp)
   545  	var err error
   546  	var txEnvelopeBytes []byte
   547  	if txEnvelopeBytes, err = mgr.fetchRawBytes(lp); err != nil {
   548  		return nil, err
   549  	}
   550  	_, n := proto.DecodeVarint(txEnvelopeBytes)
   551  	return protoutil.GetEnvelopeFromBlock(txEnvelopeBytes[n:])
   552  }
   553  
   554  func (mgr *blockfileMgr) fetchBlockBytes(lp *fileLocPointer) ([]byte, error) {
   555  	stream, err := newBlockfileStream(mgr.rootDir, lp.fileSuffixNum, int64(lp.offset))
   556  	if err != nil {
   557  		return nil, err
   558  	}
   559  	defer stream.close()
   560  	b, err := stream.nextBlockBytes()
   561  	if err != nil {
   562  		return nil, err
   563  	}
   564  	return b, nil
   565  }
   566  
   567  func (mgr *blockfileMgr) fetchRawBytes(lp *fileLocPointer) ([]byte, error) {
   568  	filePath := deriveBlockfilePath(mgr.rootDir, lp.fileSuffixNum)
   569  	reader, err := newBlockfileReader(filePath)
   570  	if err != nil {
   571  		return nil, err
   572  	}
   573  	defer reader.close()
   574  	b, err := reader.read(lp.offset, lp.bytesLength)
   575  	if err != nil {
   576  		return nil, err
   577  	}
   578  	return b, nil
   579  }
   580  
   581  //Get the current checkpoint information that is stored in the database
   582  func (mgr *blockfileMgr) loadCurrentInfo() (*checkpointInfo, error) {
   583  	var b []byte
   584  	var err error
   585  	if b, err = mgr.db.Get(blkMgrInfoKey); b == nil || err != nil {
   586  		return nil, err
   587  	}
   588  	i := &checkpointInfo{}
   589  	if err = i.unmarshal(b); err != nil {
   590  		return nil, err
   591  	}
   592  	logger.Debugf("loaded checkpointInfo:%s", i)
   593  	return i, nil
   594  }
   595  
   596  func (mgr *blockfileMgr) saveCurrentInfo(i *checkpointInfo, sync bool) error {
   597  	b, err := i.marshal()
   598  	if err != nil {
   599  		return err
   600  	}
   601  	if err = mgr.db.Put(blkMgrInfoKey, b, sync); err != nil {
   602  		return err
   603  	}
   604  	return nil
   605  }
   606  
   607  // scanForLastCompleteBlock scans a given block file and detects the last offset in the file
   608  // after which there may lie a block partially written (towards the end of the file in a crash scenario).
   609  func scanForLastCompleteBlock(rootDir string, fileNum int, startingOffset int64) ([]byte, int64, int, error) {
   610  	//scan the passed file number suffix starting from the passed offset to find the last completed block
   611  	numBlocks := 0
   612  	var lastBlockBytes []byte
   613  	blockStream, errOpen := newBlockfileStream(rootDir, fileNum, startingOffset)
   614  	if errOpen != nil {
   615  		return nil, 0, 0, errOpen
   616  	}
   617  	defer blockStream.close()
   618  	var errRead error
   619  	var blockBytes []byte
   620  	for {
   621  		blockBytes, errRead = blockStream.nextBlockBytes()
   622  		if blockBytes == nil || errRead != nil {
   623  			break
   624  		}
   625  		lastBlockBytes = blockBytes
   626  		numBlocks++
   627  	}
   628  	if errRead == ErrUnexpectedEndOfBlockfile {
   629  		logger.Debugf(`Error:%s
   630  		The error may happen if a crash has happened during block appending.
   631  		Resetting error to nil and returning current offset as a last complete block's end offset`, errRead)
   632  		errRead = nil
   633  	}
   634  	logger.Debugf("scanForLastCompleteBlock(): last complete block ends at offset=[%d]", blockStream.currentOffset)
   635  	return lastBlockBytes, blockStream.currentOffset, numBlocks, errRead
   636  }
   637  
   638  // checkpointInfo records the latest block file suffix, its current size, the last block number written, and whether the chain is empty
   639  type checkpointInfo struct {
   640  	latestFileChunkSuffixNum int
   641  	latestFileChunksize      int
   642  	isChainEmpty             bool
   643  	lastBlockNumber          uint64
   644  }
   645  
   646  func (i *checkpointInfo) marshal() ([]byte, error) {
   647  	buffer := proto.NewBuffer([]byte{})
   648  	var err error
   649  	if err = buffer.EncodeVarint(uint64(i.latestFileChunkSuffixNum)); err != nil {
   650  		return nil, errors.Wrapf(err, "error encoding the latestFileChunkSuffixNum [%d]", i.latestFileChunkSuffixNum)
   651  	}
   652  	if err = buffer.EncodeVarint(uint64(i.latestFileChunksize)); err != nil {
   653  		return nil, errors.Wrapf(err, "error encoding the latestFileChunksize [%d]", i.latestFileChunksize)
   654  	}
   655  	if err = buffer.EncodeVarint(i.lastBlockNumber); err != nil {
   656  		return nil, errors.Wrapf(err, "error encoding the lastBlockNumber [%d]", i.lastBlockNumber)
   657  	}
   658  	var chainEmptyMarker uint64
   659  	if i.isChainEmpty {
   660  		chainEmptyMarker = 1
   661  	}
   662  	if err = buffer.EncodeVarint(chainEmptyMarker); err != nil {
   663  		return nil, errors.Wrapf(err, "error encoding chainEmptyMarker [%d]", chainEmptyMarker)
   664  	}
   665  	return buffer.Bytes(), nil
   666  }
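
        // Worked example (assumed values, not from the upstream code): marshalling
        // checkpointInfo{latestFileChunkSuffixNum: 1, latestFileChunksize: 300,
        // lastBlockNumber: 5, isChainEmpty: false} produces the varint sequence
        // [0x01, 0xAC, 0x02, 0x05, 0x00]: 1 and 5 each fit in a single byte, 300
        // needs the two-byte varint 0xAC 0x02, and the trailing 0x00 is the
        // chainEmptyMarker (0 because isChainEmpty is false).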
   667  
   668  func (i *checkpointInfo) unmarshal(b []byte) error {
   669  	buffer := proto.NewBuffer(b)
   670  	var val uint64
   671  	var chainEmptyMarker uint64
   672  	var err error
   673  
   674  	if val, err = buffer.DecodeVarint(); err != nil {
   675  		return err
   676  	}
   677  	i.latestFileChunkSuffixNum = int(val)
   678  
   679  	if val, err = buffer.DecodeVarint(); err != nil {
   680  		return err
   681  	}
   682  	i.latestFileChunksize = int(val)
   683  
   684  	if val, err = buffer.DecodeVarint(); err != nil {
   685  		return err
   686  	}
   687  	i.lastBlockNumber = val
   688  	if chainEmptyMarker, err = buffer.DecodeVarint(); err != nil {
   689  		return err
   690  	}
   691  	i.isChainEmpty = chainEmptyMarker == 1
   692  	return nil
   693  }
   694  
   695  func (i *checkpointInfo) String() string {
   696  	return fmt.Sprintf("latestFileChunkSuffixNum=[%d], latestFileChunksize=[%d], isChainEmpty=[%t], lastBlockNumber=[%d]",
   697  		i.latestFileChunkSuffixNum, i.latestFileChunksize, i.isChainEmpty, i.lastBlockNumber)
   698  }