github.com/true-sqn/fabric@v2.1.1+incompatible/core/ledger/kvledger/kv_ledger.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package kvledger

import (
	"fmt"
	"sync"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/hyperledger/fabric-protos-go/peer"
	"github.com/hyperledger/fabric/common/flogging"
	commonledger "github.com/hyperledger/fabric/common/ledger"
	"github.com/hyperledger/fabric/common/util"
	"github.com/hyperledger/fabric/core/ledger"
	"github.com/hyperledger/fabric/core/ledger/cceventmgmt"
	"github.com/hyperledger/fabric/core/ledger/confighistory"
	"github.com/hyperledger/fabric/core/ledger/kvledger/bookkeeping"
	"github.com/hyperledger/fabric/core/ledger/kvledger/history"
	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/privacyenabledstate"
	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/txmgr"
	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr"
	"github.com/hyperledger/fabric/core/ledger/ledgerstorage"
	"github.com/hyperledger/fabric/core/ledger/pvtdatapolicy"
	lutil "github.com/hyperledger/fabric/core/ledger/util"
	"github.com/hyperledger/fabric/protoutil"
	"github.com/pkg/errors"
)

var logger = flogging.MustGetLogger("kvledger")

// kvLedger provides an implementation of `ledger.PeerLedger`.
// This implementation provides a key-value based data model.
type kvLedger struct {
	ledgerID               string
	blockStore             *ledgerstorage.Store
	txtmgmt                txmgr.TxMgr
	historyDB              *history.DB
	configHistoryRetriever ledger.ConfigHistoryRetriever
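	// blockAPIsRWLock synchronizes the block retrieval APIs with block commit:
	// readers take the read lock while CommitLegacy holds the write lock for
	// the duration of a commit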
	blockAPIsRWLock        *sync.RWMutex
	stats                  *ledgerStats
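	// commitHash is the running hash chained across committed blocks; it is
	// recomputed on each commit and stored in the block metadata at
	// BlockMetadataIndex_COMMIT_HASH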
	commitHash             []byte
}

// newKVLedger constructs a new `kvLedger`
func newKVLedger(
	ledgerID string,
	blockStore *ledgerstorage.Store,
	versionedDB privacyenabledstate.DB,
	historyDB *history.DB,
	configHistoryMgr confighistory.Mgr,
	stateListeners []ledger.StateListener,
	bookkeeperProvider bookkeeping.Provider,
	ccInfoProvider ledger.DeployedChaincodeInfoProvider,
	ccLifecycleEventProvider ledger.ChaincodeLifecycleEventProvider,
	stats *ledgerStats,
	customTxProcessors map[common.HeaderType]ledger.CustomTxProcessor,
	hasher ledger.Hasher,
) (*kvLedger, error) {
	logger.Debugf("Creating KVLedger ledgerID=%s: ", ledgerID)
	// Create a kvLedger for this chain/ledger, which encapsulates the underlying
	// id store, block store, txmgr (state database), and history database
	l := &kvLedger{ledgerID: ledgerID, blockStore: blockStore, historyDB: historyDB, blockAPIsRWLock: &sync.RWMutex{}}

	btlPolicy := pvtdatapolicy.ConstructBTLPolicy(&collectionInfoRetriever{ledgerID, l, ccInfoProvider})

	if err := l.initTxMgr(
		versionedDB,
		stateListeners,
		btlPolicy,
		bookkeeperProvider,
		ccInfoProvider,
		customTxProcessors,
		hasher,
	); err != nil {
		return nil, err
	}

	l.initBlockStore(btlPolicy)

	// Retrieve the current commit hash from the block store
	var err error
	l.commitHash, err = l.lastPersistedCommitHash()
	if err != nil {
		return nil, err
	}

	// TODO Move the function `GetChaincodeEventListener` to the ledger interface and
	// move this event-registration functionality to the ledgermgmt package so that it
	// can be reused across other future ledger implementations
	ccEventListener := versionedDB.GetChaincodeEventListener()
	logger.Debugf("Register state db for chaincode lifecycle events: %t", ccEventListener != nil)
	if ccEventListener != nil {
		cceventmgmt.GetMgr().Register(ledgerID, ccEventListener)
		ccLifecycleEventProvider.RegisterListener(l.ledgerID, &ccEventListenerAdaptor{ccEventListener})
	}

	// Recover both the state DB and the history DB if they are out of sync with the block storage
	if err := l.recoverDBs(); err != nil {
		return nil, err
	}
	l.configHistoryRetriever = configHistoryMgr.GetRetriever(ledgerID, l)

	l.stats = stats
	return l, nil
}

func (l *kvLedger) initTxMgr(
	versionedDB privacyenabledstate.DB,
	stateListeners []ledger.StateListener,
	btlPolicy pvtdatapolicy.BTLPolicy,
	bookkeeperProvider bookkeeping.Provider,
	ccInfoProvider ledger.DeployedChaincodeInfoProvider,
	customtxProcessors map[common.HeaderType]ledger.CustomTxProcessor,
	hasher ledger.Hasher,
) error {
	var err error
	txmgr, err := lockbasedtxmgr.NewLockBasedTxMgr(
		l.ledgerID,
		versionedDB,
		stateListeners,
		btlPolicy,
		bookkeeperProvider,
		ccInfoProvider,
		customtxProcessors,
		hasher,
	)
	if err != nil {
		return err
	}
	l.txtmgmt = txmgr
	// This is a workaround for populating the lifecycle cache.
	// See the comments on this function for details
	qe, err := txmgr.NewQueryExecutorNoCollChecks()
	if err != nil {
		return err
	}
	defer qe.Done()
	for _, sl := range stateListeners {
		if err := sl.Initialize(l.ledgerID, qe); err != nil {
			return err
		}
	}
	return err
}

func (l *kvLedger) initBlockStore(btlPolicy pvtdatapolicy.BTLPolicy) {
	l.blockStore.Init(btlPolicy)
}

func (l *kvLedger) lastPersistedCommitHash() ([]byte, error) {
	bcInfo, err := l.GetBlockchainInfo()
	if err != nil {
		return nil, err
	}
	if bcInfo.Height == 0 {
		logger.Debugf("Chain is empty")
		return nil, nil
	}

	logger.Debugf("Fetching block [%d] to retrieve the currentCommitHash", bcInfo.Height-1)
	block, err := l.GetBlockByNumber(bcInfo.Height - 1)
	if err != nil {
		return nil, err
	}

	if len(block.Metadata.Metadata) < int(common.BlockMetadataIndex_COMMIT_HASH+1) {
		logger.Debugf("Last block metadata does not contain commit hash")
		return nil, nil
	}

	commitHash := &common.Metadata{}
	err = proto.Unmarshal(block.Metadata.Metadata[common.BlockMetadataIndex_COMMIT_HASH], commitHash)
	if err != nil {
		return nil, errors.Wrap(err, "error unmarshaling last persisted commit hash")
	}
	return commitHash.Value, nil
}

// recoverDBs recovers the state database and the history database (if it exists)
// by recommitting the last valid blocks
func (l *kvLedger) recoverDBs() error {
	logger.Debugf("Entering recoverDB()")
	if err := l.syncStateAndHistoryDBWithBlockstore(); err != nil {
		return err
	}
	if err := l.syncStateDBWithPvtdatastore(); err != nil {
		return err
	}
	return nil
}

func (l *kvLedger) syncStateAndHistoryDBWithBlockstore() error {
	// If there is no block in block storage, there is nothing to recover.
	info, _ := l.blockStore.GetBlockchainInfo()
	if info.Height == 0 {
		logger.Debug("Block storage is empty.")
		return nil
	}
	lastAvailableBlockNum := info.Height - 1
	recoverables := []recoverable{l.txtmgmt}
	if l.historyDB != nil {
		recoverables = append(recoverables, l.historyDB)
	}
	recoverers := []*recoverer{}
	for _, recoverable := range recoverables {
		recoverFlag, firstBlockNum, err := recoverable.ShouldRecover(lastAvailableBlockNum)
		if err != nil {
			return err
		}

		// During a ledger reset/rollback, the state database must be dropped. If the state database
		// uses goleveldb, the reset/rollback code itself drops the DB. If it uses couchDB, the
		// DB must be dropped manually. Hence, we compare (only for the stateDB) the height
		// of the state DB and the block store to ensure that the state DB has been dropped.

		// firstBlockNum is the nextBlockNum expected by the state DB;
		// in other words, it is the height of the state DB.
		if firstBlockNum > lastAvailableBlockNum+1 {
			dbName := recoverable.Name()
			return fmt.Errorf("the %s database [height=%d] is ahead of the block store [height=%d]. "+
				"This is possible when the %s database is not dropped after a ledger reset/rollback. "+
				"The %s database can safely be dropped and will be rebuilt up to block store height upon the next peer start.",
				dbName, firstBlockNum, lastAvailableBlockNum+1, dbName, dbName)
		}
		if recoverFlag {
			recoverers = append(recoverers, &recoverer{firstBlockNum, recoverable})
		}
	}
	if len(recoverers) == 0 {
		return nil
	}
	if len(recoverers) == 1 {
		return l.recommitLostBlocks(recoverers[0].firstBlockNum, lastAvailableBlockNum, recoverers[0].recoverable)
	}

	// both DBs need to be recovered
	if recoverers[0].firstBlockNum > recoverers[1].firstBlockNum {
		// swap (put the lagging DB at index 0)
		recoverers[0], recoverers[1] = recoverers[1], recoverers[0]
	}
	if recoverers[0].firstBlockNum != recoverers[1].firstBlockNum {
		// bring the lagging DB level with the other DB
		if err := l.recommitLostBlocks(recoverers[0].firstBlockNum, recoverers[1].firstBlockNum-1,
			recoverers[0].recoverable); err != nil {
			return err
		}
	}
	// bring both DBs up to the block storage height
	return l.recommitLostBlocks(recoverers[1].firstBlockNum, lastAvailableBlockNum,
		recoverers[0].recoverable, recoverers[1].recoverable)
}

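// syncStateDBWithPvtdatastore applies to the state DB the pvtData of old blocks that the
// reconciler has already persisted in the pvtdata store but that may not have reached the
// state DB (e.g., because the peer stopped between the two writes)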
func (l *kvLedger) syncStateDBWithPvtdatastore() error {
	// TODO: So far, the design philosophy has been that the scope of block storage is
	// limited to storing and retrieving block data with certain guarantees, the statedb is
	// for state management, and the higher layer, 'kvledger', coordinates the acts between
	// the two. However, maintaining the state of the consumption of blocks (i.e.,
	// lastUpdatedOldBlockList for pvtstore reconciliation) within the private data block storage
	// breaks that assumption. The knowledge of which blocks have been consumed for the purpose
	// of state updates should not lie with the source (i.e., pvtdatastorage). A potential fix
	// is mentioned in FAB-12731

	blocksPvtData, err := l.blockStore.GetLastUpdatedOldBlocksPvtData()
	if err != nil {
		return err
	}

	// As the pvtdata store can contain pvtData of yet-to-be-committed blocks,
	// we need to filter them out before passing the data to the transaction
	// manager for stateDB updates.
	if err := l.filterYetToCommitBlocks(blocksPvtData); err != nil {
		return err
	}

	if err = l.applyValidTxPvtDataOfOldBlocks(blocksPvtData); err != nil {
		return err
	}

	l.blockStore.ResetLastUpdatedOldBlocksList()

	return nil
}

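// filterYetToCommitBlocks removes from the given map the pvtData that belongs to
// blocks which have not yet been committed to the block storage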
func (l *kvLedger) filterYetToCommitBlocks(blocksPvtData map[uint64][]*ledger.TxPvtData) error {
	info, err := l.blockStore.GetBlockchainInfo()
	if err != nil {
		return err
	}
	for blkNum := range blocksPvtData {
		if blkNum > info.Height-1 {
			logger.Infof("found pvtdata associated with yet to be committed block [%d]", blkNum)
			delete(blocksPvtData, blkNum)
		}
	}
	return nil
}

// recommitLostBlocks retrieves the blocks in the specified range and commits their
// write sets to the state DB, the history DB, or both
func (l *kvLedger) recommitLostBlocks(firstBlockNum uint64, lastBlockNum uint64, recoverables ...recoverable) error {
	logger.Infof("Recommitting lost blocks - firstBlockNum=%d, lastBlockNum=%d, recoverables=%#v", firstBlockNum, lastBlockNum, recoverables)
	var err error
	var blockAndPvtdata *ledger.BlockAndPvtData
	for blockNumber := firstBlockNum; blockNumber <= lastBlockNum; blockNumber++ {
		if blockAndPvtdata, err = l.GetPvtDataAndBlockByNum(blockNumber, nil); err != nil {
			return err
		}
		for _, r := range recoverables {
			if err := r.CommitLostBlock(blockAndPvtdata); err != nil {
				return err
			}
		}
	}
	logger.Infof("Recommitted lost blocks - firstBlockNum=%d, lastBlockNum=%d, recoverables=%#v", firstBlockNum, lastBlockNum, recoverables)
	return nil
}

// GetTransactionByID retrieves a transaction by id
func (l *kvLedger) GetTransactionByID(txID string) (*peer.ProcessedTransaction, error) {
	tranEnv, err := l.blockStore.RetrieveTxByID(txID)
	if err != nil {
		return nil, err
	}
	txVResult, err := l.blockStore.RetrieveTxValidationCodeByTxID(txID)
	if err != nil {
		return nil, err
	}
	processedTran := &peer.ProcessedTransaction{TransactionEnvelope: tranEnv, ValidationCode: int32(txVResult)}
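	// acquire and immediately release the read lock: if a block commit is in
	// progress (CommitLegacy holds the write lock), this makes the call wait
	// for the commit to complete before returning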
	l.blockAPIsRWLock.RLock()
	l.blockAPIsRWLock.RUnlock()
	return processedTran, nil
}

// GetBlockchainInfo returns basic info about the blockchain
func (l *kvLedger) GetBlockchainInfo() (*common.BlockchainInfo, error) {
	bcInfo, err := l.blockStore.GetBlockchainInfo()
	l.blockAPIsRWLock.RLock()
	defer l.blockAPIsRWLock.RUnlock()
	return bcInfo, err
}

// GetBlockByNumber returns the block at the given height.
// A blockNumber of math.MaxUint64 will return the last block
func (l *kvLedger) GetBlockByNumber(blockNumber uint64) (*common.Block, error) {
	block, err := l.blockStore.RetrieveBlockByNumber(blockNumber)
	l.blockAPIsRWLock.RLock()
	l.blockAPIsRWLock.RUnlock()
	return block, err
}

// GetBlocksIterator returns an iterator that starts from `startBlockNumber` (inclusive).
// The iterator is a blocking iterator, i.e., it blocks until the next block becomes available in the ledger.
// The ResultsIterator contains results of type BlockHolder
func (l *kvLedger) GetBlocksIterator(startBlockNumber uint64) (commonledger.ResultsIterator, error) {
	blkItr, err := l.blockStore.RetrieveBlocks(startBlockNumber)
	if err != nil {
		return nil, err
	}
	return &blocksItr{l.blockAPIsRWLock, blkItr}, nil
}

// GetBlockByHash returns a block given its hash
func (l *kvLedger) GetBlockByHash(blockHash []byte) (*common.Block, error) {
	block, err := l.blockStore.RetrieveBlockByHash(blockHash)
	l.blockAPIsRWLock.RLock()
	l.blockAPIsRWLock.RUnlock()
	return block, err
}

// GetBlockByTxID returns the block that contains the given transaction
func (l *kvLedger) GetBlockByTxID(txID string) (*common.Block, error) {
	block, err := l.blockStore.RetrieveBlockByTxID(txID)
	l.blockAPIsRWLock.RLock()
	l.blockAPIsRWLock.RUnlock()
	return block, err
}

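// GetTxValidationCodeByTxID returns the validation code of the transaction with the given txID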
func (l *kvLedger) GetTxValidationCodeByTxID(txID string) (peer.TxValidationCode, error) {
	txValidationCode, err := l.blockStore.RetrieveTxValidationCodeByTxID(txID)
	l.blockAPIsRWLock.RLock()
	l.blockAPIsRWLock.RUnlock()
	return txValidationCode, err
}

// NewTxSimulator returns a new `ledger.TxSimulator`
func (l *kvLedger) NewTxSimulator(txid string) (ledger.TxSimulator, error) {
	return l.txtmgmt.NewTxSimulator(txid)
}

// NewQueryExecutor gives a handle to a query executor.
// A client can obtain more than one `QueryExecutor` for parallel execution.
// Any synchronization, if required, should be performed at the implementation level
func (l *kvLedger) NewQueryExecutor() (ledger.QueryExecutor, error) {
	return l.txtmgmt.NewQueryExecutor(util.GenerateUUID())
}

// NewHistoryQueryExecutor gives a handle to a history query executor.
// A client can obtain more than one `HistoryQueryExecutor` for parallel execution.
// Any synchronization, if required, should be performed at the implementation level.
// The ledger's block store is passed so that historical values can be looked up from the chain
func (l *kvLedger) NewHistoryQueryExecutor() (ledger.HistoryQueryExecutor, error) {
	if l.historyDB != nil {
		return l.historyDB.NewQueryExecutor(l.blockStore)
	}
	return nil, nil
}

// CommitLegacy commits the block and the corresponding pvt data in an atomic operation
func (l *kvLedger) CommitLegacy(pvtdataAndBlock *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
	var err error
	block := pvtdataAndBlock.Block
	blockNo := pvtdataAndBlock.Block.Header.Number

	startBlockProcessing := time.Now()
	if commitOpts.FetchPvtDataFromLedger {
		// when we reach here, it means that the pvtdata store has the
		// pvtdata associated with this block but the stateDB might not
		// have it. During the commit of this block, no update would
		// happen in the pvtdata store as it already has the required data.

		// if there is any missing pvtData, the reconciler will fetch it
		// and update both the pvtdata store and the stateDB. Hence, we can
		// fetch whatever is available in the pvtdata store. If any or
		// all of the pvtdata associated with the block has expired
		// and is no longer available in the pvtdata store, it would
		// eventually expire in the stateDB as well (though the stateDB
		// would miss the pvtData until then)
		txPvtData, err := l.blockStore.GetPvtDataByNum(blockNo, nil)
		if err != nil {
			return err
		}
		pvtdataAndBlock.PvtData = convertTxPvtDataArrayToMap(txPvtData)
	}

	logger.Debugf("[%s] Validating state for block [%d]", l.ledgerID, blockNo)
	txstatsInfo, updateBatchBytes, err := l.txtmgmt.ValidateAndPrepare(pvtdataAndBlock, true)
	if err != nil {
		return err
	}
	elapsedBlockProcessing := time.Since(startBlockProcessing)

	startBlockstorageAndPvtdataCommit := time.Now()
	logger.Debugf("[%s] Adding CommitHash to the block [%d]", l.ledgerID, blockNo)
	// the commitHash is computed and added to a block only if the hash chain
	// starts at block 1 (the first block after the genesis block). In other
	// words, commit hashes are added only when the peer has processed the chain
	// from the beginning, i.e., after joining a new channel or a peer reset
	if block.Header.Number == 1 || l.commitHash != nil {
		l.addBlockCommitHash(pvtdataAndBlock.Block, updateBatchBytes)
	}

	logger.Debugf("[%s] Committing block [%d] to storage", l.ledgerID, blockNo)
	l.blockAPIsRWLock.Lock()
	defer l.blockAPIsRWLock.Unlock()
	if err = l.blockStore.CommitWithPvtData(pvtdataAndBlock); err != nil {
		return err
	}
	elapsedBlockstorageAndPvtdataCommit := time.Since(startBlockstorageAndPvtdataCommit)

	startCommitState := time.Now()
	logger.Debugf("[%s] Committing block [%d] transactions to state database", l.ledgerID, blockNo)
	if err = l.txtmgmt.Commit(); err != nil {
		panic(errors.WithMessage(err, "error during commit to txmgr"))
	}
	elapsedCommitState := time.Since(startCommitState)

	// The history database could be written in parallel with the state DB and/or asynchronously
	// as a future optimization, although it has not been a bottleneck, so there is no need to
	// clutter the log with its elapsed duration.
	if l.historyDB != nil {
		logger.Debugf("[%s] Committing block [%d] transactions to history database", l.ledgerID, blockNo)
		if err := l.historyDB.Commit(block); err != nil {
			panic(errors.WithMessage(err, "Error during commit to history db"))
		}
	}

	logger.Infof("[%s] Committed block [%d] with %d transaction(s) in %dms (state_validation=%dms block_and_pvtdata_commit=%dms state_commit=%dms)"+
		" commitHash=[%x]",
		l.ledgerID, block.Header.Number, len(block.Data.Data),
		time.Since(startBlockProcessing)/time.Millisecond,
		elapsedBlockProcessing/time.Millisecond,
		elapsedBlockstorageAndPvtdataCommit/time.Millisecond,
		elapsedCommitState/time.Millisecond,
		l.commitHash,
	)
	l.updateBlockStats(
		elapsedBlockProcessing,
		elapsedBlockstorageAndPvtdataCommit,
		elapsedCommitState,
		txstatsInfo,
	)
	return nil
}

func convertTxPvtDataArrayToMap(txPvtData []*ledger.TxPvtData) ledger.TxPvtDataMap {
	txPvtDataMap := make(ledger.TxPvtDataMap)
	for _, pvtData := range txPvtData {
		txPvtDataMap[pvtData.SeqInBlock] = pvtData
	}
	return txPvtDataMap
}

func (l *kvLedger) updateBlockStats(
	blockProcessingTime time.Duration,
	blockstorageAndPvtdataCommitTime time.Duration,
	statedbCommitTime time.Duration,
	txstatsInfo []*txmgr.TxStatInfo,
) {
	l.stats.updateBlockProcessingTime(blockProcessingTime)
	l.stats.updateBlockstorageAndPvtdataCommitTime(blockstorageAndPvtdataCommitTime)
	l.stats.updateStatedbCommitTime(statedbCommitTime)
	l.stats.updateTransactionsStats(txstatsInfo)
}

// GetMissingPvtDataInfoForMostRecentBlocks returns the missing private data information for the
// most recent `maxBlock` blocks that are missing at least one private data item of an eligible collection.
func (l *kvLedger) GetMissingPvtDataInfoForMostRecentBlocks(maxBlock int) (ledger.MissingPvtDataInfo, error) {
	// the missing pvtData info in the pvtdata store could belong to a block which is yet
	// to be processed and committed to the blockStore and stateDB.
	// In such cases, we cannot return missing pvtData info. Otherwise, we would end up with
	// an inconsistent state database.
	if l.blockStore.IsPvtStoreAheadOfBlockStore() {
		return nil, nil
	}
	return l.blockStore.GetMissingPvtDataInfoForMostRecentBlocks(maxBlock)
}

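// addBlockCommitHash computes the new commit hash as the SHA256 of the varint-encoded
// length of the transaction validation flags, the flags themselves, the serialized
// state update batch, and the previous commit hash, and stores it in the block
// metadata at BlockMetadataIndex_COMMIT_HASH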
func (l *kvLedger) addBlockCommitHash(block *common.Block, updateBatchBytes []byte) {
	var valueBytes []byte

	txValidationCode := block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER]
	valueBytes = append(valueBytes, proto.EncodeVarint(uint64(len(txValidationCode)))...)
	valueBytes = append(valueBytes, txValidationCode...)
	valueBytes = append(valueBytes, updateBatchBytes...)
	valueBytes = append(valueBytes, l.commitHash...)

	l.commitHash = util.ComputeSHA256(valueBytes)
	block.Metadata.Metadata[common.BlockMetadataIndex_COMMIT_HASH] = protoutil.MarshalOrPanic(&common.Metadata{Value: l.commitHash})
}

// GetPvtDataAndBlockByNum returns the block and the corresponding pvt data.
// The pvt data is filtered by the list of 'collections' supplied
func (l *kvLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) {
	blockAndPvtdata, err := l.blockStore.GetPvtDataAndBlockByNum(blockNum, filter)
	l.blockAPIsRWLock.RLock()
	l.blockAPIsRWLock.RUnlock()
	return blockAndPvtdata, err
}

// GetPvtDataByNum returns only the pvt data corresponding to the given block number.
// The pvt data is filtered by the list of 'collections' supplied
func (l *kvLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	pvtdata, err := l.blockStore.GetPvtDataByNum(blockNum, filter)
	l.blockAPIsRWLock.RLock()
	l.blockAPIsRWLock.RUnlock()
	return pvtdata, err
}

// DoesPvtDataInfoExist returns true when
// (1) the ledger has pvtdata associated with the given block number, or
// (2) some or all of the pvtdata associated with the given block number is missing
// but the missing info is recorded in the ledger, or
// (3) the committed block does not contain any pvtData.
func (l *kvLedger) DoesPvtDataInfoExist(blockNum uint64) (bool, error) {
	return l.blockStore.DoesPvtDataInfoExist(blockNum)
}

func (l *kvLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
	return l.configHistoryRetriever, nil
}

func (l *kvLedger) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata) ([]*ledger.PvtdataHashMismatch, error) {
	logger.Debugf("[%s:] Comparing pvtData of [%d] old blocks against the hashes in the transactions' rwsets to find valid and invalid data",
		l.ledgerID, len(reconciledPvtdata))

	hashVerifiedPvtData, hashMismatches, err := constructValidAndInvalidPvtData(reconciledPvtdata, l.blockStore)
	if err != nil {
		return nil, err
	}

	err = l.applyValidTxPvtDataOfOldBlocks(hashVerifiedPvtData)
	if err != nil {
		return nil, err
	}

	logger.Debugf("[%s:] Committing pvtData of [%d] old blocks to the pvtdatastore", l.ledgerID, len(reconciledPvtdata))
	err = l.blockStore.CommitPvtDataOfOldBlocks(hashVerifiedPvtData)
	if err != nil {
		return nil, err
	}

	return hashMismatches, nil
}

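// applyValidTxPvtDataOfOldBlocks filters out the pvtData of invalid transactions
// and commits the remaining pvtData to the state DB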
func (l *kvLedger) applyValidTxPvtDataOfOldBlocks(hashVerifiedPvtData map[uint64][]*ledger.TxPvtData) error {
	logger.Debugf("[%s:] Filtering pvtData of invalid transactions", l.ledgerID)
	committedPvtData, err := filterPvtDataOfInvalidTx(hashVerifiedPvtData, l.blockStore)
	if err != nil {
		return err
	}

	// Assume the peer fails after storing the pvtData of an old block in the stateDB but before
	// storing it in the block store. When the peer starts again, the reconciler finds that the
	// pvtData is missing in the ledger store and hence fetches that data again. As a result,
	// RemoveStaleAndCommitPvtDataOfOldBlocks receives already existing data. In this scenario,
	// RemoveStaleAndCommitPvtDataOfOldBlocks simply replaces the old entry, as we always
	// compare the hashed version with this pvtData. There is no problem in terms of data
	// consistency. However, if the reconciler is disabled before the peer restart, then the
	// pvtData in the stateDB may not be in sync with the pvtData in the ledger store until
	// the reconciler is enabled.
	logger.Debugf("[%s:] Committing pvtData of [%d] old blocks to the stateDB", l.ledgerID, len(hashVerifiedPvtData))
	return l.txtmgmt.RemoveStaleAndCommitPvtDataOfOldBlocks(committedPvtData)
}

func (l *kvLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
	return l, nil
}

// Close closes `KVLedger`
func (l *kvLedger) Close() {
	l.blockStore.Shutdown()
	l.txtmgmt.Shutdown()
}

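// blocksItr wraps the iterator returned by the block store and applies the same
// read-lock barrier as the other block retrieval APIs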
type blocksItr struct {
	blockAPIsRWLock *sync.RWMutex
	blocksItr       commonledger.ResultsIterator
}

func (itr *blocksItr) Next() (commonledger.QueryResult, error) {
	block, err := itr.blocksItr.Next()
	if err != nil {
		return nil, err
	}
	itr.blockAPIsRWLock.RLock()
	itr.blockAPIsRWLock.RUnlock()
	return block, nil
}

func (itr *blocksItr) Close() {
	itr.blocksItr.Close()
}

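// collectionInfoRetriever retrieves the static collection config of a chaincode
// collection using a query executor obtained from the ledger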
type collectionInfoRetriever struct {
	ledgerID     string
	ledger       ledger.PeerLedger
	infoProvider ledger.DeployedChaincodeInfoProvider
}

func (r *collectionInfoRetriever) CollectionInfo(chaincodeName, collectionName string) (*peer.StaticCollectionConfig, error) {
	qe, err := r.ledger.NewQueryExecutor()
	if err != nil {
		return nil, err
	}
	defer qe.Done()
	return r.infoProvider.CollectionInfo(r.ledgerID, chaincodeName, collectionName, qe)
}

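// ccEventListenerAdaptor adapts the legacy cceventmgmt.ChaincodeLifecycleEventListener
// so that it can be registered with a ledger.ChaincodeLifecycleEventProvider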
type ccEventListenerAdaptor struct {
	legacyEventListener cceventmgmt.ChaincodeLifecycleEventListener
}

func (a *ccEventListenerAdaptor) HandleChaincodeDeploy(chaincodeDefinition *ledger.ChaincodeDefinition, dbArtifactsTar []byte) error {
	return a.legacyEventListener.HandleChaincodeDeploy(&cceventmgmt.ChaincodeDefinition{
		Name:              chaincodeDefinition.Name,
		Hash:              chaincodeDefinition.Hash,
		Version:           chaincodeDefinition.Version,
		CollectionConfigs: chaincodeDefinition.CollectionConfigs,
	},
		dbArtifactsTar,
	)
}

func (a *ccEventListenerAdaptor) ChaincodeDeployDone(succeeded bool) {
	a.legacyEventListener.ChaincodeDeployDone(succeeded)
}

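// filterPvtDataOfInvalidTx keeps only the pvtData of transactions that are marked
// valid in their block's TxValidationFlags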
func filterPvtDataOfInvalidTx(hashVerifiedPvtData map[uint64][]*ledger.TxPvtData, blockStore *ledgerstorage.Store) (map[uint64][]*ledger.TxPvtData, error) {
	committedPvtData := make(map[uint64][]*ledger.TxPvtData)
	for blkNum, txsPvtData := range hashVerifiedPvtData {
		// TODO: Instead of retrieving the whole block, we need to retrieve only
		// the TxValidationFlags from the block metadata. For that, we would need
		// to add a new index for the block metadata. FAB-15808
		block, err := blockStore.RetrieveBlockByNumber(blkNum)
		if err != nil {
			return nil, err
		}
		blockValidationFlags := lutil.TxValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])

		var blksPvtData []*ledger.TxPvtData
		for _, pvtData := range txsPvtData {
			if blockValidationFlags.IsValid(int(pvtData.SeqInBlock)) {
				blksPvtData = append(blksPvtData, pvtData)
			}
		}
		committedPvtData[blkNum] = blksPvtData
	}
	return committedPvtData, nil
}