github.com/osdi23p228/fabric@v0.0.0-20221218062954-77808885f5db/core/ledger/kvledger/kv_ledger.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package kvledger
     8  
     9  import (
    10  	"fmt"
    11  	"sync"
    12  	"sync/atomic"
    13  	"time"
    14  
    15  	"github.com/golang/protobuf/proto"
    16  	"github.com/hyperledger/fabric-protos-go/common"
    17  	"github.com/hyperledger/fabric-protos-go/peer"
    18  	"github.com/osdi23p228/fabric/bccsp"
    19  	"github.com/osdi23p228/fabric/common/flogging"
    20  	commonledger "github.com/osdi23p228/fabric/common/ledger"
    21  	"github.com/osdi23p228/fabric/common/ledger/blkstorage"
    22  	"github.com/osdi23p228/fabric/common/util"
    23  	"github.com/osdi23p228/fabric/core/ledger"
    24  	"github.com/osdi23p228/fabric/core/ledger/cceventmgmt"
    25  	"github.com/osdi23p228/fabric/core/ledger/confighistory"
    26  	"github.com/osdi23p228/fabric/core/ledger/kvledger/bookkeeping"
    27  	"github.com/osdi23p228/fabric/core/ledger/kvledger/history"
    28  	"github.com/osdi23p228/fabric/core/ledger/kvledger/txmgmt/privacyenabledstate"
    29  	"github.com/osdi23p228/fabric/core/ledger/kvledger/txmgmt/txmgr"
    30  	"github.com/osdi23p228/fabric/core/ledger/kvledger/txmgmt/validation"
    31  	"github.com/osdi23p228/fabric/core/ledger/pvtdatapolicy"
    32  	"github.com/osdi23p228/fabric/core/ledger/pvtdatastorage"
    33  	"github.com/osdi23p228/fabric/internal/pkg/txflags"
    34  	"github.com/osdi23p228/fabric/protoutil"
    35  	"github.com/pkg/errors"
    36  )
    37  
    38  var logger = flogging.MustGetLogger("kvledger")
    39  
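         // rwsetHashOpts and snapshotHashOpts select the hash algorithm (SHA-256) used with the
         // bccsp hash provider: rwsetHashOpts for pvt rwset hashing (see rwsetHashFunc below) and,
         // presumably, snapshotHashOpts for snapshot generation elsewhere in this package.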
    40  var (
    41  	rwsetHashOpts    = &bccsp.SHA256Opts{}
    42  	snapshotHashOpts = &bccsp.SHA256Opts{}
    43  )
    44  
     45  // kvLedger provides an implementation of `ledger.PeerLedger`.
     46  // This implementation offers a key-value-based data model.
    47  type kvLedger struct {
    48  	ledgerID               string
    49  	blockStore             *blkstorage.BlockStore
    50  	pvtdataStore           *pvtdatastorage.Store
    51  	txmgr                  *txmgr.LockBasedTxMgr
    52  	historyDB              *history.DB
    53  	configHistoryRetriever *confighistory.Retriever
    54  	blockAPIsRWLock        *sync.RWMutex
    55  	stats                  *ledgerStats
    56  	commitHash             []byte
    57  	hashProvider           ledger.HashProvider
    58  	snapshotsConfig        *ledger.SnapshotsConfig
     59  	// isPvtstoreAheadOfBlkstore is read during missing pvtData
     60  	// reconciliation and may be updated during a regular block commit.
     61  	// Hence, we use an atomic value to ensure consistent reads.
    62  	isPvtstoreAheadOfBlkstore atomic.Value
    63  }
    64  
    65  type lgrInitializer struct {
    66  	ledgerID                 string
    67  	blockStore               *blkstorage.BlockStore
    68  	pvtdataStore             *pvtdatastorage.Store
    69  	stateDB                  *privacyenabledstate.DB
    70  	historyDB                *history.DB
    71  	configHistoryMgr         *confighistory.Mgr
    72  	stateListeners           []ledger.StateListener
    73  	bookkeeperProvider       bookkeeping.Provider
    74  	ccInfoProvider           ledger.DeployedChaincodeInfoProvider
    75  	ccLifecycleEventProvider ledger.ChaincodeLifecycleEventProvider
    76  	stats                    *ledgerStats
    77  	customTxProcessors       map[common.HeaderType]ledger.CustomTxProcessor
    78  	hashProvider             ledger.HashProvider
    79  	snapshotsConfig          *ledger.SnapshotsConfig
    80  }
    81  
    82  func newKVLedger(initializer *lgrInitializer) (*kvLedger, error) {
    83  	ledgerID := initializer.ledgerID
    84  	logger.Debugf("Creating KVLedger ledgerID=%s: ", ledgerID)
    85  	l := &kvLedger{
    86  		ledgerID:        ledgerID,
    87  		blockStore:      initializer.blockStore,
    88  		pvtdataStore:    initializer.pvtdataStore,
    89  		historyDB:       initializer.historyDB,
    90  		hashProvider:    initializer.hashProvider,
    91  		snapshotsConfig: initializer.snapshotsConfig,
    92  		blockAPIsRWLock: &sync.RWMutex{},
    93  	}
    94  
    95  	btlPolicy := pvtdatapolicy.ConstructBTLPolicy(&collectionInfoRetriever{ledgerID, l, initializer.ccInfoProvider})
    96  
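         	// rwsetHashFunc returns the hash of the supplied bytes using the configured hash
         	// provider; it is wired into the txmgr below via Initializer.HashFunc.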
    97  	rwsetHashFunc := func(data []byte) ([]byte, error) {
    98  		hash, err := initializer.hashProvider.GetHash(rwsetHashOpts)
    99  		if err != nil {
   100  			return nil, err
   101  		}
   102  		if _, err = hash.Write(data); err != nil {
   103  			return nil, err
   104  		}
   105  		return hash.Sum(nil), nil
   106  	}
   107  
   108  	txmgrInitializer := &txmgr.Initializer{
   109  		LedgerID:            ledgerID,
   110  		DB:                  initializer.stateDB,
   111  		StateListeners:      initializer.stateListeners,
   112  		BtlPolicy:           btlPolicy,
   113  		BookkeepingProvider: initializer.bookkeeperProvider,
   114  		CCInfoProvider:      initializer.ccInfoProvider,
   115  		CustomTxProcessors:  initializer.customTxProcessors,
   116  		HashFunc:            rwsetHashFunc,
   117  	}
   118  	if err := l.initTxMgr(txmgrInitializer); err != nil {
   119  		return nil, err
   120  	}
   121  
    122  	// btlPolicy internally uses a query executor and indirectly ends up using the txmgr.
    123  	// Hence, we must init the pvtdataStore only after the txmgr has been initialized.
   124  	l.pvtdataStore.Init(btlPolicy)
   125  
   126  	var err error
   127  	l.commitHash, err = l.lastPersistedCommitHash()
   128  	if err != nil {
   129  		return nil, err
   130  	}
   131  
   132  	isAhead, err := l.isPvtDataStoreAheadOfBlockStore()
   133  	if err != nil {
   134  		return nil, err
   135  	}
   136  	l.isPvtstoreAheadOfBlkstore.Store(isAhead)
   137  
    138  	// TODO Move the function `GetChaincodeEventListener` to the ledger interface and move
    139  	// this functionality of registering for events to the ledgermgmt package so that it
    140  	// can be reused across other future ledger implementations
   141  	ccEventListener := initializer.stateDB.GetChaincodeEventListener()
   142  	logger.Debugf("Register state db for chaincode lifecycle events: %t", ccEventListener != nil)
   143  	if ccEventListener != nil {
   144  		cceventmgmt.GetMgr().Register(ledgerID, ccEventListener)
   145  		initializer.ccLifecycleEventProvider.RegisterListener(ledgerID, &ccEventListenerAdaptor{ccEventListener})
   146  	}
   147  
    148  	// Recover both the state DB and the history DB if they are out of sync with the block storage
   149  	if err := l.recoverDBs(); err != nil {
   150  		return nil, err
   151  	}
   152  	l.configHistoryRetriever = initializer.configHistoryMgr.GetRetriever(ledgerID, l)
   153  
   154  	l.stats = initializer.stats
   155  	return l, nil
   156  }
   157  
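         // initTxMgr constructs the lock-based txmgr and initializes the registered state listeners
         // using a query executor that bypasses collection checks (a workaround, noted below, for
         // populating the lifecycle cache).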
   158  func (l *kvLedger) initTxMgr(initializer *txmgr.Initializer) error {
   159  	var err error
   160  	txmgr, err := txmgr.NewLockBasedTxMgr(initializer)
   161  	if err != nil {
   162  		return err
   163  	}
   164  	l.txmgr = txmgr
    165  	// This is a workaround for populating the lifecycle cache.
    166  	// See the comments on NewQueryExecutorNoCollChecks for details.
   167  	qe, err := txmgr.NewQueryExecutorNoCollChecks()
   168  	if err != nil {
   169  		return err
   170  	}
   171  	defer qe.Done()
   172  	for _, sl := range initializer.StateListeners {
   173  		if err := sl.Initialize(l.ledgerID, qe); err != nil {
   174  			return err
   175  		}
   176  	}
    177  	return nil
   178  }
   179  
   180  func (l *kvLedger) lastPersistedCommitHash() ([]byte, error) {
   181  	bcInfo, err := l.GetBlockchainInfo()
   182  	if err != nil {
   183  		return nil, err
   184  	}
   185  	if bcInfo.Height == 0 {
   186  		logger.Debugf("Chain is empty")
   187  		return nil, nil
   188  	}
   189  
   190  	logger.Debugf("Fetching block [%d] to retrieve the currentCommitHash", bcInfo.Height-1)
   191  	block, err := l.GetBlockByNumber(bcInfo.Height - 1)
   192  	if err != nil {
   193  		return nil, err
   194  	}
   195  
   196  	if len(block.Metadata.Metadata) < int(common.BlockMetadataIndex_COMMIT_HASH+1) {
   197  		logger.Debugf("Last block metadata does not contain commit hash")
   198  		return nil, nil
   199  	}
   200  
   201  	commitHash := &common.Metadata{}
   202  	err = proto.Unmarshal(block.Metadata.Metadata[common.BlockMetadataIndex_COMMIT_HASH], commitHash)
   203  	if err != nil {
   204  		return nil, errors.Wrap(err, "error unmarshaling last persisted commit hash")
   205  	}
   206  	return commitHash.Value, nil
   207  }
   208  
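         // isPvtDataStoreAheadOfBlockStore reports whether the pvt data store height exceeds the
         // block store height. This can happen when the peer stops between the pvt data commit and
         // the block commit (see commitToPvtAndBlockStore), or after a ledger reset/rollback.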
   209  func (l *kvLedger) isPvtDataStoreAheadOfBlockStore() (bool, error) {
   210  	blockStoreInfo, err := l.blockStore.GetBlockchainInfo()
   211  	if err != nil {
   212  		return false, err
   213  	}
   214  	pvtstoreHeight, err := l.pvtdataStore.LastCommittedBlockHeight()
   215  	if err != nil {
   216  		return false, err
   217  	}
   218  	return pvtstoreHeight > blockStoreInfo.Height, nil
   219  }
   220  
   221  func (l *kvLedger) recoverDBs() error {
   222  	logger.Debugf("Entering recoverDB()")
   223  	if err := l.syncStateAndHistoryDBWithBlockstore(); err != nil {
   224  		return err
   225  	}
   226  	if err := l.syncStateDBWithOldBlkPvtdata(); err != nil {
   227  		return err
   228  	}
   229  	return nil
   230  }
   231  
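         // syncStateAndHistoryDBWithBlockstore replays committed blocks from the block store into
         // whichever of the state DB and the history DB reports that it needs recovery.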
   232  func (l *kvLedger) syncStateAndHistoryDBWithBlockstore() error {
    233  	// If there is no block in the block storage, there is nothing to recover.
   234  	info, _ := l.blockStore.GetBlockchainInfo()
   235  	if info.Height == 0 {
   236  		logger.Debug("Block storage is empty.")
   237  		return nil
   238  	}
   239  	lastAvailableBlockNum := info.Height - 1
   240  	recoverables := []recoverable{l.txmgr}
   241  	if l.historyDB != nil {
   242  		recoverables = append(recoverables, l.historyDB)
   243  	}
   244  	recoverers := []*recoverer{}
   245  	for _, recoverable := range recoverables {
   246  		recoverFlag, firstBlockNum, err := recoverable.ShouldRecover(lastAvailableBlockNum)
   247  		if err != nil {
   248  			return err
   249  		}
   250  
   251  		// During ledger reset/rollback, the state database must be dropped. If the state database
    252  		// uses goleveldb, the reset/rollback code itself drops the DB. If it uses CouchDB, the
   253  		// DB must be dropped manually. Hence, we compare (only for the stateDB) the height
   254  		// of the state DB and block store to ensure that the state DB is dropped.
   255  
    256  		// firstBlockNum is simply the next block number expected by the state DB;
    257  		// in other words, firstBlockNum is the current height of the state DB.
   258  		if firstBlockNum > lastAvailableBlockNum+1 {
   259  			dbName := recoverable.Name()
   260  			return fmt.Errorf("the %s database [height=%d] is ahead of the block store [height=%d]. "+
   261  				"This is possible when the %s database is not dropped after a ledger reset/rollback. "+
   262  				"The %s database can safely be dropped and will be rebuilt up to block store height upon the next peer start.",
   263  				dbName, firstBlockNum, lastAvailableBlockNum+1, dbName, dbName)
   264  		}
   265  		if recoverFlag {
   266  			recoverers = append(recoverers, &recoverer{firstBlockNum, recoverable})
   267  		}
   268  	}
   269  	if len(recoverers) == 0 {
   270  		return nil
   271  	}
   272  	if len(recoverers) == 1 {
   273  		return l.recommitLostBlocks(recoverers[0].firstBlockNum, lastAvailableBlockNum, recoverers[0].recoverable)
   274  	}
   275  
   276  	// both dbs need to be recovered
   277  	if recoverers[0].firstBlockNum > recoverers[1].firstBlockNum {
    278  		// swap (put the lagging DB at index 0)
   279  		recoverers[0], recoverers[1] = recoverers[1], recoverers[0]
   280  	}
   281  	if recoverers[0].firstBlockNum != recoverers[1].firstBlockNum {
    282  		// bring the lagging DB up to the height of the other DB
   283  		if err := l.recommitLostBlocks(recoverers[0].firstBlockNum, recoverers[1].firstBlockNum-1,
   284  			recoverers[0].recoverable); err != nil {
   285  			return err
   286  		}
   287  	}
    288  	// bring both the DBs up to the block storage height
   289  	return l.recommitLostBlocks(recoverers[1].firstBlockNum, lastAvailableBlockNum,
   290  		recoverers[0].recoverable, recoverers[1].recoverable)
   291  }
   292  
   293  func (l *kvLedger) syncStateDBWithOldBlkPvtdata() error {
   294  	// TODO: syncStateDBWithOldBlkPvtdata, GetLastUpdatedOldBlocksPvtData(),
   295  	// and ResetLastUpdatedOldBlocksList() can be removed in > v2 LTS.
   296  	// From v2.0 onwards, we do not store the last updatedBlksList.
   297  	// Only to support the rolling upgrade from v14 LTS to v2 LTS, we
   298  	// retain these three functions in v2.0 - FAB-16294.
   299  
   300  	blocksPvtData, err := l.pvtdataStore.GetLastUpdatedOldBlocksPvtData()
   301  	if err != nil {
   302  		return err
   303  	}
   304  
   305  	// Assume that the peer has restarted after a rollback or a reset.
   306  	// As the pvtdataStore can contain pvtData of yet to be committed blocks,
    307  	// we need to filter those out before passing the data to the transaction manager
    308  	// for stateDB updates.
   309  	if err := l.filterYetToCommitBlocks(blocksPvtData); err != nil {
   310  		return err
   311  	}
   312  
   313  	if err = l.applyValidTxPvtDataOfOldBlocks(blocksPvtData); err != nil {
   314  		return err
   315  	}
   316  
   317  	return l.pvtdataStore.ResetLastUpdatedOldBlocksList()
   318  }
   319  
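         // filterYetToCommitBlocks removes from blocksPvtData any entries for block numbers at or
         // above the current block store height, i.e., pvt data of blocks that are not yet committed.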
   320  func (l *kvLedger) filterYetToCommitBlocks(blocksPvtData map[uint64][]*ledger.TxPvtData) error {
   321  	info, err := l.blockStore.GetBlockchainInfo()
   322  	if err != nil {
   323  		return err
   324  	}
   325  	for blkNum := range blocksPvtData {
   326  		if blkNum > info.Height-1 {
   327  			logger.Infof("found pvtdata associated with yet to be committed block [%d]", blkNum)
   328  			delete(blocksPvtData, blkNum)
   329  		}
   330  	}
   331  	return nil
   332  }
   333  
    334  // recommitLostBlocks retrieves the blocks in the specified range and commits their write sets
    335  // to the state DB, the history DB, or both
   336  func (l *kvLedger) recommitLostBlocks(firstBlockNum uint64, lastBlockNum uint64, recoverables ...recoverable) error {
   337  	logger.Infof("Recommitting lost blocks - firstBlockNum=%d, lastBlockNum=%d, recoverables=%#v", firstBlockNum, lastBlockNum, recoverables)
   338  	var err error
   339  	var blockAndPvtdata *ledger.BlockAndPvtData
   340  	for blockNumber := firstBlockNum; blockNumber <= lastBlockNum; blockNumber++ {
   341  		if blockAndPvtdata, err = l.GetPvtDataAndBlockByNum(blockNumber, nil); err != nil {
   342  			return err
   343  		}
   344  		for _, r := range recoverables {
   345  			if err := r.CommitLostBlock(blockAndPvtdata); err != nil {
   346  				return err
   347  			}
   348  		}
   349  	}
   350  	logger.Infof("Recommitted lost blocks - firstBlockNum=%d, lastBlockNum=%d, recoverables=%#v", firstBlockNum, lastBlockNum, recoverables)
   351  	return nil
   352  }
   353  
   354  // GetTransactionByID retrieves a transaction by id
   355  func (l *kvLedger) GetTransactionByID(txID string) (*peer.ProcessedTransaction, error) {
   356  	l.blockAPIsRWLock.RLock()
   357  	defer l.blockAPIsRWLock.RUnlock()
   358  	tranEnv, err := l.blockStore.RetrieveTxByID(txID)
   359  	if err != nil {
   360  		return nil, err
   361  	}
   362  	txVResult, err := l.blockStore.RetrieveTxValidationCodeByTxID(txID)
   363  	if err != nil {
   364  		return nil, err
   365  	}
   366  	processedTran := &peer.ProcessedTransaction{TransactionEnvelope: tranEnv, ValidationCode: int32(txVResult)}
   367  	return processedTran, nil
   368  }
   369  
    370  // GetBlockchainInfo returns basic info about the blockchain
   371  func (l *kvLedger) GetBlockchainInfo() (*common.BlockchainInfo, error) {
   372  	l.blockAPIsRWLock.RLock()
   373  	defer l.blockAPIsRWLock.RUnlock()
   374  	bcInfo, err := l.blockStore.GetBlockchainInfo()
   375  	return bcInfo, err
   376  }
   377  
    378  // GetBlockByNumber returns the block at the given height.
    379  // A blockNumber of math.MaxUint64 returns the last block.
   380  func (l *kvLedger) GetBlockByNumber(blockNumber uint64) (*common.Block, error) {
   381  	l.blockAPIsRWLock.RLock()
   382  	defer l.blockAPIsRWLock.RUnlock()
   383  	block, err := l.blockStore.RetrieveBlockByNumber(blockNumber)
   384  	return block, err
   385  }
   386  
    387  // GetBlocksIterator returns an iterator that starts from `startBlockNumber` (inclusive).
    388  // The iterator is blocking, i.e., it blocks until the next block becomes available in the ledger.
    389  // The ResultsIterator contains results of type BlockHolder
   390  func (l *kvLedger) GetBlocksIterator(startBlockNumber uint64) (commonledger.ResultsIterator, error) {
   391  	blkItr, err := l.blockStore.RetrieveBlocks(startBlockNumber)
   392  	if err != nil {
   393  		return nil, err
   394  	}
   395  	return &blocksItr{l.blockAPIsRWLock, blkItr}, nil
   396  }
   397  
    398  // GetBlockByHash returns a block given its hash
   399  func (l *kvLedger) GetBlockByHash(blockHash []byte) (*common.Block, error) {
   400  	block, err := l.blockStore.RetrieveBlockByHash(blockHash)
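         	// Acquiring and immediately releasing the read lock is effectively a barrier: it waits
         	// for any in-progress block commit (which holds the write lock) to finish before the
         	// block is returned.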
   401  	l.blockAPIsRWLock.RLock()
   402  	l.blockAPIsRWLock.RUnlock()
   403  	return block, err
   404  }
   405  
   406  // GetBlockByTxID returns a block which contains a transaction
   407  func (l *kvLedger) GetBlockByTxID(txID string) (*common.Block, error) {
   408  	l.blockAPIsRWLock.RLock()
   409  	defer l.blockAPIsRWLock.RUnlock()
   410  	block, err := l.blockStore.RetrieveBlockByTxID(txID)
   411  	return block, err
   412  }
   413  
   414  func (l *kvLedger) GetTxValidationCodeByTxID(txID string) (peer.TxValidationCode, error) {
   415  	l.blockAPIsRWLock.RLock()
   416  	defer l.blockAPIsRWLock.RUnlock()
   417  	txValidationCode, err := l.blockStore.RetrieveTxValidationCodeByTxID(txID)
   418  	return txValidationCode, err
   419  }
   420  
    421  // NewTxSimulator returns a new `ledger.TxSimulator`
   422  func (l *kvLedger) NewTxSimulator(txid string) (ledger.TxSimulator, error) {
   423  	return l.txmgr.NewTxSimulator(txid)
   424  }
   425  
    426  // NewQueryExecutor gives a handle to a query executor.
    427  // A client can obtain more than one 'QueryExecutor' for parallel execution.
    428  // Any required synchronization should be performed at the implementation level
   429  func (l *kvLedger) NewQueryExecutor() (ledger.QueryExecutor, error) {
   430  	return l.txmgr.NewQueryExecutor(util.GenerateUUID())
   431  }
   432  
    433  // NewHistoryQueryExecutor gives a handle to a history query executor.
    434  // A client can obtain more than one 'HistoryQueryExecutor' for parallel execution.
    435  // Any required synchronization should be performed at the implementation level.
    436  // The ledger's block store is passed in so that historical values can be looked up from the chain
   437  func (l *kvLedger) NewHistoryQueryExecutor() (ledger.HistoryQueryExecutor, error) {
   438  	if l.historyDB != nil {
   439  		return l.historyDB.NewQueryExecutor(l.blockStore)
   440  	}
   441  	return nil, nil
   442  }
   443  
   444  // CommitLegacy commits the block and the corresponding pvt data in an atomic operation
   445  func (l *kvLedger) CommitLegacy(pvtdataAndBlock *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
   446  	var err error
   447  	block := pvtdataAndBlock.Block
   448  	blockNo := pvtdataAndBlock.Block.Header.Number
   449  
   450  	startBlockProcessing := time.Now()
   451  	if commitOpts.FetchPvtDataFromLedger {
   452  		// when we reach here, it means that the pvtdata store has the
   453  		// pvtdata associated with this block but the stateDB might not
   454  		// have it. During the commit of this block, no update would
   455  		// happen in the pvtdata store as it already has the required data.
   456  
    457  		// If any pvtData is missing, the reconciler will fetch it and
    458  		// update both the pvtdataStore and the stateDB. Hence, we can
    459  		// fetch whatever is available in the pvtdataStore. If some or
    460  		// all of the pvtdata associated with the block has expired
    461  		// and is no longer available in the pvtdataStore, it would
    462  		// eventually expire in the stateDB as well (though the stateDB
    463  		// would miss the pvtData until then)
   464  		txPvtData, err := l.pvtdataStore.GetPvtDataByBlockNum(blockNo, nil)
   465  		if err != nil {
   466  			return err
   467  		}
   468  		pvtdataAndBlock.PvtData = convertTxPvtDataArrayToMap(txPvtData)
   469  	}
   470  
   471  	logger.Debugf("[%s] Validating state for block [%d]", l.ledgerID, blockNo)
   472  	txstatsInfo, updateBatchBytes, err := l.txmgr.ValidateAndPrepare(pvtdataAndBlock, true)
   473  	if err != nil {
   474  		return err
   475  	}
   476  	elapsedBlockProcessing := time.Since(startBlockProcessing)
   477  
   478  	startBlockstorageAndPvtdataCommit := time.Now()
   479  	logger.Debugf("[%s] Adding CommitHash to the block [%d]", l.ledgerID, blockNo)
    480  	// we need to ensure that the commitHash is computed and added to the block only
    481  	// after the genesis block. In other words, the commitHash is added only after
    482  	// joining a new channel or after a peer reset
   483  	if block.Header.Number == 1 || l.commitHash != nil {
   484  		l.addBlockCommitHash(pvtdataAndBlock.Block, updateBatchBytes)
   485  	}
   486  
   487  	logger.Debugf("[%s] Committing pvtdata and block [%d] to storage", l.ledgerID, blockNo)
   488  	l.blockAPIsRWLock.Lock()
   489  	defer l.blockAPIsRWLock.Unlock()
   490  	if err = l.commitToPvtAndBlockStore(pvtdataAndBlock); err != nil {
   491  		return err
   492  	}
   493  	elapsedBlockstorageAndPvtdataCommit := time.Since(startBlockstorageAndPvtdataCommit)
   494  
   495  	startCommitState := time.Now()
   496  	logger.Debugf("[%s] Committing block [%d] transactions to state database", l.ledgerID, blockNo)
   497  	if err = l.txmgr.Commit(); err != nil {
   498  		panic(errors.WithMessage(err, "error during commit to txmgr"))
   499  	}
   500  	elapsedCommitState := time.Since(startCommitState)
   501  
    502  	// The history database could be written in parallel with the state DB and/or asynchronously as a future optimization,
    503  	// although it has not been a bottleneck, so there is no need to clutter the log with its elapsed duration.
   504  	if l.historyDB != nil {
   505  		logger.Debugf("[%s] Committing block [%d] transactions to history database", l.ledgerID, blockNo)
   506  		if err := l.historyDB.Commit(block); err != nil {
   507  			panic(errors.WithMessage(err, "Error during commit to history db"))
   508  		}
   509  	}
   510  
   511  	logger.Infof("[%s] Committed block [%d] with %d transaction(s) in %dms (state_validation=%dms block_and_pvtdata_commit=%dms state_commit=%dms)"+
   512  		" commitHash=[%x]",
   513  		l.ledgerID, block.Header.Number, len(block.Data.Data),
   514  		time.Since(startBlockProcessing)/time.Millisecond,
   515  		elapsedBlockProcessing/time.Millisecond,
   516  		elapsedBlockstorageAndPvtdataCommit/time.Millisecond,
   517  		elapsedCommitState/time.Millisecond,
   518  		l.commitHash,
   519  	)
   520  	l.updateBlockStats(
   521  		elapsedBlockProcessing,
   522  		elapsedBlockstorageAndPvtdataCommit,
   523  		elapsedCommitState,
   524  		txstatsInfo,
   525  	)
   526  	return nil
   527  }
   528  
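         // commitToPvtAndBlockStore writes the pvt data first and the block second. If the peer stops
         // between the two writes, the pvt data store ends up ahead of the block store; that condition
         // is detected at startup (isPvtDataStoreAheadOfBlockStore) and the pvt data write is then
         // skipped when the same block is committed again.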
   529  func (l *kvLedger) commitToPvtAndBlockStore(blockAndPvtdata *ledger.BlockAndPvtData) error {
   530  	pvtdataStoreHt, err := l.pvtdataStore.LastCommittedBlockHeight()
   531  	if err != nil {
   532  		return err
   533  	}
   534  	blockNum := blockAndPvtdata.Block.Header.Number
   535  
   536  	if !l.isPvtstoreAheadOfBlkstore.Load().(bool) {
   537  		logger.Debugf("Writing block [%d] to pvt data store", blockNum)
    538  		// If a state fork occurs during a regular block commit,
    539  		// we have a mechanism to drop all blocks, refetch them,
    540  		// and re-process them. In the current way of doing this, we only drop
    541  		// the block files (and related artifacts) but we do not drop/overwrite the
    542  		// pvtdata storage, as that might lead to data loss.
    543  		// During block reprocessing, an invalid transaction with pvtdata
    544  		// may become valid, so we store the pvtdata of invalid transactions
    545  		// in the pvtdataStore too, as we do for the public data in the blockStore.
    546  		// Hence, we pass all pvtData present in the block to the pvtdataStore committer.
   547  		pvtData, missingPvtData := constructPvtDataAndMissingData(blockAndPvtdata)
   548  		if err := l.pvtdataStore.Commit(blockNum, pvtData, missingPvtData); err != nil {
   549  			return err
   550  		}
   551  	} else {
    552  		logger.Debugf("Skipping writing pvtData to the pvt data store as it is ahead of the block store")
   553  	}
   554  
   555  	if err := l.blockStore.AddBlock(blockAndPvtdata.Block); err != nil {
   556  		return err
   557  	}
   558  
   559  	if pvtdataStoreHt == blockNum+1 {
    560  		// We reach here only when the pvtdataStore was ahead
    561  		// of the blockStore at ledger initialization time.
    562  		// The pvtdataStore can be ahead of the blockStore when
    563  		// the peer restarts after a reset or rollback.
   564  		l.isPvtstoreAheadOfBlkstore.Store(false)
   565  	}
   566  
   567  	return nil
   568  }
   569  
   570  func convertTxPvtDataArrayToMap(txPvtData []*ledger.TxPvtData) ledger.TxPvtDataMap {
   571  	txPvtDataMap := make(ledger.TxPvtDataMap)
   572  	for _, pvtData := range txPvtData {
   573  		txPvtDataMap[pvtData.SeqInBlock] = pvtData
   574  	}
   575  	return txPvtDataMap
   576  }
   577  
   578  func (l *kvLedger) updateBlockStats(
   579  	blockProcessingTime time.Duration,
   580  	blockstorageAndPvtdataCommitTime time.Duration,
   581  	statedbCommitTime time.Duration,
   582  	txstatsInfo []*validation.TxStatInfo,
   583  ) {
   584  	l.stats.updateBlockProcessingTime(blockProcessingTime)
   585  	l.stats.updateBlockstorageAndPvtdataCommitTime(blockstorageAndPvtdataCommitTime)
   586  	l.stats.updateStatedbCommitTime(statedbCommitTime)
   587  	l.stats.updateTransactionsStats(txstatsInfo)
   588  }
   589  
    590  // GetMissingPvtDataInfoForMostRecentBlocks returns the missing private data information for the
    591  // most recent `maxBlock` blocks that are missing at least one private data item of an eligible collection.
   592  func (l *kvLedger) GetMissingPvtDataInfoForMostRecentBlocks(maxBlock int) (ledger.MissingPvtDataInfo, error) {
   593  	// the missing pvtData info in the pvtdataStore could belong to a block which is yet
   594  	// to be processed and committed to the blockStore and stateDB (such a scenario is possible
   595  	// after a peer rollback). In such cases, we cannot return missing pvtData info. Otherwise,
   596  	// we would end up in an inconsistent state database.
   597  	if l.isPvtstoreAheadOfBlkstore.Load().(bool) {
   598  		return nil, nil
   599  	}
   600  	// it is safe to not acquire a read lock on l.blockAPIsRWLock. Without a lock, the value of
   601  	// lastCommittedBlock can change due to a new block commit. As a result, we may not
   602  	// be able to fetch the missing data info of truly the most recent blocks. This
   603  	// decision was made to ensure that the regular block commit rate is not affected.
   604  	return l.pvtdataStore.GetMissingPvtDataInfoForMostRecentBlocks(maxBlock)
   605  }
   606  
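         // addBlockCommitHash chains the running commit hash: the new value is
         // SHA256(varint(len(txFilter)) || txFilter || updateBatchBytes || previousCommitHash),
         // and it is stored in the block metadata at BlockMetadataIndex_COMMIT_HASH.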
   607  func (l *kvLedger) addBlockCommitHash(block *common.Block, updateBatchBytes []byte) {
   608  	var valueBytes []byte
   609  
   610  	txValidationCode := block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER]
   611  	valueBytes = append(valueBytes, proto.EncodeVarint(uint64(len(txValidationCode)))...)
   612  	valueBytes = append(valueBytes, txValidationCode...)
   613  	valueBytes = append(valueBytes, updateBatchBytes...)
   614  	valueBytes = append(valueBytes, l.commitHash...)
   615  
   616  	l.commitHash = util.ComputeSHA256(valueBytes)
   617  	block.Metadata.Metadata[common.BlockMetadataIndex_COMMIT_HASH] = protoutil.MarshalOrPanic(&common.Metadata{Value: l.commitHash})
   618  }
   619  
   620  // GetPvtDataAndBlockByNum returns the block and the corresponding pvt data.
   621  // The pvt data is filtered by the list of 'collections' supplied
   622  func (l *kvLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) {
   623  	l.blockAPIsRWLock.RLock()
   624  	defer l.blockAPIsRWLock.RUnlock()
   625  
   626  	var block *common.Block
   627  	var pvtdata []*ledger.TxPvtData
   628  	var err error
   629  
   630  	if block, err = l.blockStore.RetrieveBlockByNumber(blockNum); err != nil {
   631  		return nil, err
   632  	}
   633  
   634  	if pvtdata, err = l.pvtdataStore.GetPvtDataByBlockNum(blockNum, filter); err != nil {
   635  		return nil, err
   636  	}
   637  
   638  	return &ledger.BlockAndPvtData{Block: block, PvtData: constructPvtdataMap(pvtdata)}, nil
   639  }
   640  
    641  // GetPvtDataByNum returns only the pvt data corresponding to the given block number.
    642  // The pvt data is filtered by the list of 'collections' supplied
   643  func (l *kvLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
   644  	l.blockAPIsRWLock.RLock()
   645  	defer l.blockAPIsRWLock.RUnlock()
   646  	var pvtdata []*ledger.TxPvtData
   647  	var err error
   648  	if pvtdata, err = l.pvtdataStore.GetPvtDataByBlockNum(blockNum, filter); err != nil {
   649  		return nil, err
   650  	}
   651  	return pvtdata, nil
   652  }
   653  
   654  // DoesPvtDataInfoExist returns true when
   655  // (1) the ledger has pvtdata associated with the given block number (or)
    656  // (2) some or all of the pvtdata associated with the given block number are missing but the
   657  //     missing info is recorded in the ledger (or)
   658  // (3) the block is committed but it does not contain even a single
   659  //     transaction with pvtData.
   660  func (l *kvLedger) DoesPvtDataInfoExist(blockNum uint64) (bool, error) {
   661  	pvtStoreHt, err := l.pvtdataStore.LastCommittedBlockHeight()
   662  	if err != nil {
   663  		return false, err
   664  	}
   665  	return blockNum+1 <= pvtStoreHt, nil
   666  }
   667  
   668  func (l *kvLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
   669  	return l.configHistoryRetriever, nil
   670  }
   671  
   672  func (l *kvLedger) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata, unreconciled ledger.MissingPvtDataInfo) ([]*ledger.PvtdataHashMismatch, error) {
    673  	logger.Debugf("[%s:] Comparing pvtData of [%d] old blocks against the hashes in the transactions' rwsets to find valid and invalid data",
   674  		l.ledgerID, len(reconciledPvtdata))
   675  
   676  	hashVerifiedPvtData, hashMismatches, err := constructValidAndInvalidPvtData(reconciledPvtdata, l.blockStore)
   677  	if err != nil {
   678  		return nil, err
   679  	}
   680  
   681  	err = l.applyValidTxPvtDataOfOldBlocks(hashVerifiedPvtData)
   682  	if err != nil {
   683  		return nil, err
   684  	}
   685  
   686  	logger.Debugf("[%s:] Committing pvtData of [%d] old blocks to the pvtdatastore", l.ledgerID, len(reconciledPvtdata))
   687  
   688  	err = l.pvtdataStore.CommitPvtDataOfOldBlocks(hashVerifiedPvtData, unreconciled)
   689  	if err != nil {
   690  		return nil, err
   691  	}
   692  
   693  	return hashMismatches, nil
   694  }
   695  
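         // applyValidTxPvtDataOfOldBlocks discards pvt data that belongs to transactions marked
         // invalid in the committed blocks and applies the remainder to the state DB via the txmgr.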
   696  func (l *kvLedger) applyValidTxPvtDataOfOldBlocks(hashVerifiedPvtData map[uint64][]*ledger.TxPvtData) error {
    697  	logger.Debugf("[%s:] Filtering out pvtData of invalid transactions", l.ledgerID)
   698  	committedPvtData, err := filterPvtDataOfInvalidTx(hashVerifiedPvtData, l.blockStore)
   699  	if err != nil {
   700  		return err
   701  	}
   702  
    703  	// Assume the peer fails after storing the pvtData of an old block in the stateDB but before
    704  	// storing it in the block store. When the peer starts again, the reconciler finds that the
    705  	// pvtData is missing in the ledger store and hence fetches that data again. As
    706  	// a result, RemoveStaleAndCommitPvtDataOfOldBlocks receives already existing data. In this
    707  	// scenario, RemoveStaleAndCommitPvtDataOfOldBlocks simply replaces the old entry, as the
    708  	// comparison is always made between the hashed version and this pvtData. There is no
    709  	// problem in terms of data consistency. However, if the reconciler is disabled before
    710  	// the peer restart, then the pvtData in the stateDB may not be in sync with the pvtData in
    711  	// the ledger store until the reconciler is enabled.
   712  	logger.Debugf("[%s:] Committing pvtData of [%d] old blocks to the stateDB", l.ledgerID, len(hashVerifiedPvtData))
   713  	return l.txmgr.RemoveStaleAndCommitPvtDataOfOldBlocks(committedPvtData)
   714  }
   715  
   716  func (l *kvLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
   717  	return l, nil
   718  }
   719  
   720  // Close closes `KVLedger`
   721  func (l *kvLedger) Close() {
   722  	l.blockStore.Shutdown()
   723  	l.txmgr.Shutdown()
   724  }
   725  
   726  type blocksItr struct {
   727  	blockAPIsRWLock *sync.RWMutex
   728  	blocksItr       commonledger.ResultsIterator
   729  }
   730  
   731  func (itr *blocksItr) Next() (commonledger.QueryResult, error) {
   732  	block, err := itr.blocksItr.Next()
   733  	if err != nil {
   734  		return nil, err
   735  	}
   736  	itr.blockAPIsRWLock.RLock()
   737  	itr.blockAPIsRWLock.RUnlock()
   738  	return block, nil
   739  }
   740  
   741  func (itr *blocksItr) Close() {
   742  	itr.blocksItr.Close()
   743  }
   744  
   745  type collectionInfoRetriever struct {
   746  	ledgerID     string
   747  	ledger       ledger.PeerLedger
   748  	infoProvider ledger.DeployedChaincodeInfoProvider
   749  }
   750  
   751  func (r *collectionInfoRetriever) CollectionInfo(chaincodeName, collectionName string) (*peer.StaticCollectionConfig, error) {
   752  	qe, err := r.ledger.NewQueryExecutor()
   753  	if err != nil {
   754  		return nil, err
   755  	}
   756  	defer qe.Done()
   757  	return r.infoProvider.CollectionInfo(r.ledgerID, chaincodeName, collectionName, qe)
   758  }
   759  
   760  type ccEventListenerAdaptor struct {
   761  	legacyEventListener cceventmgmt.ChaincodeLifecycleEventListener
   762  }
   763  
   764  func (a *ccEventListenerAdaptor) HandleChaincodeDeploy(chaincodeDefinition *ledger.ChaincodeDefinition, dbArtifactsTar []byte) error {
   765  	return a.legacyEventListener.HandleChaincodeDeploy(&cceventmgmt.ChaincodeDefinition{
   766  		Name:              chaincodeDefinition.Name,
   767  		Hash:              chaincodeDefinition.Hash,
   768  		Version:           chaincodeDefinition.Version,
   769  		CollectionConfigs: chaincodeDefinition.CollectionConfigs,
   770  	},
   771  		dbArtifactsTar,
   772  	)
   773  }
   774  
   775  func (a *ccEventListenerAdaptor) ChaincodeDeployDone(succeeded bool) {
   776  	a.legacyEventListener.ChaincodeDeployDone(succeeded)
   777  }
   778  
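         // filterPvtDataOfInvalidTx retains only the pvt data whose transaction is marked valid in
         // the corresponding block's transactions filter.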
   779  func filterPvtDataOfInvalidTx(hashVerifiedPvtData map[uint64][]*ledger.TxPvtData, blockStore *blkstorage.BlockStore) (map[uint64][]*ledger.TxPvtData, error) {
   780  	committedPvtData := make(map[uint64][]*ledger.TxPvtData)
   781  	for blkNum, txsPvtData := range hashVerifiedPvtData {
   782  
   783  		// TODO: Instead of retrieving the whole block, we need to retrieve only
   784  		// the TxValidationFlags from the block metadata. For that, we would need
   785  		// to add a new index for the block metadata - FAB-15808
   786  		block, err := blockStore.RetrieveBlockByNumber(blkNum)
   787  		if err != nil {
   788  			return nil, err
   789  		}
   790  		blockValidationFlags := txflags.ValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
   791  
   792  		var blksPvtData []*ledger.TxPvtData
   793  		for _, pvtData := range txsPvtData {
   794  			if blockValidationFlags.IsValid(int(pvtData.SeqInBlock)) {
   795  				blksPvtData = append(blksPvtData, pvtData)
   796  			}
   797  		}
   798  		committedPvtData[blkNum] = blksPvtData
   799  	}
   800  	return committedPvtData, nil
   801  }
   802  
   803  func constructPvtdataMap(pvtdata []*ledger.TxPvtData) ledger.TxPvtDataMap {
   804  	if pvtdata == nil {
   805  		return nil
   806  	}
   807  	m := make(map[uint64]*ledger.TxPvtData)
   808  	for _, pvtdatum := range pvtdata {
   809  		m[pvtdatum.SeqInBlock] = pvtdatum
   810  	}
   811  	return m
   812  }
   813  
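         // constructPvtDataAndMissingData flattens a BlockAndPvtData into the pvt data slice and the
         // missing-data map expected by the pvt data store's Commit.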
   814  func constructPvtDataAndMissingData(blockAndPvtData *ledger.BlockAndPvtData) ([]*ledger.TxPvtData,
   815  	ledger.TxMissingPvtDataMap) {
   816  
   817  	var pvtData []*ledger.TxPvtData
   818  	missingPvtData := make(ledger.TxMissingPvtDataMap)
   819  
   820  	numTxs := uint64(len(blockAndPvtData.Block.Data.Data))
   821  
   822  	for txNum := uint64(0); txNum < numTxs; txNum++ {
   823  		if pvtdata, ok := blockAndPvtData.PvtData[txNum]; ok {
   824  			pvtData = append(pvtData, pvtdata)
   825  		}
   826  
   827  		if missingData, ok := blockAndPvtData.MissingPvtData[txNum]; ok {
   828  			for _, missing := range missingData {
   829  				missingPvtData.Add(txNum, missing.Namespace,
   830  					missing.Collection, missing.IsEligible)
   831  			}
   832  		}
   833  	}
   834  	return pvtData, missingPvtData
   835  }