github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/core/ledger/kvledger/kv_ledger.go

     1  /*
     2  Copyright hechain. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package kvledger
     8  
     9  import (
    10  	"encoding/hex"
    11  	"fmt"
    12  	"sync"
    13  	"sync/atomic"
    14  	"time"
    15  
    16  	"github.com/golang/protobuf/proto"
    17  	"github.com/hechain20/hechain/bccsp"
    18  	"github.com/hechain20/hechain/common/flogging"
    19  	commonledger "github.com/hechain20/hechain/common/ledger"
    20  	"github.com/hechain20/hechain/common/ledger/blkstorage"
    21  	"github.com/hechain20/hechain/common/util"
    22  	"github.com/hechain20/hechain/core/ledger"
    23  	"github.com/hechain20/hechain/core/ledger/cceventmgmt"
    24  	"github.com/hechain20/hechain/core/ledger/confighistory"
    25  	"github.com/hechain20/hechain/core/ledger/kvledger/bookkeeping"
    26  	"github.com/hechain20/hechain/core/ledger/kvledger/history"
    27  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/privacyenabledstate"
    28  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/txmgr"
    29  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/validation"
    30  	"github.com/hechain20/hechain/core/ledger/pvtdatapolicy"
    31  	"github.com/hechain20/hechain/core/ledger/pvtdatastorage"
    32  	"github.com/hechain20/hechain/internal/pkg/txflags"
    33  	"github.com/hechain20/hechain/protoutil"
    34  	"github.com/hyperledger/fabric-protos-go/common"
    35  	"github.com/hyperledger/fabric-protos-go/peer"
    36  	"github.com/pkg/errors"
    37  )
    38  
    39  var logger = flogging.MustGetLogger("kvledger")
    40  
    41  var (
    42  	rwsetHashOpts    = &bccsp.SHA256Opts{}
    43  	snapshotHashOpts = &bccsp.SHA256Opts{}
    44  )
    45  
    46  // kvLedger provides an implementation of `ledger.PeerLedger`.
    47  // This implementation provides a key-value based data model
    48  type kvLedger struct {
    49  	ledgerID               string
    50  	bootSnapshotMetadata   *SnapshotMetadata
    51  	blockStore             *blkstorage.BlockStore
    52  	pvtdataStore           *pvtdatastorage.Store
    53  	txmgr                  *txmgr.LockBasedTxMgr
    54  	historyDB              *history.DB
    55  	configHistoryRetriever *collectionConfigHistoryRetriever
    56  	snapshotMgr            *snapshotMgr
    57  	blockAPIsRWLock        *sync.RWMutex
    58  	stats                  *ledgerStats
    59  	commitHash             []byte
    60  	hashProvider           ledger.HashProvider
    61  	config                 *ledger.Config
    62  
    63  	// isPvtDataStoreAheadOfBlockStore is read during missing pvtData
    64  	// reconciliation and may be updated during a regular block commit.
    65  	// Hence, we use an atomic value to ensure consistent reads.
    66  	isPvtstoreAheadOfBlkstore atomic.Value
    67  
    68  	commitNotifierLock sync.Mutex
    69  	commitNotifier     *commitNotifier
    70  }
    71  
    72  type lgrInitializer struct {
    73  	ledgerID                 string
    74  	initializingFromSnapshot bool
    75  	bootSnapshotMetadata     *SnapshotMetadata
    76  	blockStore               *blkstorage.BlockStore
    77  	pvtdataStore             *pvtdatastorage.Store
    78  	stateDB                  *privacyenabledstate.DB
    79  	historyDB                *history.DB
    80  	configHistoryMgr         *confighistory.Mgr
    81  	stateListeners           []ledger.StateListener
    82  	bookkeeperProvider       *bookkeeping.Provider
    83  	ccInfoProvider           ledger.DeployedChaincodeInfoProvider
    84  	ccLifecycleEventProvider ledger.ChaincodeLifecycleEventProvider
    85  	stats                    *ledgerStats
    86  	customTxProcessors       map[common.HeaderType]ledger.CustomTxProcessor
    87  	hashProvider             ledger.HashProvider
    88  	config                   *ledger.Config
    89  }
    90  
    91  func newKVLedger(initializer *lgrInitializer) (*kvLedger, error) {
    92  	ledgerID := initializer.ledgerID
    93  	logger.Debugf("Creating KVLedger ledgerID=%s: ", ledgerID)
    94  	l := &kvLedger{
    95  		ledgerID:             ledgerID,
    96  		bootSnapshotMetadata: initializer.bootSnapshotMetadata,
    97  		blockStore:           initializer.blockStore,
    98  		pvtdataStore:         initializer.pvtdataStore,
    99  		historyDB:            initializer.historyDB,
   100  		hashProvider:         initializer.hashProvider,
   101  		config:               initializer.config,
   102  		blockAPIsRWLock:      &sync.RWMutex{},
   103  	}
   104  
   105  	btlPolicy := pvtdatapolicy.ConstructBTLPolicy(&collectionInfoRetriever{ledgerID, l, initializer.ccInfoProvider})
   106  
   107  	rwsetHashFunc := func(data []byte) ([]byte, error) {
   108  		hash, err := initializer.hashProvider.GetHash(rwsetHashOpts)
   109  		if err != nil {
   110  			return nil, err
   111  		}
   112  		if _, err = hash.Write(data); err != nil {
   113  			return nil, err
   114  		}
   115  		return hash.Sum(nil), nil
   116  	}
   117  
   118  	txmgrInitializer := &txmgr.Initializer{
   119  		LedgerID:            ledgerID,
   120  		DB:                  initializer.stateDB,
   121  		StateListeners:      initializer.stateListeners,
   122  		BtlPolicy:           btlPolicy,
   123  		BookkeepingProvider: initializer.bookkeeperProvider,
   124  		CCInfoProvider:      initializer.ccInfoProvider,
   125  		CustomTxProcessors:  initializer.customTxProcessors,
   126  		HashFunc:            rwsetHashFunc,
   127  	}
   128  	if err := l.initTxMgr(txmgrInitializer); err != nil {
   129  		return nil, err
   130  	}
   131  
   132  	// btlPolicy internally uses a query executor and indirectly ends up using the txmgr.
   133  	// Hence, we need to init the pvtdataStore only after the txmgr has been initialized.
   134  	l.pvtdataStore.Init(btlPolicy)
   135  
   136  	var err error
   137  	l.commitHash, err = l.lastPersistedCommitHash()
   138  	if err != nil {
   139  		return nil, err
   140  	}
   141  
   142  	isAhead, err := l.isPvtDataStoreAheadOfBlockStore()
   143  	if err != nil {
   144  		return nil, err
   145  	}
   146  	l.isPvtstoreAheadOfBlkstore.Store(isAhead)
   147  
   148  	statedbIndexCreator := initializer.stateDB.GetChaincodeEventListener()
   149  	if statedbIndexCreator != nil {
   150  		logger.Debugf("Register state db for chaincode lifecycle events")
   151  		err := l.registerStateDBIndexCreatorForChaincodeLifecycleEvents(
   152  			statedbIndexCreator,
   153  			initializer.ccInfoProvider,
   154  			initializer.ccLifecycleEventProvider,
   155  			cceventmgmt.GetMgr(),
   156  			initializer.initializingFromSnapshot,
   157  		)
   158  		if err != nil {
   159  			return nil, err
   160  		}
   161  	}
   162  
   163  	// Recover both state DB and history DB if they are out of sync with block storage
   164  	if err := l.recoverDBs(); err != nil {
   165  		return nil, err
   166  	}
   167  	l.configHistoryRetriever = &collectionConfigHistoryRetriever{
   168  		Retriever:                     initializer.configHistoryMgr.GetRetriever(ledgerID),
   169  		DeployedChaincodeInfoProvider: txmgrInitializer.CCInfoProvider,
   170  		ledger:                        l,
   171  	}
   172  
   173  	if err := l.initSnapshotMgr(initializer); err != nil {
   174  		return nil, err
   175  	}
   176  
   177  	l.stats = initializer.stats
   178  	return l, nil
   179  }
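
        // Note on the initialization order above (a reading of the constructor, not a
        // normative contract): the txmgr must be created before pvtdataStore.Init is
        // called because the BTL policy resolves collection configs through a query
        // executor that indirectly relies on the txmgr; only then are the last
        // persisted commit hash and the pvtstore-ahead flag recovered, the state and
        // history DBs synced with the block store, and the snapshot manager started.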
   180  
   181  func (l *kvLedger) registerStateDBIndexCreatorForChaincodeLifecycleEvents(
   182  	stateDBIndexCreator cceventmgmt.ChaincodeLifecycleEventListener,
   183  	deployedChaincodesInfoExtractor ledger.DeployedChaincodeInfoProvider,
   184  	chaincodesLifecycleEventsProvider ledger.ChaincodeLifecycleEventProvider,
   185  	legacyChaincodesLifecycleEventsProvider *cceventmgmt.Mgr,
   186  	bootstrappingFromSnapshot bool,
   187  ) error {
   188  	if !bootstrappingFromSnapshot {
   189  		// regular opening of ledger
   190  		if err := chaincodesLifecycleEventsProvider.RegisterListener(
   191  			l.ledgerID, &ccEventListenerAdaptor{stateDBIndexCreator}, false); err != nil {
   192  			return err
   193  		}
   194  		legacyChaincodesLifecycleEventsProvider.Register(l.ledgerID, stateDBIndexCreator)
   195  		return nil
   196  	}
   197  
   198  	// opening of the ledger after creation from a snapshot -
   199  	// it would have been better if we could explicitly retrieve the list of invocable chaincodes instead of
   200  	// passing the flag initializer.initializingFromSnapshot to the ccLifecycleEventProvider (which is essentially
   201  	// the _lifecycle cache) to direct the ccLifecycleEventProvider to call us back. However, the lock that ensures
   202  	// synchronization with the chaincode installer is maintained in the lifecycle cache, and by design the lifecycle
   203  	// cache takes the responsibility of calling any listener under that lock.
   204  	if err := chaincodesLifecycleEventsProvider.RegisterListener(
   205  		l.ledgerID, &ccEventListenerAdaptor{stateDBIndexCreator}, true); err != nil {
   206  		return errors.WithMessage(err, "error while creating statedb indexes after bootstrapping from snapshot")
   207  	}
   208  
   209  	legacyChaincodes, err := l.listLegacyChaincodesDefined(deployedChaincodesInfoExtractor)
   210  	if err != nil {
   211  		return errors.WithMessage(err, "error while creating statedb indexes after bootstrapping from snapshot")
   212  	}
   213  
   214  	if err := legacyChaincodesLifecycleEventsProvider.RegisterAndInvokeFor(
   215  		legacyChaincodes, l.ledgerID, stateDBIndexCreator); err != nil {
   216  		return errors.WithMessage(err, "error while creating statedb indexes after bootstrapping from snapshot")
   217  	}
   218  	return nil
   219  }
   220  
   221  func (l *kvLedger) listLegacyChaincodesDefined(
   222  	deployedChaincodesInfoExtractor ledger.DeployedChaincodeInfoProvider) (
   223  	[]*cceventmgmt.ChaincodeDefinition, error) {
   224  	qe, err := l.txmgr.NewQueryExecutor("")
   225  	if err != nil {
   226  		return nil, err
   227  	}
   228  	defer qe.Done()
   229  
   230  	definedChaincodes, err := deployedChaincodesInfoExtractor.AllChaincodesInfo(l.ledgerID, qe)
   231  	if err != nil {
   232  		return nil, err
   233  	}
   234  
   235  	legacyChaincodes := []*cceventmgmt.ChaincodeDefinition{}
   236  	for _, chaincodeInfo := range definedChaincodes {
   237  		if !chaincodeInfo.IsLegacy {
   238  			continue
   239  		}
   240  		legacyChaincodes = append(legacyChaincodes,
   241  			&cceventmgmt.ChaincodeDefinition{
   242  				Name:              chaincodeInfo.Name,
   243  				Version:           chaincodeInfo.Version,
   244  				Hash:              chaincodeInfo.Hash,
   245  				CollectionConfigs: chaincodeInfo.ExplicitCollectionConfigPkg,
   246  			},
   247  		)
   248  	}
   249  	return legacyChaincodes, nil
   250  }
   251  
   252  func (l *kvLedger) initTxMgr(initializer *txmgr.Initializer) error {
   253  	var err error
   254  	txmgr, err := txmgr.NewLockBasedTxMgr(initializer)
   255  	if err != nil {
   256  		return err
   257  	}
   258  	l.txmgr = txmgr
   259  	// This is a workaround for populating lifecycle cache.
   260  	// See comments on this function for details
   261  	qe, err := txmgr.NewQueryExecutorNoCollChecks()
   262  	if err != nil {
   263  		return err
   264  	}
   265  	defer qe.Done()
   266  	for _, sl := range initializer.StateListeners {
   267  		if err := sl.Initialize(l.ledgerID, qe); err != nil {
   268  			return err
   269  		}
   270  	}
   271  	return err
   272  }
   273  
   274  func (l *kvLedger) initSnapshotMgr(initializer *lgrInitializer) error {
   275  	dbHandle := initializer.bookkeeperProvider.GetDBHandle(l.ledgerID, bookkeeping.SnapshotRequest)
   276  	bookkeeper, err := newSnapshotRequestBookkeeper(l.ledgerID, dbHandle)
   277  	if err != nil {
   278  		return err
   279  	}
   280  
   281  	l.snapshotMgr = &snapshotMgr{
   282  		snapshotRequestBookkeeper: bookkeeper,
   283  		events:                    make(chan *event),
   284  		commitProceed:             make(chan struct{}),
   285  		requestResponses:          make(chan *requestResponse),
   286  	}
   287  
   288  	bcInfo, err := l.blockStore.GetBlockchainInfo()
   289  	if err != nil {
   290  		return err
   291  	}
   292  	lastCommittedBlock := bcInfo.Height - 1
   293  
   294  	// start a goroutine to synchronize commit, snapshot generation, and snapshot submission/cancellation.
   295  	go l.processSnapshotMgmtEvents(lastCommittedBlock)
   296  
   297  	if bcInfo.Height != 0 {
   298  		return l.regenrateMissedSnapshot(lastCommittedBlock)
   299  	}
   300  	return nil
   301  }
   302  
   303  func (l *kvLedger) lastPersistedCommitHash() ([]byte, error) {
   304  	bcInfo, err := l.GetBlockchainInfo()
   305  	if err != nil {
   306  		return nil, err
   307  	}
   308  	if bcInfo.Height == 0 {
   309  		logger.Debugf("Chain is empty")
   310  		return nil, nil
   311  	}
   312  
   313  	if l.bootSnapshotMetadata != nil && l.bootSnapshotMetadata.LastBlockNumber == bcInfo.Height-1 {
   314  		logger.Debugw(
   315  			"Ledger is starting for the first time after creation from a snapshot. Retrieving last commit hash from boot snapshot metadata",
   316  			"ledger", l.ledgerID,
   317  		)
   318  		return hex.DecodeString(l.bootSnapshotMetadata.LastBlockCommitHashInHex)
   319  	}
   320  
   321  	logger.Debugf("Fetching block [%d] to retrieve the currentCommitHash", bcInfo.Height-1)
   322  	block, err := l.GetBlockByNumber(bcInfo.Height - 1)
   323  	if err != nil {
   324  		return nil, err
   325  	}
   326  
   327  	if len(block.Metadata.Metadata) < int(common.BlockMetadataIndex_COMMIT_HASH+1) {
   328  		logger.Debugf("Last block metadata does not contain commit hash")
   329  		return nil, nil
   330  	}
   331  
   332  	commitHash := &common.Metadata{}
   333  	err = proto.Unmarshal(block.Metadata.Metadata[common.BlockMetadataIndex_COMMIT_HASH], commitHash)
   334  	if err != nil {
   335  		return nil, errors.Wrap(err, "error unmarshalling last persisted commit hash")
   336  	}
   337  	return commitHash.Value, nil
   338  }
   339  
   340  func (l *kvLedger) isPvtDataStoreAheadOfBlockStore() (bool, error) {
   341  	blockStoreInfo, err := l.blockStore.GetBlockchainInfo()
   342  	if err != nil {
   343  		return false, err
   344  	}
   345  	pvtstoreHeight, err := l.pvtdataStore.LastCommittedBlockHeight()
   346  	if err != nil {
   347  		return false, err
   348  	}
   349  	return pvtstoreHeight > blockStoreInfo.Height, nil
   350  }
   351  
   352  func (l *kvLedger) recoverDBs() error {
   353  	logger.Debugf("Entering recoverDBs()")
   354  	if err := l.syncStateAndHistoryDBWithBlockstore(); err != nil {
   355  		return err
   356  	}
   357  	return l.syncStateDBWithOldBlkPvtdata()
   358  }
   359  
   360  func (l *kvLedger) syncStateAndHistoryDBWithBlockstore() error {
   361  	// If there is no block in blockstorage, nothing to recover.
   362  	info, _ := l.blockStore.GetBlockchainInfo()
   363  	if info.Height == 0 {
   364  		logger.Debug("Block storage is empty.")
   365  		return nil
   366  	}
   367  	lastBlockInBlockStore := info.Height - 1
   368  	recoverables := []recoverable{l.txmgr}
   369  	if l.historyDB != nil {
   370  		recoverables = append(recoverables, l.historyDB)
   371  	}
   372  	recoverers := []*recoverer{}
   373  	for _, recoverable := range recoverables {
   374  		// nextRequiredBlock is the next block number expected by the DB being recovered;
   375  		// in other words, it is simply the current height of that DB.
   376  		recoverFlag, nextRequiredBlock, err := recoverable.ShouldRecover(lastBlockInBlockStore)
   377  		if err != nil {
   378  			return err
   379  		}
   380  
   381  		if l.bootSnapshotMetadata != nil {
   382  			lastBlockInSnapshot := l.bootSnapshotMetadata.LastBlockNumber
   383  			if nextRequiredBlock <= lastBlockInSnapshot {
   384  				return errors.Errorf(
   385  					"recovery for DB [%s] not possible. Ledger [%s] is created from a snapshot. Last block in snapshot = [%d], DB needs block [%d] onward",
   386  					recoverable.Name(),
   387  					l.ledgerID,
   388  					lastBlockInSnapshot,
   389  					nextRequiredBlock,
   390  				)
   391  			}
   392  		}
   393  
   394  		if nextRequiredBlock > lastBlockInBlockStore+1 {
   395  			dbName := recoverable.Name()
   396  			return fmt.Errorf("the %s database [height=%d] is ahead of the block store [height=%d]. "+
   397  				"This is possible when the %s database is not dropped after a ledger reset/rollback. "+
   398  				"The %s database can safely be dropped and will be rebuilt up to block store height upon the next peer start",
   399  				dbName, nextRequiredBlock, lastBlockInBlockStore+1, dbName, dbName)
   400  		}
   401  		if recoverFlag {
   402  			recoverers = append(recoverers, &recoverer{nextRequiredBlock, recoverable})
   403  		}
   404  	}
   405  	if len(recoverers) == 0 {
   406  		return nil
   407  	}
   408  	if len(recoverers) == 1 {
   409  		return l.recommitLostBlocks(recoverers[0].nextRequiredBlock, lastBlockInBlockStore, recoverers[0].recoverable)
   410  	}
   411  
   412  	// both dbs need to be recovered
   413  	if recoverers[0].nextRequiredBlock > recoverers[1].nextRequiredBlock {
   414  		// swap (put the lagging db at index 0)
   415  		recoverers[0], recoverers[1] = recoverers[1], recoverers[0]
   416  	}
   417  	if recoverers[0].nextRequiredBlock != recoverers[1].nextRequiredBlock {
   418  		// bring the lagging db up to the same height as the other db
   419  		if err := l.recommitLostBlocks(recoverers[0].nextRequiredBlock, recoverers[1].nextRequiredBlock-1,
   420  			recoverers[0].recoverable); err != nil {
   421  			return err
   422  		}
   423  	}
   424  	// bring both dbs up to the block storage height
   425  	return l.recommitLostBlocks(recoverers[1].nextRequiredBlock, lastBlockInBlockStore,
   426  		recoverers[0].recoverable, recoverers[1].recoverable)
   427  }
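
        // A worked example of the two-step recovery above (illustrative numbers only):
        // with a block store height of 100 (last block 99), a state DB that next needs
        // block 90, and a history DB that next needs block 95, blocks 90-94 are first
        // recommitted to the state DB alone (bringing it level with the history DB),
        // and then blocks 95-99 are recommitted to both DBs together.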
   428  
   429  func (l *kvLedger) syncStateDBWithOldBlkPvtdata() error {
   430  	// TODO: syncStateDBWithOldBlkPvtdata, GetLastUpdatedOldBlocksPvtData(),
   431  	// and ResetLastUpdatedOldBlocksList() can be removed in > v2 LTS.
   432  	// From v2.0 onwards, we do not store the last updatedBlksList.
   433  	// Only to support the rolling upgrade from v1.4 LTS to v2 LTS, we
   434  	// retain these three functions in v2.0 - FAB-16294.
   435  
   436  	blocksPvtData, err := l.pvtdataStore.GetLastUpdatedOldBlocksPvtData()
   437  	if err != nil {
   438  		return err
   439  	}
   440  
   441  	// Assume that the peer has restarted after a rollback or a reset.
   442  	// As the pvtdataStore can contain pvtData of yet-to-be-committed blocks,
   443  	// we need to filter those entries out before passing the data to the
   444  	// transaction manager for stateDB updates.
   445  	if err := l.filterYetToCommitBlocks(blocksPvtData); err != nil {
   446  		return err
   447  	}
   448  
   449  	if err = l.applyValidTxPvtDataOfOldBlocks(blocksPvtData); err != nil {
   450  		return err
   451  	}
   452  
   453  	return l.pvtdataStore.ResetLastUpdatedOldBlocksList()
   454  }
   455  
   456  func (l *kvLedger) filterYetToCommitBlocks(blocksPvtData map[uint64][]*ledger.TxPvtData) error {
   457  	info, err := l.blockStore.GetBlockchainInfo()
   458  	if err != nil {
   459  		return err
   460  	}
   461  	for blkNum := range blocksPvtData {
   462  		if blkNum > info.Height-1 {
   463  			logger.Infof("found pvtdata associated with yet to be committed block [%d]", blkNum)
   464  			delete(blocksPvtData, blkNum)
   465  		}
   466  	}
   467  	return nil
   468  }
   469  
   470  // recommitLostBlocks retrieves the blocks in the specified range and commits the write sets
   471  // to the state DB, the history DB, or both
   472  func (l *kvLedger) recommitLostBlocks(firstBlockNum uint64, lastBlockNum uint64, recoverables ...recoverable) error {
   473  	logger.Infof("Recommitting lost blocks - firstBlockNum=%d, lastBlockNum=%d, recoverables=%#v", firstBlockNum, lastBlockNum, recoverables)
   474  	var err error
   475  	var blockAndPvtdata *ledger.BlockAndPvtData
   476  	for blockNumber := firstBlockNum; blockNumber <= lastBlockNum; blockNumber++ {
   477  		if blockAndPvtdata, err = l.GetPvtDataAndBlockByNum(blockNumber, nil); err != nil {
   478  			return err
   479  		}
   480  		for _, r := range recoverables {
   481  			if err := r.CommitLostBlock(blockAndPvtdata); err != nil {
   482  				return err
   483  			}
   484  		}
   485  	}
   486  	logger.Infof("Recommitted lost blocks - firstBlockNum=%d, lastBlockNum=%d, recoverables=%#v", firstBlockNum, lastBlockNum, recoverables)
   487  	return nil
   488  }
   489  
   490  // TxIDExists returns true if the specified txID is present in one of the already committed blocks
   491  func (l *kvLedger) TxIDExists(txID string) (bool, error) {
   492  	l.blockAPIsRWLock.RLock()
   493  	defer l.blockAPIsRWLock.RUnlock()
   494  	return l.blockStore.TxIDExists(txID)
   495  }
   496  
   497  // GetTransactionByID retrieves a transaction by id
   498  func (l *kvLedger) GetTransactionByID(txID string) (*peer.ProcessedTransaction, error) {
   499  	l.blockAPIsRWLock.RLock()
   500  	defer l.blockAPIsRWLock.RUnlock()
   501  	tranEnv, err := l.blockStore.RetrieveTxByID(txID)
   502  	if err != nil {
   503  		return nil, err
   504  	}
   505  	txVResult, _, err := l.blockStore.RetrieveTxValidationCodeByTxID(txID)
   506  	if err != nil {
   507  		return nil, err
   508  	}
   509  	processedTran := &peer.ProcessedTransaction{TransactionEnvelope: tranEnv, ValidationCode: int32(txVResult)}
   510  	return processedTran, nil
   511  }
   512  
   513  // GetBlockchainInfo returns basic info about blockchain
   514  func (l *kvLedger) GetBlockchainInfo() (*common.BlockchainInfo, error) {
   515  	l.blockAPIsRWLock.RLock()
   516  	defer l.blockAPIsRWLock.RUnlock()
   517  	bcInfo, err := l.blockStore.GetBlockchainInfo()
   518  	return bcInfo, err
   519  }
   520  
   521  // GetBlockByNumber returns block at a given height
   522  // A blockNumber of math.MaxUint64 will return the last block
   523  func (l *kvLedger) GetBlockByNumber(blockNumber uint64) (*common.Block, error) {
   524  	l.blockAPIsRWLock.RLock()
   525  	defer l.blockAPIsRWLock.RUnlock()
   526  	block, err := l.blockStore.RetrieveBlockByNumber(blockNumber)
   527  	return block, err
   528  }
   529  
   530  // GetBlocksIterator returns an iterator that starts from `startBlockNumber` (inclusive).
   531  // The iterator is a blocking iterator, i.e., it blocks until the next block becomes available in the ledger.
   532  // The ResultsIterator yields results of type BlockHolder.
   533  func (l *kvLedger) GetBlocksIterator(startBlockNumber uint64) (commonledger.ResultsIterator, error) {
   534  	blkItr, err := l.blockStore.RetrieveBlocks(startBlockNumber)
   535  	if err != nil {
   536  		return nil, err
   537  	}
   538  	return &blocksItr{l.blockAPIsRWLock, blkItr}, nil
   539  }
   540  
   541  // GetBlockByHash returns a block given its hash
   542  func (l *kvLedger) GetBlockByHash(blockHash []byte) (*common.Block, error) {
   543  	block, err := l.blockStore.RetrieveBlockByHash(blockHash)
   544  	l.blockAPIsRWLock.RLock()
   545  	l.blockAPIsRWLock.RUnlock() //lint:ignore SA2001 syncpoint
   546  	return block, err
   547  }
   548  
   549  // GetBlockByTxID returns a block which contains a transaction
   550  func (l *kvLedger) GetBlockByTxID(txID string) (*common.Block, error) {
   551  	l.blockAPIsRWLock.RLock()
   552  	defer l.blockAPIsRWLock.RUnlock()
   553  	block, err := l.blockStore.RetrieveBlockByTxID(txID)
   554  	return block, err
   555  }
   556  
   557  // GetTxValidationCodeByTxID returns transaction validation code and block number in which the transaction was committed
   558  func (l *kvLedger) GetTxValidationCodeByTxID(txID string) (peer.TxValidationCode, uint64, error) {
   559  	l.blockAPIsRWLock.RLock()
   560  	defer l.blockAPIsRWLock.RUnlock()
   561  	txValidationCode, blkNum, err := l.blockStore.RetrieveTxValidationCodeByTxID(txID)
   562  	return txValidationCode, blkNum, err
   563  }
   564  
   565  // NewTxSimulator returns new `ledger.TxSimulator`
   566  func (l *kvLedger) NewTxSimulator(txid string) (ledger.TxSimulator, error) {
   567  	return l.txmgr.NewTxSimulator(txid)
   568  }
   569  
   570  // NewQueryExecutor gives a handle to a query executor.
   571  // A client can obtain more than one QueryExecutor for parallel execution.
   572  // Any synchronization should be performed at the implementation level if required.
   573  func (l *kvLedger) NewQueryExecutor() (ledger.QueryExecutor, error) {
   574  	return l.txmgr.NewQueryExecutor(util.GenerateUUID())
   575  }
   576  
   577  // NewHistoryQueryExecutor gives a handle to a history query executor.
   578  // A client can obtain more than one HistoryQueryExecutor for parallel execution.
   579  // Any synchronization should be performed at the implementation level if required.
   580  // The ledger's block store is passed in so that historical values can be looked up from the chain.
   581  func (l *kvLedger) NewHistoryQueryExecutor() (ledger.HistoryQueryExecutor, error) {
   582  	if l.historyDB != nil {
   583  		return l.historyDB.NewQueryExecutor(l.blockStore)
   584  	}
   585  	return nil, nil
   586  }
   587  
   588  // CommitLegacy commits the block and the corresponding pvt data in an atomic operation.
   589  // It synchronizes commit, snapshot generation and snapshot requests via events and commitProceed channels.
   590  // Before committing a block, it sends a commitStart event and waits for a message from commitProceed.
   591  // After the block is committed, it sends a commitDone event.
   592  // Refer to the processSnapshotMgmtEvents function to understand how the channels and events work together to handle synchronization.
   593  func (l *kvLedger) CommitLegacy(pvtdataAndBlock *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
   594  	blockNumber := pvtdataAndBlock.Block.Header.Number
   595  	l.snapshotMgr.events <- &event{commitStart, blockNumber}
   596  	<-l.snapshotMgr.commitProceed
   597  
   598  	if err := l.commit(pvtdataAndBlock, commitOpts); err != nil {
   599  		return err
   600  	}
   601  
   602  	l.snapshotMgr.events <- &event{commitDone, blockNumber}
   603  	return nil
   604  }
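
        // The commit/snapshot handshake above, in sketch form (channel and event names
        // are the ones used in this file; this is an illustrative reading of the
        // protocol, see processSnapshotMgmtEvents for the authoritative logic):
        //
        //   CommitLegacy goroutine              snapshot-mgmt goroutine
        //   ----------------------              -----------------------
        //   events <- {commitStart, N}
        //                                       admits the commit once no snapshot
        //                                       generation is in progress, then:
        //   <-commitProceed               <---  commitProceed <- struct{}{}
        //   commit(block N)
        //   events <- {commitDone, N}
        //                                       may trigger a pending snapshot request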
   605  
   606  // commit commits the block and the corresponding pvt data in an atomic operation.
   607  func (l *kvLedger) commit(pvtdataAndBlock *ledger.BlockAndPvtData, commitOpts *ledger.CommitOptions) error {
   608  	var err error
   609  	block := pvtdataAndBlock.Block
   610  	blockNo := pvtdataAndBlock.Block.Header.Number
   611  
   612  	startBlockProcessing := time.Now()
   613  	if commitOpts.FetchPvtDataFromLedger {
   614  		// when we reach here, it means that the pvtdata store has the
   615  		// pvtdata associated with this block but the stateDB might not
   616  		// have it. During the commit of this block, no update would
   617  		// happen in the pvtdata store as it already has the required data.
   618  
   619  		// if there is any missing pvtData, the reconciler will fetch it
   620  		// and update both the pvtdataStore and the stateDB. Hence, we can
   621  		// fetch what is available in the pvtdataStore. If any or
   622  		// all of the pvtdata associated with the block has expired
   623  		// and is no longer available in the pvtdataStore, it will eventually
   624  		// expire in the stateDB as well (though the stateDB
   625  		// would miss that pvtData until then)
   626  		txPvtData, err := l.pvtdataStore.GetPvtDataByBlockNum(blockNo, nil)
   627  		if err != nil {
   628  			return err
   629  		}
   630  		pvtdataAndBlock.PvtData = convertTxPvtDataArrayToMap(txPvtData)
   631  	}
   632  
   633  	logger.Debugf("[%s] Validating state for block [%d]", l.ledgerID, blockNo)
   634  	txstatsInfo, updateBatchBytes, err := l.txmgr.ValidateAndPrepare(pvtdataAndBlock, true)
   635  	if err != nil {
   636  		return err
   637  	}
   638  	elapsedBlockProcessing := time.Since(startBlockProcessing)
   639  
   640  	startBlockstorageAndPvtdataCommit := time.Now()
   641  	logger.Debugf("[%s] Adding CommitHash to the block [%d]", l.ledgerID, blockNo)
   642  	// we need to ensure that the commitHash is computed and added to the block
   643  	// only after the genesis block. In other words, the commitHash is added to the
   644  	// block only after joining a new channel or after a peer reset
   645  	if block.Header.Number == 1 || len(l.commitHash) != 0 {
   646  		l.addBlockCommitHash(pvtdataAndBlock.Block, updateBatchBytes)
   647  	}
   648  
   649  	logger.Debugf("[%s] Committing pvtdata and block [%d] to storage", l.ledgerID, blockNo)
   650  	l.blockAPIsRWLock.Lock()
   651  	defer l.blockAPIsRWLock.Unlock()
   652  	if err = l.commitToPvtAndBlockStore(pvtdataAndBlock); err != nil {
   653  		return err
   654  	}
   655  	elapsedBlockstorageAndPvtdataCommit := time.Since(startBlockstorageAndPvtdataCommit)
   656  
   657  	startCommitState := time.Now()
   658  	logger.Debugf("[%s] Committing block [%d] transactions to state database", l.ledgerID, blockNo)
   659  	if err = l.txmgr.Commit(); err != nil {
   660  		panic(errors.WithMessage(err, "error during commit to txmgr"))
   661  	}
   662  	elapsedCommitState := time.Since(startCommitState)
   663  
   664  	// The history database could be written in parallel with the state DB and/or asynchronously as a future
   665  	// optimization, although it has not been a bottleneck; no need to clutter the log with the elapsed duration.
   666  	if l.historyDB != nil {
   667  		logger.Debugf("[%s] Committing block [%d] transactions to history database", l.ledgerID, blockNo)
   668  		if err := l.historyDB.Commit(block); err != nil {
   669  			panic(errors.WithMessage(err, "Error during commit to history db"))
   670  		}
   671  	}
   672  
   673  	logger.Infof("[%s] Committed block [%d] with %d transaction(s) in %dms (state_validation=%dms block_and_pvtdata_commit=%dms state_commit=%dms)"+
   674  		" commitHash=[%x]",
   675  		l.ledgerID, block.Header.Number, len(block.Data.Data),
   676  		time.Since(startBlockProcessing)/time.Millisecond,
   677  		elapsedBlockProcessing/time.Millisecond,
   678  		elapsedBlockstorageAndPvtdataCommit/time.Millisecond,
   679  		elapsedCommitState/time.Millisecond,
   680  		l.commitHash,
   681  	)
   682  
   683  	l.updateBlockStats(
   684  		elapsedBlockProcessing,
   685  		elapsedBlockstorageAndPvtdataCommit,
   686  		elapsedCommitState,
   687  		txstatsInfo,
   688  	)
   689  
   690  	l.sendCommitNotification(blockNo, txstatsInfo)
   691  	return nil
   692  }
   693  
   694  func (l *kvLedger) commitToPvtAndBlockStore(blockAndPvtdata *ledger.BlockAndPvtData) error {
   695  	pvtdataStoreHt, err := l.pvtdataStore.LastCommittedBlockHeight()
   696  	if err != nil {
   697  		return err
   698  	}
   699  	blockNum := blockAndPvtdata.Block.Header.Number
   700  
   701  	if !l.isPvtstoreAheadOfBlkstore.Load().(bool) {
   702  		logger.Debugf("Writing block [%d] to pvt data store", blockNum)
   703  		// If a state fork occurs during a regular block commit,
   704  		// we have a mechanism to drop all blocks, refetch them,
   705  		// and re-process them. In the current way of doing this, we only drop
   706  		// the block files (and related artifacts) but we do not drop/overwrite the
   707  		// pvtdatastorage, as that might lead to data loss.
   708  		// During block reprocessing, since an invalid pvtdata transaction may
   709  		// become valid, we store the pvtdata of invalid transactions in the
   710  		// pvtdataStore too, just as we do for the public data in the blockStore.
   711  		// Hence, we pass all pvtData present in the block to the pvtdataStore committer.
   712  		pvtData, missingPvtData := constructPvtDataAndMissingData(blockAndPvtdata)
   713  		if err := l.pvtdataStore.Commit(blockNum, pvtData, missingPvtData); err != nil {
   714  			return err
   715  		}
   716  	} else {
   717  		logger.Debugf("Skipping writing pvtData to pvt data store as it is ahead of the block store")
   718  	}
   719  
   720  	if err := l.blockStore.AddBlock(blockAndPvtdata.Block); err != nil {
   721  		return err
   722  	}
   723  
   724  	if pvtdataStoreHt == blockNum+1 {
   725  		// We reach here only when the pvtdataStore was ahead of the blockStore
   726  		// at ledger initialization time.
   727  		// The pvtdataStore would be ahead of the blockstore when
   728  		// the peer restarts after a reset or rollback.
   729  		l.isPvtstoreAheadOfBlkstore.Store(false)
   730  	}
   731  
   732  	return nil
   733  }
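
        // Note on the pvtdataStoreHt == blockNum+1 check above (a reading of the code,
        // not additional behavior): pvtdataStoreHt was captured before this block was
        // committed, so it equals blockNum+1 exactly when the pvt data store had
        // committed up to (and no further than) this block while the block store had
        // not. Committing the block above therefore means the block store has just
        // caught up, and the "ahead" flag can be cleared.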
   734  
   735  func convertTxPvtDataArrayToMap(txPvtData []*ledger.TxPvtData) ledger.TxPvtDataMap {
   736  	txPvtDataMap := make(ledger.TxPvtDataMap)
   737  	for _, pvtData := range txPvtData {
   738  		txPvtDataMap[pvtData.SeqInBlock] = pvtData
   739  	}
   740  	return txPvtDataMap
   741  }
   742  
   743  func (l *kvLedger) updateBlockStats(
   744  	blockProcessingTime time.Duration,
   745  	blockstorageAndPvtdataCommitTime time.Duration,
   746  	statedbCommitTime time.Duration,
   747  	txstatsInfo []*validation.TxStatInfo,
   748  ) {
   749  	l.stats.updateBlockProcessingTime(blockProcessingTime)
   750  	l.stats.updateBlockstorageAndPvtdataCommitTime(blockstorageAndPvtdataCommitTime)
   751  	l.stats.updateStatedbCommitTime(statedbCommitTime)
   752  	l.stats.updateTransactionsStats(txstatsInfo)
   753  }
   754  
   755  // GetMissingPvtDataInfoForMostRecentBlocks returns the missing private data information for the
   756  // most recent `maxBlock` blocks that are missing at least some private data of an eligible collection.
   757  func (l *kvLedger) GetMissingPvtDataInfoForMostRecentBlocks(maxBlock int) (ledger.MissingPvtDataInfo, error) {
   758  	// the missing pvtData info in the pvtdataStore could belong to a block which is yet
   759  	// to be processed and committed to the blockStore and stateDB (such a scenario is possible
   760  	// after a peer rollback). In such cases, we cannot return missing pvtData info. Otherwise,
   761  	// we would end up in an inconsistent state database.
   762  	if l.isPvtstoreAheadOfBlkstore.Load().(bool) {
   763  		return nil, nil
   764  	}
   765  	// it is safe to not acquire a read lock on l.blockAPIsRWLock. Without a lock, the value of
   766  	// lastCommittedBlock can change due to a new block commit. As a result, we may not
   767  	// be able to fetch the missing data info of truly the most recent blocks. This
   768  	// decision was made to ensure that the regular block commit rate is not affected.
   769  	return l.pvtdataStore.GetMissingPvtDataInfoForMostRecentBlocks(maxBlock)
   770  }
   771  
   772  func (l *kvLedger) addBlockCommitHash(block *common.Block, updateBatchBytes []byte) {
   773  	var valueBytes []byte
   774  
   775  	txValidationCode := block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER]
   776  	valueBytes = append(valueBytes, proto.EncodeVarint(uint64(len(txValidationCode)))...)
   777  	valueBytes = append(valueBytes, txValidationCode...)
   778  	valueBytes = append(valueBytes, updateBatchBytes...)
   779  	valueBytes = append(valueBytes, l.commitHash...)
   780  
   781  	l.commitHash = util.ComputeSHA256(valueBytes)
   782  	block.Metadata.Metadata[common.BlockMetadataIndex_COMMIT_HASH] = protoutil.MarshalOrPanic(&common.Metadata{Value: l.commitHash})
   783  }
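
        // In effect, addBlockCommitHash maintains a hash chain over block commits. A
        // sketch of the value hashed for block N, derived from the code above (not a
        // normative specification of the format):
        //
        //   commitHash_N = SHA256( protoVarint(len(txValidationFlags_N)) ||
        //                          txValidationFlags_N                   ||
        //                          updateBatchBytes_N                    ||
        //                          commitHash_{N-1} )
        //
        // The result is stored in the block metadata at index
        // common.BlockMetadataIndex_COMMIT_HASH, wrapped in a common.Metadata message.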
   784  
   785  // GetPvtDataAndBlockByNum returns the block and the corresponding pvt data.
   786  // The pvt data is filtered by the list of 'collections' supplied
   787  func (l *kvLedger) GetPvtDataAndBlockByNum(blockNum uint64, filter ledger.PvtNsCollFilter) (*ledger.BlockAndPvtData, error) {
   788  	l.blockAPIsRWLock.RLock()
   789  	defer l.blockAPIsRWLock.RUnlock()
   790  
   791  	var block *common.Block
   792  	var pvtdata []*ledger.TxPvtData
   793  	var err error
   794  
   795  	if block, err = l.blockStore.RetrieveBlockByNumber(blockNum); err != nil {
   796  		return nil, err
   797  	}
   798  
   799  	if pvtdata, err = l.pvtdataStore.GetPvtDataByBlockNum(blockNum, filter); err != nil {
   800  		return nil, err
   801  	}
   802  
   803  	return &ledger.BlockAndPvtData{Block: block, PvtData: constructPvtdataMap(pvtdata)}, nil
   804  }
   805  
   806  // GetPvtDataByNum returns only the pvt data corresponding to the given block number
   807  // The pvt data is filtered by the list of 'collections' supplied
   808  func (l *kvLedger) GetPvtDataByNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
   809  	l.blockAPIsRWLock.RLock()
   810  	defer l.blockAPIsRWLock.RUnlock()
   811  	var pvtdata []*ledger.TxPvtData
   812  	var err error
   813  	if pvtdata, err = l.pvtdataStore.GetPvtDataByBlockNum(blockNum, filter); err != nil {
   814  		return nil, err
   815  	}
   816  	return pvtdata, nil
   817  }
   818  
   819  // DoesPvtDataInfoExist returns true when
   820  // (1) the ledger has pvtdata associated with the given block number (or)
   821  // (2) some or all pvtdata associated with the given block number are missing but the
   822  //     missing info is recorded in the ledger (or)
   823  // (3) the block is committed but it does not contain even a single
   824  //     transaction with pvtData.
   825  func (l *kvLedger) DoesPvtDataInfoExist(blockNum uint64) (bool, error) {
   826  	pvtStoreHt, err := l.pvtdataStore.LastCommittedBlockHeight()
   827  	if err != nil {
   828  		return false, err
   829  	}
   830  	return blockNum+1 <= pvtStoreHt, nil
   831  }
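
        // The height arithmetic above follows the usual "height = last block number + 1"
        // convention: pvtdata info for block N exists exactly when the pvt data store
        // height is at least N+1, i.e. the store has processed block N, even if it only
        // recorded missing-data info or the block carried no private data at all.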
   832  
   833  func (l *kvLedger) GetConfigHistoryRetriever() (ledger.ConfigHistoryRetriever, error) {
   834  	return l.configHistoryRetriever, nil
   835  }
   836  
   837  func (l *kvLedger) CommitPvtDataOfOldBlocks(reconciledPvtdata []*ledger.ReconciledPvtdata, unreconciled ledger.MissingPvtDataInfo) ([]*ledger.PvtdataHashMismatch, error) {
   838  	logger.Debugf("[%s:] Comparing pvtData of [%d] old blocks against the hashes in the transactions' rwsets to find valid and invalid data",
   839  		l.ledgerID, len(reconciledPvtdata))
   840  
   841  	lastBlockInBootstrapSnapshot := uint64(0)
   842  	if l.bootSnapshotMetadata != nil {
   843  		lastBlockInBootstrapSnapshot = l.bootSnapshotMetadata.LastBlockNumber
   844  	}
   845  
   846  	hashVerifiedPvtData, hashMismatches, err := constructValidAndInvalidPvtData(
   847  		reconciledPvtdata, l.blockStore, l.pvtdataStore, lastBlockInBootstrapSnapshot,
   848  	)
   849  	if err != nil {
   850  		return nil, err
   851  	}
   852  
   853  	err = l.applyValidTxPvtDataOfOldBlocks(hashVerifiedPvtData)
   854  	if err != nil {
   855  		return nil, err
   856  	}
   857  
   858  	logger.Debugf("[%s:] Committing pvtData of [%d] old blocks to the pvtdatastore", l.ledgerID, len(reconciledPvtdata))
   859  
   860  	err = l.pvtdataStore.CommitPvtDataOfOldBlocks(hashVerifiedPvtData, unreconciled)
   861  	if err != nil {
   862  		return nil, err
   863  	}
   864  
   865  	return hashMismatches, nil
   866  }
   867  
   868  func (l *kvLedger) applyValidTxPvtDataOfOldBlocks(hashVerifiedPvtData map[uint64][]*ledger.TxPvtData) error {
   869  	logger.Debugf("[%s:] Filtering pvtData of invalid transactions", l.ledgerID)
   870  
   871  	lastBlockInBootstrapSnapshot := uint64(0)
   872  	if l.bootSnapshotMetadata != nil {
   873  		lastBlockInBootstrapSnapshot = l.bootSnapshotMetadata.LastBlockNumber
   874  	}
   875  	committedPvtData, err := filterPvtDataOfInvalidTx(hashVerifiedPvtData, l.blockStore, lastBlockInBootstrapSnapshot)
   876  	if err != nil {
   877  		return err
   878  	}
   879  
   880  	// Assume the peer fails after storing the pvtData of an old block in the stateDB but before
   881  	// storing it in the block store. When the peer starts again, the reconciler finds that the
   882  	// pvtData is missing in the ledger store and hence fetches that data again. As
   883  	// a result, RemoveStaleAndCommitPvtDataOfOldBlocks receives already existing data. In this
   884  	// scenario, RemoveStaleAndCommitPvtDataOfOldBlocks simply replaces the old entry, as we
   885  	// always compare against the hashed version of this pvtData. There is no
   886  	// problem in terms of data consistency. However, if the reconciler is disabled before
   887  	// the peer restart, then the pvtData in the stateDB may not be in sync with the pvtData in
   888  	// the ledger store until the reconciler is enabled.
   889  	logger.Debugf("[%s:] Committing pvtData of [%d] old blocks to the stateDB", l.ledgerID, len(hashVerifiedPvtData))
   890  	return l.txmgr.RemoveStaleAndCommitPvtDataOfOldBlocks(committedPvtData)
   891  }
   892  
   893  func (l *kvLedger) GetMissingPvtDataTracker() (ledger.MissingPvtDataTracker, error) {
   894  	return l, nil
   895  }
   896  
   897  type commitNotifier struct {
   898  	dataChannel chan *ledger.CommitNotification
   899  	doneChannel <-chan struct{}
   900  }
   901  
   902  // CommitNotificationsChannel returns a read-only channel on which the ledger sends a `CommitNotification`
   903  // when a block is committed. The CommitNotification contains entries for the transactions from the committed block
   904  // that are not malformed, carry a legitimate TxID, and, in addition, are not marked as duplicate transactions.
   905  // The consumer can close the 'done' channel to signal that the notifications are no longer needed. This will cause the
   906  // CommitNotifications channel to close. There is expected to be only one consumer at a time. The function returns an error
   907  // if a CommitNotification channel is already active.
   908  func (l *kvLedger) CommitNotificationsChannel(done <-chan struct{}) (<-chan *ledger.CommitNotification, error) {
   909  	l.commitNotifierLock.Lock()
   910  	defer l.commitNotifierLock.Unlock()
   911  
   912  	if l.commitNotifier != nil {
   913  		return nil, errors.New("only one commit notifications channel is allowed at a time")
   914  	}
   915  
   916  	l.commitNotifier = &commitNotifier{
   917  		dataChannel: make(chan *ledger.CommitNotification, 10),
   918  		doneChannel: done,
   919  	}
   920  
   921  	return l.commitNotifier.dataChannel, nil
   922  }
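
        // A minimal consumer sketch (illustrative only; the hypothetical variable lgr is
        // assumed to be a ledger exposing this method):
        //
        //   done := make(chan struct{})
        //   notifications, err := lgr.CommitNotificationsChannel(done)
        //   if err != nil {
        //       // another notifications channel is already active
        //   }
        //   for n := range notifications {
        //       fmt.Printf("block %d committed with %d transaction(s)\n", n.BlockNumber, len(n.TxsInfo))
        //   }
        //   // Closing done (e.g. from another goroutine) causes the ledger to close the
        //   // notifications channel on the next block commit, ending the range loop.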
   923  
   924  func (l *kvLedger) sendCommitNotification(blockNum uint64, txStatsInfo []*validation.TxStatInfo) {
   925  	l.commitNotifierLock.Lock()
   926  	defer l.commitNotifierLock.Unlock()
   927  
   928  	if l.commitNotifier == nil {
   929  		return
   930  	}
   931  
   932  	select {
   933  	case <-l.commitNotifier.doneChannel:
   934  		close(l.commitNotifier.dataChannel)
   935  		l.commitNotifier = nil
   936  	default:
   937  		txsByID := map[string]struct{}{}
   938  		txs := []*ledger.CommitNotificationTxInfo{}
   939  		for _, t := range txStatsInfo {
   940  			txID := t.TxIDFromChannelHeader
   941  			_, ok := txsByID[txID]
   942  
   943  			if txID == "" || ok {
   944  				continue
   945  			}
   946  			txsByID[txID] = struct{}{}
   947  
   948  			txs = append(txs, &ledger.CommitNotificationTxInfo{
   949  				TxType:             t.TxType,
   950  				TxID:               t.TxIDFromChannelHeader,
   951  				ValidationCode:     t.ValidationCode,
   952  				ChaincodeID:        t.ChaincodeID,
   953  				ChaincodeEventData: t.ChaincodeEventData,
   954  			})
   955  		}
   956  
   957  		l.commitNotifier.dataChannel <- &ledger.CommitNotification{
   958  			BlockNumber: blockNum,
   959  			TxsInfo:     txs,
   960  		}
   961  	}
   962  }
   963  
   964  // Close closes `KVLedger`.
   965  // Currently this function is only used by test code. The caller should make sure there is no in-progress commit
   966  // or snapshot generation before calling this function. Otherwise, the ledger may behave unpredictably
   967  // and cause a panic.
   968  func (l *kvLedger) Close() {
   969  	l.blockStore.Shutdown()
   970  	l.txmgr.Shutdown()
   971  	l.snapshotMgr.shutdown()
   972  }
   973  
   974  type blocksItr struct {
   975  	blockAPIsRWLock *sync.RWMutex
   976  	blocksItr       commonledger.ResultsIterator
   977  }
   978  
   979  func (itr *blocksItr) Next() (commonledger.QueryResult, error) {
   980  	block, err := itr.blocksItr.Next()
   981  	if err != nil {
   982  		return nil, err
   983  	}
   984  	itr.blockAPIsRWLock.RLock()
   985  	itr.blockAPIsRWLock.RUnlock() //lint:ignore SA2001 syncpoint
   986  	return block, nil
   987  }
   988  
   989  func (itr *blocksItr) Close() {
   990  	itr.blocksItr.Close()
   991  }
   992  
   993  type collectionInfoRetriever struct {
   994  	ledgerID     string
   995  	ledger       ledger.PeerLedger
   996  	infoProvider ledger.DeployedChaincodeInfoProvider
   997  }
   998  
   999  func (r *collectionInfoRetriever) CollectionInfo(chaincodeName, collectionName string) (*peer.StaticCollectionConfig, error) {
  1000  	qe, err := r.ledger.NewQueryExecutor()
  1001  	if err != nil {
  1002  		return nil, err
  1003  	}
  1004  	defer qe.Done()
  1005  	return r.infoProvider.CollectionInfo(r.ledgerID, chaincodeName, collectionName, qe)
  1006  }
  1007  
  1008  type collectionConfigHistoryRetriever struct {
  1009  	*confighistory.Retriever
  1010  	ledger.DeployedChaincodeInfoProvider
  1011  
  1012  	ledger *kvLedger
  1013  }
  1014  
  1015  func (r *collectionConfigHistoryRetriever) MostRecentCollectionConfigBelow(
  1016  	blockNum uint64,
  1017  	chaincodeName string,
  1018  ) (*ledger.CollectionConfigInfo, error) {
  1019  	explicitCollections, err := r.Retriever.MostRecentCollectionConfigBelow(blockNum, chaincodeName)
  1020  	if err != nil {
  1021  		return nil, errors.WithMessage(err, "error while retrieving explicit collections")
  1022  	}
  1023  	qe, err := r.ledger.NewQueryExecutor()
  1024  	if err != nil {
  1025  		return nil, err
  1026  	}
  1027  	defer qe.Done()
  1028  	implicitCollections, err := r.ImplicitCollections(r.ledger.ledgerID, chaincodeName, qe)
  1029  	if err != nil {
  1030  		return nil, errors.WithMessage(err, "error while retrieving implicit collections")
  1031  	}
  1032  
  1033  	combinedCollections := explicitCollections
  1034  	if combinedCollections == nil {
  1035  		if implicitCollections == nil {
  1036  			return nil, nil
  1037  		}
  1038  		combinedCollections = &ledger.CollectionConfigInfo{
  1039  			CollectionConfig: &peer.CollectionConfigPackage{},
  1040  		}
  1041  	}
  1042  
  1043  	for _, c := range implicitCollections {
  1044  		cc := &peer.CollectionConfig{}
  1045  		cc.Payload = &peer.CollectionConfig_StaticCollectionConfig{StaticCollectionConfig: c}
  1046  		combinedCollections.CollectionConfig.Config = append(
  1047  			combinedCollections.CollectionConfig.Config,
  1048  			cc,
  1049  		)
  1050  	}
  1051  	return combinedCollections, nil
  1052  }
  1053  
  1054  type ccEventListenerAdaptor struct {
  1055  	legacyEventListener cceventmgmt.ChaincodeLifecycleEventListener
  1056  }
  1057  
  1058  func (a *ccEventListenerAdaptor) HandleChaincodeDeploy(chaincodeDefinition *ledger.ChaincodeDefinition, dbArtifactsTar []byte) error {
  1059  	return a.legacyEventListener.HandleChaincodeDeploy(&cceventmgmt.ChaincodeDefinition{
  1060  		Name:              chaincodeDefinition.Name,
  1061  		Hash:              chaincodeDefinition.Hash,
  1062  		Version:           chaincodeDefinition.Version,
  1063  		CollectionConfigs: chaincodeDefinition.CollectionConfigs,
  1064  	},
  1065  		dbArtifactsTar,
  1066  	)
  1067  }
  1068  
  1069  func (a *ccEventListenerAdaptor) ChaincodeDeployDone(succeeded bool) {
  1070  	a.legacyEventListener.ChaincodeDeployDone(succeeded)
  1071  }
  1072  
  1073  func filterPvtDataOfInvalidTx(
  1074  	hashVerifiedPvtData map[uint64][]*ledger.TxPvtData,
  1075  	blockStore *blkstorage.BlockStore,
  1076  	lastBlockInBootstrapSnapshot uint64,
  1077  ) (map[uint64][]*ledger.TxPvtData, error) {
  1078  	committedPvtData := make(map[uint64][]*ledger.TxPvtData)
  1079  	for blkNum, txsPvtData := range hashVerifiedPvtData {
  1080  		if blkNum <= lastBlockInBootstrapSnapshot {
  1081  			committedPvtData[blkNum] = txsPvtData
  1082  			continue
  1083  		}
  1084  		// TODO: Instead of retrieving the whole block, we need to retrieve only
  1085  		// the TxValidationFlags from the block metadata. For that, we would need
  1086  		// to add a new index for the block metadata - FAB-15808
  1087  		block, err := blockStore.RetrieveBlockByNumber(blkNum)
  1088  		if err != nil {
  1089  			return nil, err
  1090  		}
  1091  		blockValidationFlags := txflags.ValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
  1092  
  1093  		var blksPvtData []*ledger.TxPvtData
  1094  		for _, pvtData := range txsPvtData {
  1095  			if blockValidationFlags.IsValid(int(pvtData.SeqInBlock)) {
  1096  				blksPvtData = append(blksPvtData, pvtData)
  1097  			}
  1098  		}
  1099  		committedPvtData[blkNum] = blksPvtData
  1100  	}
  1101  	return committedPvtData, nil
  1102  }
  1103  
  1104  func constructPvtdataMap(pvtdata []*ledger.TxPvtData) ledger.TxPvtDataMap {
  1105  	if pvtdata == nil {
  1106  		return nil
  1107  	}
  1108  	m := make(map[uint64]*ledger.TxPvtData)
  1109  	for _, pvtdatum := range pvtdata {
  1110  		m[pvtdatum.SeqInBlock] = pvtdatum
  1111  	}
  1112  	return m
  1113  }
  1114  
  1115  func constructPvtDataAndMissingData(blockAndPvtData *ledger.BlockAndPvtData) ([]*ledger.TxPvtData,
  1116  	ledger.TxMissingPvtData) {
  1117  	var pvtData []*ledger.TxPvtData
  1118  	missingPvtData := make(ledger.TxMissingPvtData)
  1119  
  1120  	numTxs := uint64(len(blockAndPvtData.Block.Data.Data))
  1121  
  1122  	for txNum := uint64(0); txNum < numTxs; txNum++ {
  1123  		if pvtdata, ok := blockAndPvtData.PvtData[txNum]; ok {
  1124  			pvtData = append(pvtData, pvtdata)
  1125  		}
  1126  
  1127  		if missingData, ok := blockAndPvtData.MissingPvtData[txNum]; ok {
  1128  			for _, missing := range missingData {
  1129  				missingPvtData.Add(txNum, missing.Namespace,
  1130  					missing.Collection, missing.IsEligible)
  1131  			}
  1132  		}
  1133  	}
  1134  	return pvtData, missingPvtData
  1135  }