github.com/osdi23p228/fabric@v0.0.0-20221218062954-77808885f5db/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package txmgr
     8  
     9  import (
    10  	"bytes"
    11  	"sync"
    12  
    13  	"github.com/golang/protobuf/proto"
    14  	"github.com/hyperledger/fabric-protos-go/common"
    15  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    16  	"github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset"
    17  	"github.com/osdi23p228/fabric/common/flogging"
    18  	"github.com/osdi23p228/fabric/common/ledger/snapshot"
    19  	"github.com/osdi23p228/fabric/core/ledger"
    20  	"github.com/osdi23p228/fabric/core/ledger/internal/version"
    21  	"github.com/osdi23p228/fabric/core/ledger/kvledger/bookkeeping"
    22  	"github.com/osdi23p228/fabric/core/ledger/kvledger/txmgmt/privacyenabledstate"
    23  	"github.com/osdi23p228/fabric/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt"
    24  	"github.com/osdi23p228/fabric/core/ledger/kvledger/txmgmt/queryutil"
    25  	"github.com/osdi23p228/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
    26  	"github.com/osdi23p228/fabric/core/ledger/kvledger/txmgmt/validation"
    27  	"github.com/osdi23p228/fabric/core/ledger/pvtdatapolicy"
    28  	"github.com/osdi23p228/fabric/core/ledger/util"
    29  	"github.com/pkg/errors"
    30  )
    31  
    32  var logger = flogging.MustGetLogger("lockbasedtxmgr")
    33  
    34  // LockBasedTxMgr is a simple implementation of the interface `txmgmt.TxMgr`.
    35  // This implementation uses a read-write lock to prevent conflicts between transaction simulation and committing.
    36  type LockBasedTxMgr struct {
    37  	ledgerid            string
    38  	db                  *privacyenabledstate.DB
    39  	pvtdataPurgeMgr     *pvtdataPurgeMgr
    40  	commitBatchPreparer *validation.CommitBatchPreparer
    41  	stateListeners      []ledger.StateListener
    42  	ccInfoProvider      ledger.DeployedChaincodeInfoProvider
    43  	commitRWLock        sync.RWMutex // read-locked by query executors and tx simulators, write-locked while Commit() applies updates to the state DB
    44  	oldBlockCommit      sync.Mutex   // serializes ValidateAndPrepare(), Commit(), and RemoveStaleAndCommitPvtDataOfOldBlocks()
    45  	current             *current     // the validated block and update batch staged by ValidateAndPrepare() for the next Commit()
    46  	hashFunc            rwsetutil.HashFunc
    47  }
    48  
    49  // pvtdataPurgeMgr wraps the actual purge manager and an additional flag 'usedOnce'.
    50  // For the usage of this flag, see the relevant comments in the txmgr.Commit() function below.
    51  type pvtdataPurgeMgr struct {
    52  	*pvtstatepurgemgmt.PurgeMgr
    53  	usedOnce bool
    54  }
    55  
    56  // ErrUnsupportedTransaction is expected to be returned if an unsupported query is performed in an update transaction
    57  type ErrUnsupportedTransaction struct {
    58  	Msg string
    59  }
    60  
    61  func (e *ErrUnsupportedTransaction) Error() string {
    62  	return e.Msg
    63  }
    64  
    65  // ErrPvtdataNotAvailable is to be returned when an application seeks a private data item
    66  // during simulation and the simulator is not capable of returning the version of the
    67  // private data item consistent with the snapshot exposed to the simulation
    68  type ErrPvtdataNotAvailable struct {
    69  	Msg string
    70  }
    71  
    72  func (e *ErrPvtdataNotAvailable) Error() string {
    73  	return e.Msg
    74  }
    75  
    76  type current struct {
    77  	block     *common.Block
    78  	batch     *privacyenabledstate.UpdateBatch
    79  	listeners []ledger.StateListener
    80  }
    81  
    82  func (c *current) blockNum() uint64 {
    83  	return c.block.Header.Number
    84  }
    85  
    86  func (c *current) maxTxNumber() uint64 {
    87  	return uint64(len(c.block.Data.Data)) - 1
    88  }
    89  
    90  // Initializer captures the dependencies for the tx manager
    91  type Initializer struct {
    92  	LedgerID            string
    93  	DB                  *privacyenabledstate.DB
    94  	StateListeners      []ledger.StateListener
    95  	BtlPolicy           pvtdatapolicy.BTLPolicy
    96  	BookkeepingProvider bookkeeping.Provider
    97  	CCInfoProvider      ledger.DeployedChaincodeInfoProvider
    98  	CustomTxProcessors  map[common.HeaderType]ledger.CustomTxProcessor
    99  	HashFunc            rwsetutil.HashFunc
   100  }
   101  
   102  // NewLockBasedTxMgr constructs a new instance of LockBasedTxMgr
   103  func NewLockBasedTxMgr(initializer *Initializer) (*LockBasedTxMgr, error) {
   104  
   105  	if initializer.HashFunc == nil {
   106  		return nil, errors.New("create new lock based TxMgr failed: passed in nil ledger hasher")
   107  	}
   108  
   109  	if err := initializer.DB.Open(); err != nil {
   110  		return nil, err
   111  	}
   112  	txmgr := &LockBasedTxMgr{
   113  		ledgerid:       initializer.LedgerID,
   114  		db:             initializer.DB,
   115  		stateListeners: initializer.StateListeners,
   116  		ccInfoProvider: initializer.CCInfoProvider,
   117  		hashFunc:       initializer.HashFunc,
   118  	}
   119  	pvtstatePurgeMgr, err := pvtstatepurgemgmt.InstantiatePurgeMgr(
   120  		initializer.LedgerID,
   121  		initializer.DB,
   122  		initializer.BtlPolicy,
   123  		initializer.BookkeepingProvider)
   124  	if err != nil {
   125  		return nil, err
   126  	}
   127  	txmgr.pvtdataPurgeMgr = &pvtdataPurgeMgr{pvtstatePurgeMgr, false}
   128  	txmgr.commitBatchPreparer = validation.NewCommitBatchPreparer(
   129  		txmgr,
   130  		initializer.DB,
   131  		initializer.CustomTxProcessors,
   132  		initializer.HashFunc)
   133  	return txmgr, nil
   134  }
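
// newTxMgrSketch is an illustrative usage sketch of the Initializer above: it shows the wiring
// only, and assumes the caller has already constructed the state DB, the BTL policy, the
// bookkeeping provider, and the chaincode info provider (in production, kvledger does this).
// The ledger ID is a hypothetical example value.
func newTxMgrSketch(
	db *privacyenabledstate.DB,
	btlPolicy pvtdatapolicy.BTLPolicy,
	bookkeepingProvider bookkeeping.Provider,
	ccInfoProvider ledger.DeployedChaincodeInfoProvider,
	hashFunc rwsetutil.HashFunc,
) (*LockBasedTxMgr, error) {
	return NewLockBasedTxMgr(&Initializer{
		LedgerID:            "mychannel", // hypothetical channel/ledger ID
		DB:                  db,
		BtlPolicy:           btlPolicy,
		BookkeepingProvider: bookkeepingProvider,
		CCInfoProvider:      ccInfoProvider,
		HashFunc:            hashFunc, // must be non-nil; NewLockBasedTxMgr rejects a nil hasher
	})
}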
   135  
   136  // GetLastSavepoint returns the savepoint height (block number and tx number) recorded in the state DB;
   137  // it returns nil if no savepoint is found
   138  func (txmgr *LockBasedTxMgr) GetLastSavepoint() (*version.Height, error) {
   139  	return txmgr.db.GetLatestSavePoint()
   140  }
   141  
   142  // NewQueryExecutor implements method in interface `txmgmt.TxMgr`
   143  func (txmgr *LockBasedTxMgr) NewQueryExecutor(txid string) (ledger.QueryExecutor, error) {
   144  	qe := newQueryExecutor(txmgr, txid, nil, true, txmgr.hashFunc)
   145  	txmgr.commitRWLock.RLock()
   146  	return qe, nil
   147  }
   148  
   149  // NewQueryExecutorNoCollChecks is a workaround to make the initialization of the lifecycle cache
   150  // work. The issue is that in the current lifecycle code the cache is initialized via the Initialize
   151  // function of a state listener which gets invoked during ledger opening. This invocation eventually
   152  // leads to a call to DeployedChaincodeInfoProvider which in turn needs the channel config in order
   153  // to verify the name of the implicit collection. And the channel config is loaded only after the
   154  // ledger is opened. So, as a workaround, we skip the check of the collection name in this function
   155  // by supplying a relaxed query executor - this is otherwise perfectly safe.
   156  // As a proper fix, the initialization of the other components should take place outside the ledger, by
   157  // explicitly querying the ledger state so that the sequence of initialization is explicitly controlled.
   158  // However, that needs a bigger refactoring of the code.
   159  func (txmgr *LockBasedTxMgr) NewQueryExecutorNoCollChecks() (ledger.QueryExecutor, error) {
   160  	qe := newQueryExecutor(txmgr, "", nil, false, txmgr.hashFunc)
   161  	txmgr.commitRWLock.RLock()
   162  	return qe, nil
   163  }
   164  
   165  // NewTxSimulator implements method in interface `txmgmt.TxMgr`
   166  func (txmgr *LockBasedTxMgr) NewTxSimulator(txid string) (ledger.TxSimulator, error) {
   167  	logger.Debugf("constructing new tx simulator")
   168  	s, err := newTxSimulator(txmgr, txid, txmgr.hashFunc)
   169  	if err != nil {
   170  		return nil, err
   171  	}
   172  	txmgr.commitRWLock.RLock()
   173  	return s, nil
   174  }
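
// simulateSketch is an illustrative usage sketch of NewTxSimulator above: the simulator holds
// commitRWLock.RLock() until Done() is called, so the caller must always release it. The sketch
// assumes the GetState, SetState, GetTxSimulationResults, and Done methods of the
// ledger.TxSimulator interface; the namespace, key, and value are hypothetical.
func simulateSketch(txmgr *LockBasedTxMgr, txid string) error {
	sim, err := txmgr.NewTxSimulator(txid)
	if err != nil {
		return err
	}
	defer sim.Done() // releases the read lock acquired in NewTxSimulator
	if _, err := sim.GetState("mycc", "key1"); err != nil {
		return err
	}
	if err := sim.SetState("mycc", "key1", []byte("value1")); err != nil {
		return err
	}
	// the read-write set is handed back to the caller (e.g., for endorsement)
	_, err = sim.GetTxSimulationResults()
	return err
}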
   175  
   176  // ValidateAndPrepare implements method in interface `txmgmt.TxMgr`
   177  func (txmgr *LockBasedTxMgr) ValidateAndPrepare(blockAndPvtdata *ledger.BlockAndPvtData, doMVCCValidation bool) (
   178  	[]*validation.TxStatInfo, []byte, error,
   179  ) {
   180  	// Among ValidateAndPrepare(), PrepareExpiringKeys(), and
   181  	// RemoveStaleAndCommitPvtDataOfOldBlocks(), we can allow only one
   182  	// function to execute at a time. The reason is that each function calls
   183  	// LoadCommittedVersions() which would clear the existing entries in the
   184  	// transient buffer and load new entries (such a transient buffer is not
   185  	// applicable for goleveldb). As a result, these three functions can
   186  	// interleave and nullify the optimization provided by the bulk read API.
   187  	// Once the ledger cache (FAB-103) is introduced and the existing
   188  	// LoadCommittedVersions() is refactored to return a map, we can allow
   189  	// these three functions to execute in parallel.
   190  	logger.Debugf("Waiting for purge mgr to finish the background job of computing expiring keys for the block")
   191  	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
   192  	txmgr.oldBlockCommit.Lock()
   193  	defer txmgr.oldBlockCommit.Unlock()
   194  	logger.Debug("lock acquired on oldBlockCommit for validating read set version against the committed version")
   195  
   196  	block := blockAndPvtdata.Block
   197  	logger.Debugf("Validating new block with num trans = [%d]", len(block.Data.Data))
   198  	batch, txstatsInfo, err := txmgr.commitBatchPreparer.ValidateAndPrepareBatch(blockAndPvtdata, doMVCCValidation)
   199  	if err != nil {
   200  		txmgr.reset()
   201  		return nil, nil, err
   202  	}
   203  	txmgr.current = &current{block: block, batch: batch}
   204  	if err := txmgr.invokeNamespaceListeners(); err != nil {
   205  		txmgr.reset()
   206  		return nil, nil, err
   207  	}
   208  
   209  	updateBytes, err := deterministicBytesForPubAndHashUpdates(batch)
   210  	return txstatsInfo, updateBytes, err
   211  }
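
// commitBlockSketch is an illustrative sketch of the regular block-commit sequence that a
// consumer such as kvledger drives: ValidateAndPrepare must succeed before Commit, because
// Commit panics if no prepared batch has been staged. MVCC validation is enabled here;
// CommitLostBlock further below shows the same flow with it disabled.
func commitBlockSketch(txmgr *LockBasedTxMgr, blockAndPvtdata *ledger.BlockAndPvtData) error {
	// validate the transactions in the block and stage the resulting update batch
	if _, _, err := txmgr.ValidateAndPrepare(blockAndPvtdata, true); err != nil {
		return err
	}
	// apply the staged batch to the state database and notify the state listeners
	return txmgr.Commit()
}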
   212  
   213  // RemoveStaleAndCommitPvtDataOfOldBlocks implements method in interface `txmgmt.TxMgr`
   214  // The following six operations are performed:
   215  // (1) constructs the unique pvt data from the passed reconciledPvtdata
   216  // (2) acquires a lock on oldBlockCommit
   217  // (3) checks for stale pvtData by comparing [version, valueHash] and removes the stale data
   218  // (4) creates an update batch from the non-stale pvtData
   219  // (5) updates the BTL bookkeeping managed by the purge manager and updates the expiring keys
   220  // (6) commits the non-stale pvt data to the stateDB
   221  // This function assumes that the passed input contains only transactions that had been
   222  // marked "Valid". In the current design, kvledger (the single consumer of this function)
   223  // filters out the data of "invalid" transactions and supplies the data for "valid" transactions only.
   224  func (txmgr *LockBasedTxMgr) RemoveStaleAndCommitPvtDataOfOldBlocks(reconciledPvtdata map[uint64][]*ledger.TxPvtData) error {
   225  	// (0) Among ValidateAndPrepare(), PrepareExpiringKeys(), and
   226  	// RemoveStaleAndCommitPvtDataOfOldBlocks(), we can allow only one
   227  	// function to execute at a time. The reason is that each function calls
   228  	// LoadCommittedVersions() which would clear the existing entries in the
   229  	// transient buffer and load new entries (such a transient buffer is not
   230  	// applicable for goleveldb). As a result, these three functions can
   231  	// interleave and nullify the optimization provided by the bulk read API.
   232  	// Once the ledger cache (FAB-103) is introduced and the existing
   233  	// LoadCommittedVersions() is refactored to return a map, we can allow
   234  	// these three functions to execute in parallel. However, we cannot remove
   235  	// the lock on oldBlockCommit as it is also used to avoid interleaving
   236  	// between Commit() and the execution of this function, for correctness.
   237  	logger.Debug("Waiting for purge mgr to finish the background job of computing expiring keys for the block")
   238  	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
   239  	txmgr.oldBlockCommit.Lock()
   240  	defer txmgr.oldBlockCommit.Unlock()
   241  	logger.Debug("lock acquired on oldBlockCommit for committing pvtData of old blocks to state database")
   242  
   243  	// (1) as the reconciledPvtdata can contain multiple versions of pvtData for
   244  	// a given <ns, coll, key>, we need to find duplicate tuples with different
   245  	// versions and use the one with the higher version
   246  	logger.Debug("Constructing unique pvtData by removing duplicate entries")
   247  	uniquePvtData, err := constructUniquePvtData(reconciledPvtdata)
   248  	if len(uniquePvtData) == 0 || err != nil {
   249  		return err
   250  	}
   251  
   252  	// (3) remove the pvt data which does not match the hashed
   253  	// value stored in the public state
   254  	logger.Debug("Finding and removing stale pvtData")
   255  	if err := uniquePvtData.findAndRemoveStalePvtData(txmgr.db); err != nil {
   256  		return err
   257  	}
   258  
   259  	// (4) create the update batch from the uniquePvtData
   260  	batch := uniquePvtData.transformToUpdateBatch()
   261  
   262  	// (5) update bookkeeping in the purge manager and update toPurgeList
   263  	// (i.e., the list of expiring keys). As the expiring keys would have
   264  	// been constructed during the last PrepareExpiringKeys triggered by a block
   265  	// commit, we need to update the list: RemoveStaleAndCommitPvtDataOfOldBlocks
   266  	// may have added new data which might be eligible for expiry during the
   267  	// next regular block commit.
   268  	logger.Debug("Updating expiry info in the purge manager")
   269  	if err := txmgr.pvtdataPurgeMgr.UpdateExpiryInfoOfPvtDataOfOldBlocks(batch.PvtUpdates); err != nil {
   270  		return err
   271  	}
   272  
   273  	// (6) commit the pvt data to the stateDB
   274  	logger.Debug("Committing updates to state database")
   275  	if err := txmgr.db.ApplyPrivacyAwareUpdates(batch, nil); err != nil {
   276  		return err
   277  	}
   278  	return nil
   279  }
   280  
   281  type uniquePvtDataMap map[privacyenabledstate.HashedCompositeKey]*privacyenabledstate.PvtKVWrite
   282  
   283  func constructUniquePvtData(reconciledPvtdata map[uint64][]*ledger.TxPvtData) (uniquePvtDataMap, error) {
   284  	uniquePvtData := make(uniquePvtDataMap)
   285  	// go over the reconciledPvtdata to find duplicate <ns, coll, key>
   286  	// in the pvtWrites and use the one with the higher version number
   287  	for blkNum, blockPvtData := range reconciledPvtdata {
   288  		if err := uniquePvtData.updateUsingBlockPvtdata(blockPvtData, blkNum); err != nil {
   289  			return nil, err
   290  		}
   291  	} // for each block
   292  	return uniquePvtData, nil
   293  }
   294  
   295  func (uniquePvtData uniquePvtDataMap) updateUsingBlockPvtdata(blockPvtData []*ledger.TxPvtData, blkNum uint64) error {
   296  	for _, txPvtData := range blockPvtData {
   297  		ver := version.NewHeight(blkNum, txPvtData.SeqInBlock)
   298  		if err := uniquePvtData.updateUsingTxPvtData(txPvtData, ver); err != nil {
   299  			return err
   300  		}
   301  	} // for each tx
   302  	return nil
   303  }
   304  func (uniquePvtData uniquePvtDataMap) updateUsingTxPvtData(txPvtData *ledger.TxPvtData, ver *version.Height) error {
   305  	for _, nsPvtData := range txPvtData.WriteSet.NsPvtRwset {
   306  		if err := uniquePvtData.updateUsingNsPvtData(nsPvtData, ver); err != nil {
   307  			return err
   308  		}
   309  	} // for each ns
   310  	return nil
   311  }
   312  func (uniquePvtData uniquePvtDataMap) updateUsingNsPvtData(nsPvtData *rwset.NsPvtReadWriteSet, ver *version.Height) error {
   313  	for _, collPvtData := range nsPvtData.CollectionPvtRwset {
   314  		if err := uniquePvtData.updateUsingCollPvtData(collPvtData, nsPvtData.Namespace, ver); err != nil {
   315  			return err
   316  		}
   317  	} // for each coll
   318  	return nil
   319  }
   320  
   321  func (uniquePvtData uniquePvtDataMap) updateUsingCollPvtData(collPvtData *rwset.CollectionPvtReadWriteSet,
   322  	ns string, ver *version.Height) error {
   323  
   324  	kvRWSet := &kvrwset.KVRWSet{}
   325  	if err := proto.Unmarshal(collPvtData.Rwset, kvRWSet); err != nil {
   326  		return err
   327  	}
   328  
   329  	hashedCompositeKey := privacyenabledstate.HashedCompositeKey{
   330  		Namespace:      ns,
   331  		CollectionName: collPvtData.CollectionName,
   332  	}
   333  
   334  	for _, kvWrite := range kvRWSet.Writes { // for each kv pair
   335  		hashedCompositeKey.KeyHash = string(util.ComputeStringHash(kvWrite.Key))
   336  		uniquePvtData.updateUsingPvtWrite(kvWrite, hashedCompositeKey, ver)
   337  	} // for each kv pair
   338  
   339  	return nil
   340  }
   341  
   342  func (uniquePvtData uniquePvtDataMap) updateUsingPvtWrite(pvtWrite *kvrwset.KVWrite,
   343  	hashedCompositeKey privacyenabledstate.HashedCompositeKey, ver *version.Height) {
   344  
   345  	pvtData, ok := uniquePvtData[hashedCompositeKey]
   346  	if !ok || pvtData.Version.Compare(ver) < 0 {
   347  		uniquePvtData[hashedCompositeKey] =
   348  			&privacyenabledstate.PvtKVWrite{
   349  				Key:      pvtWrite.Key,
   350  				IsDelete: rwsetutil.IsKVWriteDelete(pvtWrite),
   351  				Value:    pvtWrite.Value,
   352  				Version:  ver,
   353  			}
   354  	}
   355  }
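
// pickNewerPvtWrite is an illustrative sketch of the rule that updateUsingPvtWrite applies when
// the reconciled data carries duplicate <ns, coll, keyHash> tuples: the write with the greater
// (blockNum, txNum) height wins.
func pickNewerPvtWrite(a, b *privacyenabledstate.PvtKVWrite) *privacyenabledstate.PvtKVWrite {
	if a.Version.Compare(b.Version) < 0 {
		return b // b was committed at a greater height, so it supersedes a
	}
	return a
}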
   356  
   357  func (uniquePvtData uniquePvtDataMap) findAndRemoveStalePvtData(db *privacyenabledstate.DB) error {
   358  	// (1) load all committed versions
   359  	if err := uniquePvtData.loadCommittedVersionIntoCache(db); err != nil {
   360  		return err
   361  	}
   362  
   363  	// (2) find and remove the stale data
   364  	for hashedCompositeKey, pvtWrite := range uniquePvtData {
   365  		isStale, err := checkIfPvtWriteIsStale(&hashedCompositeKey, pvtWrite, db)
   366  		if err != nil {
   367  			return err
   368  		}
   369  		if isStale {
   370  			delete(uniquePvtData, hashedCompositeKey)
   371  		}
   372  	}
   373  	return nil
   374  }
   375  
   376  func (uniquePvtData uniquePvtDataMap) loadCommittedVersionIntoCache(db *privacyenabledstate.DB) error {
   377  	// Note that ClearCachedVersions would not be called until we validate and commit this
   378  	// pvt data of old blocks. This is because the cache is cleared only while the exclusive
   379  	// lock is held, and we have already acquired it before reaching here.
   380  	var hashedCompositeKeys []*privacyenabledstate.HashedCompositeKey
   381  	for hashedCompositeKey := range uniquePvtData {
   382  		// tempKey ensures a different pointer is added to the slice for each key
   383  		tempKey := hashedCompositeKey
   384  		hashedCompositeKeys = append(hashedCompositeKeys, &tempKey)
   385  	}
   386  
   387  	err := db.LoadCommittedVersionsOfPubAndHashedKeys(nil, hashedCompositeKeys)
   388  	if err != nil {
   389  		return err
   390  	}
   391  	return nil
   392  }
   393  
   394  func checkIfPvtWriteIsStale(hashedKey *privacyenabledstate.HashedCompositeKey,
   395  	kvWrite *privacyenabledstate.PvtKVWrite, db *privacyenabledstate.DB) (bool, error) {
   396  
   397  	ns := hashedKey.Namespace
   398  	coll := hashedKey.CollectionName
   399  	keyHashBytes := []byte(hashedKey.KeyHash)
   400  	committedVersion, err := db.GetKeyHashVersion(ns, coll, keyHashBytes)
   401  	if err != nil {
   402  		return true, err
   403  	}
   404  
   405  	// for a deleted hashedKey, we would get a nil committed version. Note that
   406  	// the hashedKey was deleted either because it expired or because it was deleted
   407  	// by the chaincode itself.
   408  	if committedVersion == nil {
   409  		return !kvWrite.IsDelete, nil
   410  	}
   411  
   412  	/*
   413  		TODO: FAB-12922
   414  		In the first round, we need to check the version of the passed pvtData
   415  		against the version of the pvtData stored in the stateDB. In the second round,
   416  		for the remaining pvtData, we need to check for staleness using the hashed
   417  		version. In the third round, for the still remaining pvtData, we need
   418  		to check against the hashed values. In each phase, we would need to
   419  		perform a bulk load of the relevant data from the stateDB.
   420  		committedPvtData, err := db.GetPrivateData(ns, coll, kvWrite.Key)
   421  		if err != nil {
   422  			return false, err
   423  		}
   424  		if committedPvtData.Version.Compare(kvWrite.Version) > 0 {
   425  			return false, nil
   426  		}
   427  	*/
   428  	if version.AreSame(committedVersion, kvWrite.Version) {
   429  		return false, nil
   430  	}
   431  
   432  	// due to metadata updates, we could get a version
   433  	// mismatch between the pvt kv write and the committed
   434  	// hashedKey. In this case, we must compare the hash
   435  	// of the value. If the hash matches, we should update
   436  	// the version number in the pvt kv write and treat
   437  	// the write as not stale (i.e., return false)
   438  	vv, err := db.GetValueHash(ns, coll, keyHashBytes)
   439  	if err != nil {
   440  		return true, err
   441  	}
   442  	if bytes.Equal(vv.Value, util.ComputeHash(kvWrite.Value)) {
   443  		// if hash of value matches, update version
   444  		// and return true
   445  		kvWrite.Version = vv.Version // side effect
   446  		// (checkIfPvtWriteIsStale should not be updating the state)
   447  		return false, nil
   448  	}
   449  	return true, nil
   450  }
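
// The table below is an illustrative summary of the staleness decision implemented by
// checkIfPvtWriteIsStale above:
//
//	committed version (by key hash) | reconciled pvt write | stale?
//	--------------------------------+----------------------+-------
//	nil (key expired or deleted)    | delete               | no
//	nil (key expired or deleted)    | upsert               | yes
//	same height as the write        | any                  | no
//	different height                | value hash matches   | no (the write's version is refreshed)
//	different height                | value hash differs   | yes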
   451  
   452  func (uniquePvtData uniquePvtDataMap) transformToUpdateBatch() *privacyenabledstate.UpdateBatch {
   453  	batch := privacyenabledstate.NewUpdateBatch()
   454  	for hashedCompositeKey, pvtWrite := range uniquePvtData {
   455  		ns := hashedCompositeKey.Namespace
   456  		coll := hashedCompositeKey.CollectionName
   457  		if pvtWrite.IsDelete {
   458  			batch.PvtUpdates.Delete(ns, coll, pvtWrite.Key, pvtWrite.Version)
   459  		} else {
   460  			batch.PvtUpdates.Put(ns, coll, pvtWrite.Key, pvtWrite.Value, pvtWrite.Version)
   461  		}
   462  	}
   463  	return batch
   464  }
   465  
   466  func (txmgr *LockBasedTxMgr) invokeNamespaceListeners() error {
   467  	for _, listener := range txmgr.stateListeners {
   468  		stateUpdatesForListener := extractStateUpdates(txmgr.current.batch, listener.InterestedInNamespaces())
   469  		if len(stateUpdatesForListener) == 0 {
   470  			continue
   471  		}
   472  		txmgr.current.listeners = append(txmgr.current.listeners, listener)
   473  
   474  		committedStateQueryExecuter := &queryutil.QECombiner{
   475  			QueryExecuters: []queryutil.QueryExecuter{txmgr.db}}
   476  
   477  		postCommitQueryExecuter := &queryutil.QECombiner{
   478  			QueryExecuters: []queryutil.QueryExecuter{
   479  				&queryutil.UpdateBatchBackedQueryExecuter{
   480  					UpdateBatch:      txmgr.current.batch.PubUpdates.UpdateBatch,
   481  					HashUpdatesBatch: txmgr.current.batch.HashUpdates,
   482  				},
   483  				txmgr.db,
   484  			},
   485  		}
   486  
   487  		trigger := &ledger.StateUpdateTrigger{
   488  			LedgerID:                    txmgr.ledgerid,
   489  			StateUpdates:                stateUpdatesForListener,
   490  			CommittingBlockNum:          txmgr.current.blockNum(),
   491  			CommittedStateQueryExecutor: committedStateQueryExecuter,
   492  			PostCommitQueryExecutor:     postCommitQueryExecuter,
   493  		}
   494  		if err := listener.HandleStateUpdates(trigger); err != nil {
   495  			return err
   496  		}
   497  		logger.Debugf("Invoking listener for state changes:%s", listener.Name())
   498  	}
   499  	return nil
   500  }
   501  
   502  // Shutdown implements method in interface `txmgmt.TxMgr`
   503  func (txmgr *LockBasedTxMgr) Shutdown() {
   504  	// wait for the background goroutine to finish, else a timing issue causes a nil pointer dereference inside the goleveldb code
   505  	// see FAB-11974
   506  	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
   507  	txmgr.db.Close()
   508  }
   509  
   510  // Commit implements method in interface `txmgmt.TxMgr`
   511  func (txmgr *LockBasedTxMgr) Commit() error {
   512  	// we need to acquire a lock on oldBlockCommit for the following two reasons:
   513  	// (1) DeleteExpiredAndUpdateBookkeeping() would perform an incorrect operation if
   514  	//     toPurgeList were updated concurrently by RemoveStaleAndCommitPvtDataOfOldBlocks().
   515  	// (2) RemoveStaleAndCommitPvtDataOfOldBlocks computes its update
   516  	//     batch based on the current state; if we allowed regular block commits at the
   517  	//     same time, the former could overwrite the newer versions of the data and we could
   518  	//     end up with an incorrect update batch.
   519  	txmgr.oldBlockCommit.Lock()
   520  	defer txmgr.oldBlockCommit.Unlock()
   521  	logger.Debug("lock acquired on oldBlockCommit for committing regular updates to state database")
   522  
   523  	// When using the purge manager for the first block commit after peer start, the asynchronous function
   524  	// 'PrepareForExpiringKeys' is invoked in-line. However, for subsequent block commits, this function is invoked
   525  	// in advance for the next block
   526  	if !txmgr.pvtdataPurgeMgr.usedOnce {
   527  		txmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum())
   528  		txmgr.pvtdataPurgeMgr.usedOnce = true
   529  	}
   530  	defer func() {
   531  		txmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum() + 1)
   532  		logger.Debugf("launched the background routine for preparing keys to purge with the next block")
   533  		txmgr.reset()
   534  	}()
   535  
   536  	logger.Debugf("Committing updates to state database")
   537  	if txmgr.current == nil {
   538  		panic("validateAndPrepare() method should have been called before calling commit()")
   539  	}
   540  
   541  	if err := txmgr.pvtdataPurgeMgr.UpdateExpiryInfo(
   542  		txmgr.current.batch.PvtUpdates, txmgr.current.batch.HashUpdates); err != nil {
   543  		return err
   544  	}
   545  
   546  	if err := txmgr.pvtdataPurgeMgr.AddExpiredEntriesToUpdateBatch(
   547  		txmgr.current.batch.PvtUpdates, txmgr.current.batch.HashUpdates); err != nil {
   548  		return err
   549  	}
   550  
   551  	commitHeight := version.NewHeight(txmgr.current.blockNum(), txmgr.current.maxTxNumber())
   552  	txmgr.commitRWLock.Lock()
   553  	logger.Debugf("Write lock acquired for committing updates to state database")
   554  	if err := txmgr.db.ApplyPrivacyAwareUpdates(txmgr.current.batch, commitHeight); err != nil {
   555  		txmgr.commitRWLock.Unlock()
   556  		return err
   557  	}
   558  	txmgr.commitRWLock.Unlock()
   559  	// we should clear the cache only while holding the lock on oldBlockCommit, as the
   560  	// cache is used by the old pvtData committer to load the versions of the
   561  	// hashedKeys. Also, note that PrepareForExpiringKeys uses the cache.
   562  	txmgr.clearCache()
   563  	logger.Debugf("Updates committed to state database and the write lock is released")
   564  
   565  	// the purge manager should be called after committing to the statedb (in this call, the purge mgr removes the expiry entries from its schedule)
   566  	if err := txmgr.pvtdataPurgeMgr.BlockCommitDone(); err != nil {
   567  		return err
   568  	}
   569  	// In the case of an error, state listeners will not receive this call; instead, the ledger causes a peer panic upon receiving
   570  	// an error from this function
   571  	txmgr.updateStateListeners()
   572  	return nil
   573  }
   574  
   575  // Rollback implements method in interface `txmgmt.TxMgr`
   576  func (txmgr *LockBasedTxMgr) Rollback() {
   577  	txmgr.reset()
   578  }
   579  
   580  // clearCache empties the cache maintained by the statedb implementation
   581  func (txmgr *LockBasedTxMgr) clearCache() {
   582  	if txmgr.db.IsBulkOptimizable() {
   583  		txmgr.db.ClearCachedVersions()
   584  	}
   585  }
   586  
   587  // ShouldRecover implements method in interface kvledger.Recoverer
   588  func (txmgr *LockBasedTxMgr) ShouldRecover(lastAvailableBlock uint64) (bool, uint64, error) {
   589  	savepoint, err := txmgr.GetLastSavepoint()
   590  	if err != nil {
   591  		return false, 0, err
   592  	}
   593  	if savepoint == nil {
   594  		return true, 0, nil
   595  	}
   596  	return savepoint.BlockNum != lastAvailableBlock, savepoint.BlockNum + 1, nil
   597  }
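
// Worked example of the decision above (illustrative): with a savepoint at block 7 and blocks
// available in the block store up to block 9, ShouldRecover returns (true, 8, nil), i.e. state
// recovery should replay from block 8; a nil savepoint (a fresh state database) yields
// (true, 0, nil), i.e. replay from the genesis block.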
   598  
   599  // Name returns the name of the database that manages all active states.
   600  func (txmgr *LockBasedTxMgr) Name() string {
   601  	return "state"
   602  }
   603  
   604  // CommitLostBlock implements method in interface kvledger.Recoverer
   605  func (txmgr *LockBasedTxMgr) CommitLostBlock(blockAndPvtdata *ledger.BlockAndPvtData) error {
   606  	block := blockAndPvtdata.Block
   607  	logger.Debugf("Constructing updateSet for the block %d", block.Header.Number)
   608  	if _, _, err := txmgr.ValidateAndPrepare(blockAndPvtdata, false); err != nil {
   609  		return err
   610  	}
   611  
   612  	// log every 1000th block at Info level so that statedb rebuild progress can be tracked in production envs.
   613  	if block.Header.Number%1000 == 0 {
   614  		logger.Infof("Recommitting block [%d] to state database", block.Header.Number)
   615  	} else {
   616  		logger.Debugf("Recommitting block [%d] to state database", block.Header.Number)
   617  	}
   618  
   619  	return txmgr.Commit()
   620  }
   621  
   622  // ExportPubStateAndPvtStateHashes simply delegates the call to the statedb for exporting the data for a snapshot.
   623  // It is assumed that the consumer would invoke this function when the commits are paused
   624  func (txmgr *LockBasedTxMgr) ExportPubStateAndPvtStateHashes(dir string, newHashFunc snapshot.NewHashFunc) (map[string][]byte, error) {
   625  	// no need to acquire any lock in this function, as the commits would be paused
   626  	return txmgr.db.ExportPubStateAndPvtStateHashes(dir, newHashFunc)
   627  }
   628  
   629  func extractStateUpdates(batch *privacyenabledstate.UpdateBatch, namespaces []string) ledger.StateUpdates {
   630  	su := make(ledger.StateUpdates)
   631  	for _, namespace := range namespaces {
   632  		nsu := &ledger.KVStateUpdates{}
   633  		// include public updates
   634  		for key, versionedValue := range batch.PubUpdates.GetUpdates(namespace) {
   635  			nsu.PublicUpdates = append(nsu.PublicUpdates,
   636  				&kvrwset.KVWrite{
   637  					Key:      key,
   638  					IsDelete: versionedValue.Value == nil,
   639  					Value:    versionedValue.Value,
   640  				},
   641  			)
   642  		}
   643  		// include collection hash updates
   644  		if hashUpdates, ok := batch.HashUpdates.UpdateMap[namespace]; ok {
   645  			nsu.CollHashUpdates = make(map[string][]*kvrwset.KVWriteHash)
   646  			for _, collName := range hashUpdates.GetCollectionNames() {
   647  				for key, vv := range hashUpdates.GetUpdates(collName) {
   648  					nsu.CollHashUpdates[collName] = append(
   649  						nsu.CollHashUpdates[collName],
   650  						&kvrwset.KVWriteHash{
   651  							KeyHash:   []byte(key),
   652  							IsDelete:  vv.Value == nil,
   653  							ValueHash: vv.Value,
   654  						},
   655  					)
   656  				}
   657  			}
   658  		}
   659  		if len(nsu.PublicUpdates)+len(nsu.CollHashUpdates) > 0 {
   660  			su[namespace] = nsu
   661  		}
   662  	}
   663  	return su
   664  }
   665  
   666  func (txmgr *LockBasedTxMgr) updateStateListeners() {
   667  	for _, l := range txmgr.current.listeners {
   668  		l.StateCommitDone(txmgr.ledgerid)
   669  	}
   670  }
   671  
   672  func (txmgr *LockBasedTxMgr) reset() {
   673  	txmgr.current = nil
   674  }