github.com/ewagmig/fabric@v2.1.1+incompatible/core/ledger/kvledger/txmgmt/txmgr/lockbasedtxmgr/lockbased_txmgr.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package lockbasedtxmgr
     8  
     9  import (
    10  	"bytes"
    11  	"sync"
    12  
    13  	"github.com/golang/protobuf/proto"
    14  	"github.com/hyperledger/fabric-protos-go/common"
    15  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    16  	"github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset"
    17  	"github.com/hyperledger/fabric/common/flogging"
    18  	"github.com/hyperledger/fabric/core/ledger"
    19  	"github.com/hyperledger/fabric/core/ledger/kvledger/bookkeeping"
    20  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/privacyenabledstate"
    21  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt"
    22  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/queryutil"
    23  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/txmgr"
    24  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/validator"
    25  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/validator/valimpl"
    26  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version"
    27  	"github.com/hyperledger/fabric/core/ledger/pvtdatapolicy"
    28  	"github.com/hyperledger/fabric/core/ledger/util"
    29  	"github.com/pkg/errors"
    30  )
    31  
    32  var logger = flogging.MustGetLogger("lockbasedtxmgr")
    33  
     34  // LockBasedTxMgr is a simple implementation of the interface `txmgmt.TxMgr`.
     35  // This implementation uses a read-write lock to prevent conflicts between transaction simulation and committing.
    36  type LockBasedTxMgr struct {
    37  	ledgerid        string
    38  	db              privacyenabledstate.DB
    39  	pvtdataPurgeMgr *pvtdataPurgeMgr
    40  	validator       validator.Validator
    41  	stateListeners  []ledger.StateListener
    42  	ccInfoProvider  ledger.DeployedChaincodeInfoProvider
    43  	commitRWLock    sync.RWMutex
    44  	oldBlockCommit  sync.Mutex
    45  	current         *current
    46  	hasher          ledger.Hasher
    47  }
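         // Note on the two locks above (summarizing the comments spread across this file):
         // commitRWLock lets many query executors and simulators proceed concurrently under the
         // read lock while blocking them out during the state database write in Commit() (write
         // lock), whereas oldBlockCommit serializes Commit(), ValidateAndPrepare(), and
         // RemoveStaleAndCommitPvtDataOfOldBlocks() with respect to one another.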
    48  
    49  type current struct {
    50  	block     *common.Block
    51  	batch     *privacyenabledstate.UpdateBatch
    52  	listeners []ledger.StateListener
    53  }
    54  
    55  func (c *current) blockNum() uint64 {
    56  	return c.block.Header.Number
    57  }
    58  
    59  func (c *current) maxTxNumber() uint64 {
    60  	return uint64(len(c.block.Data.Data)) - 1
    61  }
    62  
     63  // NewLockBasedTxMgr constructs a new instance of LockBasedTxMgr
    64  func NewLockBasedTxMgr(
    65  	ledgerid string,
    66  	db privacyenabledstate.DB,
    67  	stateListeners []ledger.StateListener,
    68  	btlPolicy pvtdatapolicy.BTLPolicy,
    69  	bookkeepingProvider bookkeeping.Provider,
    70  	ccInfoProvider ledger.DeployedChaincodeInfoProvider,
    71  	customTxProcessors map[common.HeaderType]ledger.CustomTxProcessor,
    72  	hasher ledger.Hasher,
    73  ) (*LockBasedTxMgr, error) {
    74  
    75  	if hasher == nil {
    76  		return nil, errors.New("create new lock based TxMgr failed: passed in nil ledger hasher")
    77  	}
    78  
    79  	db.Open()
    80  	txmgr := &LockBasedTxMgr{
    81  		ledgerid:       ledgerid,
    82  		db:             db,
    83  		stateListeners: stateListeners,
    84  		ccInfoProvider: ccInfoProvider,
    85  		hasher:         hasher,
    86  	}
    87  	pvtstatePurgeMgr, err := pvtstatepurgemgmt.InstantiatePurgeMgr(ledgerid, db, btlPolicy, bookkeepingProvider)
    88  	if err != nil {
    89  		return nil, err
    90  	}
    91  	txmgr.pvtdataPurgeMgr = &pvtdataPurgeMgr{pvtstatePurgeMgr, false}
    92  	txmgr.validator = valimpl.NewStatebasedValidator(txmgr, db, customTxProcessors, hasher)
    93  	return txmgr, nil
    94  }
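         // A rough usage sketch (illustrative only; the variables supplying the db, BTL policy,
         // bookkeeping provider, chaincode info provider, hasher, and block data below are
         // assumptions, not values defined in this package):
         //
         //	txMgr, err := NewLockBasedTxMgr("mychannel", db, nil, btlPolicy, bkProvider, ccInfoProvider, nil, hasher)
         //	if err != nil {
         //		// handle construction error
         //	}
         //	sim, err := txMgr.NewTxSimulator("tx1")                   // simulate a transaction (release with sim.Done())
         //	_, _, err = txMgr.ValidateAndPrepare(blockAndPvtdata, true) // validate and prepare a block
         //	err = txMgr.Commit()                                      // commit the prepared updates to the state DB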
    95  
     96  // GetLastSavepoint returns the block height recorded in the savepoint,
     97  // or a nil height if no savepoint is found
    98  func (txmgr *LockBasedTxMgr) GetLastSavepoint() (*version.Height, error) {
    99  	return txmgr.db.GetLatestSavePoint()
   100  }
   101  
   102  // NewQueryExecutor implements method in interface `txmgmt.TxMgr`
   103  func (txmgr *LockBasedTxMgr) NewQueryExecutor(txid string) (ledger.QueryExecutor, error) {
   104  	qe := newQueryExecutor(txmgr, txid, true, txmgr.hasher)
   105  	txmgr.commitRWLock.RLock()
   106  	return qe, nil
   107  }
   108  
    109  // NewQueryExecutorNoCollChecks is a workaround to make the initialization of the lifecycle cache
    110  // work. The issue is that in the current lifecycle code the cache is initialized via the Initialize
    111  // function of a state listener which gets invoked during ledger opening. This invocation eventually
    112  // leads to a call to the DeployedChaincodeInfoProvider which in turn needs the channel config in order
    113  // to verify the name of the implicit collection. And the channel config is loaded only after the
    114  // ledger is opened. So, as a workaround, we skip the check of the collection name in this function
    115  // by supplying a relaxed query executor - this is perfectly safe otherwise.
    116  // As a proper fix, the initialization of other components should take place outside the ledger by
    117  // explicitly querying the ledger state so that the sequence of initialization is explicitly controlled.
    118  // However, that needs a bigger refactoring of the code.
   119  func (txmgr *LockBasedTxMgr) NewQueryExecutorNoCollChecks() (ledger.QueryExecutor, error) {
   120  	qe := newQueryExecutor(txmgr, "", false, txmgr.hasher)
   121  	txmgr.commitRWLock.RLock()
   122  	return qe, nil
   123  }
   124  
   125  // NewTxSimulator implements method in interface `txmgmt.TxMgr`
   126  func (txmgr *LockBasedTxMgr) NewTxSimulator(txid string) (ledger.TxSimulator, error) {
   127  	logger.Debugf("constructing new tx simulator")
   128  	s, err := newLockBasedTxSimulator(txmgr, txid, txmgr.hasher)
   129  	if err != nil {
   130  		return nil, err
   131  	}
   132  	txmgr.commitRWLock.RLock()
   133  	return s, nil
   134  }
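         // Note: NewQueryExecutor, NewQueryExecutorNoCollChecks, and NewTxSimulator all acquire
         // commitRWLock.RLock() before returning; the corresponding RUnlock is expected to happen
         // when the caller invokes Done() on the returned query executor / simulator (see their
         // implementations elsewhere in this package).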
   135  
   136  // ValidateAndPrepare implements method in interface `txmgmt.TxMgr`
   137  func (txmgr *LockBasedTxMgr) ValidateAndPrepare(blockAndPvtdata *ledger.BlockAndPvtData, doMVCCValidation bool) (
   138  	[]*txmgr.TxStatInfo, []byte, error,
   139  ) {
    140  	// Among ValidateAndPrepare(), PrepareExpiringKeys(), and
    141  	// RemoveStaleAndCommitPvtDataOfOldBlocks(), we can allow only one
    142  	// function to execute at a time. The reason is that each function calls
    143  	// LoadCommittedVersions(), which clears the existing entries in the
    144  	// transient buffer and loads new entries (such a transient buffer is not
    145  	// applicable for goleveldb). As a result, if these three functions were
    146  	// allowed to interleave, they would nullify the optimization provided by the bulk read API.
    147  	// Once the ledger cache (FAB-103) is introduced and the existing
    148  	// LoadCommittedVersions() is refactored to return a map, we can allow
    149  	// these three functions to execute in parallel.
    150  	logger.Debugf("Waiting for purge mgr to finish the background job of computing expiring keys for the block")
   151  	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
   152  	txmgr.oldBlockCommit.Lock()
   153  	defer txmgr.oldBlockCommit.Unlock()
   154  	logger.Debug("lock acquired on oldBlockCommit for validating read set version against the committed version")
   155  
   156  	block := blockAndPvtdata.Block
   157  	logger.Debugf("Validating new block with num trans = [%d]", len(block.Data.Data))
   158  	batch, txstatsInfo, err := txmgr.validator.ValidateAndPrepareBatch(blockAndPvtdata, doMVCCValidation)
   159  	if err != nil {
   160  		txmgr.reset()
   161  		return nil, nil, err
   162  	}
   163  	txmgr.current = &current{block: block, batch: batch}
   164  	if err := txmgr.invokeNamespaceListeners(); err != nil {
   165  		txmgr.reset()
   166  		return nil, nil, err
   167  	}
   168  
   169  	updateBytesBuilder := &privacyenabledstate.UpdatesBytesBuilder{}
   170  	updateBytes, err := updateBytesBuilder.DeterministicBytesForPubAndHashUpdates(batch)
   171  	return txstatsInfo, updateBytes, err
   172  }
   173  
   174  // RemoveStaleAndCommitPvtDataOfOldBlocks implements method in interface `txmgmt.TxMgr`
    175  // The following six operations are performed:
    176  // (1) constructs the unique pvt data from the passed reconciledPvtdata
    177  // (2) acquires a lock on oldBlockCommit
    178  // (3) checks for stale pvtData by comparing [version, valueHash] and removes stale data
    179  // (4) creates an update batch from the non-stale pvtData
    180  // (5) updates the BTL bookkeeping managed by the purge manager and updates the expiring keys
    181  // (6) commits the non-stale pvt data to the stateDB
    182  // This function assumes that the passed input contains only transactions that had been
    183  // marked "Valid". In the current design, kvledger (the single consumer of this function)
    184  // filters out the data of "invalid" transactions and supplies the data for "valid" transactions only.
   185  func (txmgr *LockBasedTxMgr) RemoveStaleAndCommitPvtDataOfOldBlocks(reconciledPvtdata map[uint64][]*ledger.TxPvtData) error {
    186  	// (0) Among ValidateAndPrepare(), PrepareExpiringKeys(), and
    187  	// RemoveStaleAndCommitPvtDataOfOldBlocks(), we can allow only one
    188  	// function to execute at a time. The reason is that each function calls
    189  	// LoadCommittedVersions(), which clears the existing entries in the
    190  	// transient buffer and loads new entries (such a transient buffer is not
    191  	// applicable for goleveldb). As a result, if these three functions were
    192  	// allowed to interleave, they would nullify the optimization provided by the bulk read API.
    193  	// Once the ledger cache (FAB-103) is introduced and the existing
    194  	// LoadCommittedVersions() is refactored to return a map, we can allow
    195  	// these three functions to execute in parallel. However, we cannot remove
    196  	// the lock on oldBlockCommit as it is also used to avoid interleaving
    197  	// between Commit() and the execution of this function, for correctness.
    198  	logger.Debug("Waiting for purge mgr to finish the background job of computing expiring keys for the block")
   199  	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
   200  	txmgr.oldBlockCommit.Lock()
   201  	defer txmgr.oldBlockCommit.Unlock()
   202  	logger.Debug("lock acquired on oldBlockCommit for committing pvtData of old blocks to state database")
   203  
    204  	// (1) as the reconciledPvtdata can contain multiple versions of pvtData for
    205  	// a given <ns, coll, key>, we need to find duplicate tuples with different
    206  	// versions and keep only the one with the highest version
   207  	logger.Debug("Constructing unique pvtData by removing duplicate entries")
   208  	uniquePvtData, err := constructUniquePvtData(reconciledPvtdata)
   209  	if len(uniquePvtData) == 0 || err != nil {
   210  		return err
   211  	}
   212  
    213  	// (3) remove the pvt data whose hash does not match the hashed
    214  	// value stored in the public state
   215  	logger.Debug("Finding and removing stale pvtData")
   216  	if err := uniquePvtData.findAndRemoveStalePvtData(txmgr.db); err != nil {
   217  		return err
   218  	}
   219  
   220  	// (4) create the update batch from the uniquePvtData
   221  	batch := uniquePvtData.transformToUpdateBatch()
   222  
    223  	// (5) update bookkeeping in the purge manager and update toPurgeList
    224  	// (i.e., the list of expiring keys). As the expiring keys would have
    225  	// been constructed during the last PrepareExpiringKeys invoked from Commit(), we need
    226  	// to update the list. This is because RemoveStaleAndCommitPvtDataOfOldBlocks
    227  	// may have added new data which might be eligible for expiry during the
    228  	// next regular block commit.
   229  	logger.Debug("Updating bookkeeping info in the purge manager")
   230  	if err := txmgr.pvtdataPurgeMgr.UpdateBookkeepingForPvtDataOfOldBlocks(batch.PvtUpdates); err != nil {
   231  		return err
   232  	}
   233  
   234  	// (6) commit the pvt data to the stateDB
   235  	logger.Debug("Committing updates to state database")
   236  	if err := txmgr.db.ApplyPrivacyAwareUpdates(batch, nil); err != nil {
   237  		return err
   238  	}
   239  	return nil
   240  }
   241  
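         // uniquePvtDataMap holds, for each <namespace, collection, keyHash>, the private kv write
         // with the highest version observed across the reconciled private data of old blocks.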
   242  type uniquePvtDataMap map[privacyenabledstate.HashedCompositeKey]*privacyenabledstate.PvtKVWrite
   243  
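         // constructUniquePvtData deduplicates the reconciled private data across blocks and
         // transactions. For example (illustrative values), if block 8/tx 1 and block 9/tx 0 both
         // carry a write to the same <ns, coll, keyHash>, only the write versioned at height (9, 0)
         // is kept, because height (9, 0) compares higher than (8, 1).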
   244  func constructUniquePvtData(reconciledPvtdata map[uint64][]*ledger.TxPvtData) (uniquePvtDataMap, error) {
   245  	uniquePvtData := make(uniquePvtDataMap)
    246  	// go over the reconciledPvtdata to find duplicate <ns, coll, key>
    247  	// entries in the pvtWrites and keep the one with the highest version number
   248  	for blkNum, blockPvtData := range reconciledPvtdata {
   249  		if err := uniquePvtData.updateUsingBlockPvtdata(blockPvtData, blkNum); err != nil {
   250  			return nil, err
   251  		}
   252  	} // for each block
   253  	return uniquePvtData, nil
   254  }
   255  
   256  func (uniquePvtData uniquePvtDataMap) updateUsingBlockPvtdata(blockPvtData []*ledger.TxPvtData, blkNum uint64) error {
   257  	for _, txPvtData := range blockPvtData {
   258  		ver := version.NewHeight(blkNum, txPvtData.SeqInBlock)
   259  		if err := uniquePvtData.updateUsingTxPvtData(txPvtData, ver); err != nil {
   260  			return err
   261  		}
   262  	} // for each tx
   263  	return nil
   264  }
   265  func (uniquePvtData uniquePvtDataMap) updateUsingTxPvtData(txPvtData *ledger.TxPvtData, ver *version.Height) error {
   266  	for _, nsPvtData := range txPvtData.WriteSet.NsPvtRwset {
   267  		if err := uniquePvtData.updateUsingNsPvtData(nsPvtData, ver); err != nil {
   268  			return err
   269  		}
   270  	} // for each ns
   271  	return nil
   272  }
   273  func (uniquePvtData uniquePvtDataMap) updateUsingNsPvtData(nsPvtData *rwset.NsPvtReadWriteSet, ver *version.Height) error {
   274  	for _, collPvtData := range nsPvtData.CollectionPvtRwset {
   275  		if err := uniquePvtData.updateUsingCollPvtData(collPvtData, nsPvtData.Namespace, ver); err != nil {
   276  			return err
   277  		}
   278  	} // for each coll
   279  	return nil
   280  }
   281  
   282  func (uniquePvtData uniquePvtDataMap) updateUsingCollPvtData(collPvtData *rwset.CollectionPvtReadWriteSet,
   283  	ns string, ver *version.Height) error {
   284  
   285  	kvRWSet := &kvrwset.KVRWSet{}
   286  	if err := proto.Unmarshal(collPvtData.Rwset, kvRWSet); err != nil {
   287  		return err
   288  	}
   289  
   290  	hashedCompositeKey := privacyenabledstate.HashedCompositeKey{
   291  		Namespace:      ns,
   292  		CollectionName: collPvtData.CollectionName,
   293  	}
   294  
   295  	for _, kvWrite := range kvRWSet.Writes { // for each kv pair
   296  		hashedCompositeKey.KeyHash = string(util.ComputeStringHash(kvWrite.Key))
   297  		uniquePvtData.updateUsingPvtWrite(kvWrite, hashedCompositeKey, ver)
   298  	} // for each kv pair
   299  
   300  	return nil
   301  }
   302  
   303  func (uniquePvtData uniquePvtDataMap) updateUsingPvtWrite(pvtWrite *kvrwset.KVWrite,
   304  	hashedCompositeKey privacyenabledstate.HashedCompositeKey, ver *version.Height) {
   305  
   306  	pvtData, ok := uniquePvtData[hashedCompositeKey]
   307  	if !ok || pvtData.Version.Compare(ver) < 0 {
   308  		uniquePvtData[hashedCompositeKey] =
   309  			&privacyenabledstate.PvtKVWrite{
   310  				Key:      pvtWrite.Key,
   311  				IsDelete: pvtWrite.IsDelete,
   312  				Value:    pvtWrite.Value,
   313  				Version:  ver,
   314  			}
   315  	}
   316  }
   317  
   318  func (uniquePvtData uniquePvtDataMap) findAndRemoveStalePvtData(db privacyenabledstate.DB) error {
   319  	// (1) load all committed versions
   320  	if err := uniquePvtData.loadCommittedVersionIntoCache(db); err != nil {
   321  		return err
   322  	}
   323  
   324  	// (2) find and remove the stale data
   325  	for hashedCompositeKey, pvtWrite := range uniquePvtData {
   326  		isStale, err := checkIfPvtWriteIsStale(&hashedCompositeKey, pvtWrite, db)
   327  		if err != nil {
   328  			return err
   329  		}
   330  		if isStale {
   331  			delete(uniquePvtData, hashedCompositeKey)
   332  		}
   333  	}
   334  	return nil
   335  }
   336  
   337  func (uniquePvtData uniquePvtDataMap) loadCommittedVersionIntoCache(db privacyenabledstate.DB) error {
    338  	// Note that ClearCachedVersions would not be called until we validate and commit this
    339  	// pvt data of old blocks. This is because the cache is cleared only while the exclusive
    340  	// lock is held, and we have already acquired that lock before reaching here.
   341  	var hashedCompositeKeys []*privacyenabledstate.HashedCompositeKey
   342  	for hashedCompositeKey := range uniquePvtData {
   343  		// tempKey ensures a different pointer is added to the slice for each key
   344  		tempKey := hashedCompositeKey
   345  		hashedCompositeKeys = append(hashedCompositeKeys, &tempKey)
   346  	}
   347  
   348  	err := db.LoadCommittedVersionsOfPubAndHashedKeys(nil, hashedCompositeKeys)
   349  	if err != nil {
   350  		return err
   351  	}
   352  	return nil
   353  }
   354  
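         // checkIfPvtWriteIsStale reports whether the given private kv write is stale with respect
         // to the committed state: a write is considered not stale if (a) the committed hashed key is
         // absent and the write is a delete, (b) the committed version equals the write's version, or
         // (c) the committed value hash equals the hash of the write's value (in which case the
         // write's version is updated to the committed version as a side effect). Otherwise it is stale.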
   355  func checkIfPvtWriteIsStale(hashedKey *privacyenabledstate.HashedCompositeKey,
   356  	kvWrite *privacyenabledstate.PvtKVWrite, db privacyenabledstate.DB) (bool, error) {
   357  
   358  	ns := hashedKey.Namespace
   359  	coll := hashedKey.CollectionName
   360  	keyHashBytes := []byte(hashedKey.KeyHash)
   361  	committedVersion, err := db.GetKeyHashVersion(ns, coll, keyHashBytes)
   362  	if err != nil {
   363  		return true, err
   364  	}
   365  
    366  	// for a deleted hashedKey, we would get a nil committed version. Note that
    367  	// the hashedKey was deleted because it either expired or was deleted by
    368  	// the chaincode itself.
   369  	if committedVersion == nil {
   370  		return !kvWrite.IsDelete, nil
   371  	}
   372  
   373  	/*
   374  		TODO: FAB-12922
    375  		In the first round, we need to check the version of the passed pvtData
    376  		against the version of the pvtdata stored in the stateDB. In the second round,
    377  		for the remaining pvtData, we need to check for staleness using the hashed
    378  		version. In the third round, for the still remaining pvtdata, we need
    379  		to check against the hashed values. In each phase we would need to
    380  		perform a bulk load of the relevant data from the stateDB.
   381  		committedPvtData, err := db.GetPrivateData(ns, coll, kvWrite.Key)
   382  		if err != nil {
   383  			return false, err
   384  		}
   385  		if committedPvtData.Version.Compare(kvWrite.Version) > 0 {
   386  			return false, nil
   387  		}
   388  	*/
   389  	if version.AreSame(committedVersion, kvWrite.Version) {
   390  		return false, nil
   391  	}
   392  
    393  	// due to metadata updates, we could get a version
    394  	// mismatch between the pvt kv write and the committed
    395  	// hashedKey. In this case, we must compare the hash
    396  	// of the value. If the hash matches, we should update
    397  	// the version number in the pvt kv write and treat the
    398  	// write as not stale (i.e., return false)
   399  	vv, err := db.GetValueHash(ns, coll, keyHashBytes)
   400  	if err != nil {
   401  		return true, err
   402  	}
   403  	if bytes.Equal(vv.Value, util.ComputeHash(kvWrite.Value)) {
    404  		// if hash of value matches, update version
    405  		// and return false (not stale)
   406  		kvWrite.Version = vv.Version // side effect
   407  		// (checkIfPvtWriteIsStale should not be updating the state)
   408  		return false, nil
   409  	}
   410  	return true, nil
   411  }
   412  
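         // transformToUpdateBatch converts the deduplicated private writes into an update batch,
         // issuing a Delete for writes marked IsDelete and a Put for all others.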
   413  func (uniquePvtData uniquePvtDataMap) transformToUpdateBatch() *privacyenabledstate.UpdateBatch {
   414  	batch := privacyenabledstate.NewUpdateBatch()
   415  	for hashedCompositeKey, pvtWrite := range uniquePvtData {
   416  		ns := hashedCompositeKey.Namespace
   417  		coll := hashedCompositeKey.CollectionName
   418  		if pvtWrite.IsDelete {
   419  			batch.PvtUpdates.Delete(ns, coll, pvtWrite.Key, pvtWrite.Version)
   420  		} else {
   421  			batch.PvtUpdates.Put(ns, coll, pvtWrite.Key, pvtWrite.Value, pvtWrite.Version)
   422  		}
   423  	}
   424  	return batch
   425  }
   426  
   427  func (txmgr *LockBasedTxMgr) invokeNamespaceListeners() error {
   428  	for _, listener := range txmgr.stateListeners {
   429  		stateUpdatesForListener := extractStateUpdates(txmgr.current.batch, listener.InterestedInNamespaces())
   430  		if len(stateUpdatesForListener) == 0 {
   431  			continue
   432  		}
   433  		txmgr.current.listeners = append(txmgr.current.listeners, listener)
   434  
   435  		committedStateQueryExecuter := &queryutil.QECombiner{
   436  			QueryExecuters: []queryutil.QueryExecuter{txmgr.db}}
   437  
   438  		postCommitQueryExecuter := &queryutil.QECombiner{
   439  			QueryExecuters: []queryutil.QueryExecuter{
   440  				&queryutil.UpdateBatchBackedQueryExecuter{
   441  					UpdateBatch:      txmgr.current.batch.PubUpdates.UpdateBatch,
   442  					HashUpdatesBatch: txmgr.current.batch.HashUpdates,
   443  				},
   444  				txmgr.db,
   445  			},
   446  		}
   447  
   448  		trigger := &ledger.StateUpdateTrigger{
   449  			LedgerID:                    txmgr.ledgerid,
   450  			StateUpdates:                stateUpdatesForListener,
   451  			CommittingBlockNum:          txmgr.current.blockNum(),
   452  			CommittedStateQueryExecutor: committedStateQueryExecuter,
   453  			PostCommitQueryExecutor:     postCommitQueryExecuter,
   454  		}
   455  		if err := listener.HandleStateUpdates(trigger); err != nil {
   456  			return err
   457  		}
   458  		logger.Debugf("Invoking listener for state changes:%s", listener)
   459  	}
   460  	return nil
   461  }
   462  
   463  // Shutdown implements method in interface `txmgmt.TxMgr`
   464  func (txmgr *LockBasedTxMgr) Shutdown() {
    465  	// wait for the background goroutine to finish, else a timing issue causes a nil pointer inside the goleveldb code;
    466  	// see FAB-11974
   467  	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
   468  	txmgr.db.Close()
   469  }
   470  
   471  // Commit implements method in interface `txmgmt.TxMgr`
   472  func (txmgr *LockBasedTxMgr) Commit() error {
    473  	// we need to acquire a lock on oldBlockCommit for the following two reasons:
    474  	// (1) DeleteExpiredAndUpdateBookkeeping() would perform an incorrect operation if
    475  	//     toPurgeList is updated by RemoveStaleAndCommitPvtDataOfOldBlocks().
    476  	// (2) RemoveStaleAndCommitPvtDataOfOldBlocks computes the update
    477  	//     batch based on the current state and if we allow regular block commits at the
    478  	//     same time, the former may overwrite the newer versions of the data and we may
    479  	//     end up with an incorrect update batch.
   480  	txmgr.oldBlockCommit.Lock()
   481  	defer txmgr.oldBlockCommit.Unlock()
   482  	logger.Debug("lock acquired on oldBlockCommit for committing regular updates to state database")
   483  
    484  	// When using the purge manager for the first block commit after peer start, the asynchronous function
    485  	// 'PrepareForExpiringKeys' is invoked inline. However, for subsequent block commits, this function is invoked
    486  	// in advance for the next block.
   487  	if !txmgr.pvtdataPurgeMgr.usedOnce {
   488  		txmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum())
   489  		txmgr.pvtdataPurgeMgr.usedOnce = true
   490  	}
   491  	defer func() {
   492  		txmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum() + 1)
   493  		logger.Debugf("launched the background routine for preparing keys to purge with the next block")
   494  		txmgr.reset()
   495  	}()
   496  
   497  	logger.Debugf("Committing updates to state database")
   498  	if txmgr.current == nil {
   499  		panic("validateAndPrepare() method should have been called before calling commit()")
   500  	}
   501  
   502  	if err := txmgr.pvtdataPurgeMgr.DeleteExpiredAndUpdateBookkeeping(
   503  		txmgr.current.batch.PvtUpdates, txmgr.current.batch.HashUpdates); err != nil {
   504  		return err
   505  	}
   506  
   507  	commitHeight := version.NewHeight(txmgr.current.blockNum(), txmgr.current.maxTxNumber())
   508  	txmgr.commitRWLock.Lock()
   509  	logger.Debugf("Write lock acquired for committing updates to state database")
   510  	if err := txmgr.db.ApplyPrivacyAwareUpdates(txmgr.current.batch, commitHeight); err != nil {
   511  		txmgr.commitRWLock.Unlock()
   512  		return err
   513  	}
   514  	txmgr.commitRWLock.Unlock()
    515  	// we should clear the cache only while holding the lock on oldBlockCommit, as the
    516  	// cache is used by the old pvtData committer to load the versions of the
    517  	// hashedKeys. Also, note that PrepareForExpiringKeys uses the cache.
   518  	txmgr.clearCache()
   519  	logger.Debugf("Updates committed to state database and the write lock is released")
   520  
    521  	// the purge manager should be notified after committing to the statedb (in this call the purge mgr removes the expiry entries from its schedule)
   522  	if err := txmgr.pvtdataPurgeMgr.BlockCommitDone(); err != nil {
   523  		return err
   524  	}
    525  	// In the case of an error, state listeners will not receive this call - instead, the ledger causes a peer panic upon receiving
    526  	// an error from this function
   527  	txmgr.updateStateListeners()
   528  	return nil
   529  }
   530  
   531  // Rollback implements method in interface `txmgmt.TxMgr`
   532  func (txmgr *LockBasedTxMgr) Rollback() {
   533  	txmgr.reset()
   534  }
   535  
    536  // clearCache empties the cache maintained by the statedb implementation
   537  func (txmgr *LockBasedTxMgr) clearCache() {
   538  	if txmgr.db.IsBulkOptimizable() {
   539  		txmgr.db.ClearCachedVersions()
   540  	}
   541  }
   542  
   543  // ShouldRecover implements method in interface kvledger.Recoverer
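         // For example, if the savepoint records block 99 while lastAvailableBlock is 101, this
         // method returns (true, 100, nil), i.e., the state DB needs recovery starting at block 100;
         // if the savepoint already equals lastAvailableBlock, it returns false and no recovery is needed.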
   544  func (txmgr *LockBasedTxMgr) ShouldRecover(lastAvailableBlock uint64) (bool, uint64, error) {
   545  	savepoint, err := txmgr.GetLastSavepoint()
   546  	if err != nil {
   547  		return false, 0, err
   548  	}
   549  	if savepoint == nil {
   550  		return true, 0, nil
   551  	}
   552  	return savepoint.BlockNum != lastAvailableBlock, savepoint.BlockNum + 1, nil
   553  }
   554  
   555  // Name returns the name of the database that manages all active states.
   556  func (txmgr *LockBasedTxMgr) Name() string {
   557  	return "state"
   558  }
   559  
   560  // CommitLostBlock implements method in interface kvledger.Recoverer
   561  func (txmgr *LockBasedTxMgr) CommitLostBlock(blockAndPvtdata *ledger.BlockAndPvtData) error {
   562  	block := blockAndPvtdata.Block
   563  	logger.Debugf("Constructing updateSet for the block %d", block.Header.Number)
   564  	if _, _, err := txmgr.ValidateAndPrepare(blockAndPvtdata, false); err != nil {
   565  		return err
   566  	}
   567  
   568  	// log every 1000th block at Info level so that statedb rebuild progress can be tracked in production envs.
   569  	if block.Header.Number%1000 == 0 {
   570  		logger.Infof("Recommitting block [%d] to state database", block.Header.Number)
   571  	} else {
   572  		logger.Debugf("Recommitting block [%d] to state database", block.Header.Number)
   573  	}
   574  
   575  	return txmgr.Commit()
   576  }
   577  
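         // extractStateUpdates filters the update batch down to the namespaces the listener is
         // interested in, returning the public writes as KVWrites and the private hashed writes as
         // KVWriteHashes, keyed by namespace.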
   578  func extractStateUpdates(batch *privacyenabledstate.UpdateBatch, namespaces []string) ledger.StateUpdates {
   579  	su := make(ledger.StateUpdates)
   580  	for _, namespace := range namespaces {
   581  		nsu := &ledger.KVStateUpdates{}
   582  		// include public updates
   583  		for key, versionedValue := range batch.PubUpdates.GetUpdates(namespace) {
   584  			nsu.PublicUpdates = append(nsu.PublicUpdates,
   585  				&kvrwset.KVWrite{
   586  					Key:      key,
   587  					IsDelete: versionedValue.Value == nil,
   588  					Value:    versionedValue.Value,
   589  				},
   590  			)
   591  		}
   592  		// include colls hashes updates
   593  		if hashUpdates, ok := batch.HashUpdates.UpdateMap[namespace]; ok {
   594  			nsu.CollHashUpdates = make(map[string][]*kvrwset.KVWriteHash)
   595  			for _, collName := range hashUpdates.GetCollectionNames() {
   596  				for key, vv := range hashUpdates.GetUpdates(collName) {
   597  					nsu.CollHashUpdates[collName] = append(
   598  						nsu.CollHashUpdates[collName],
   599  						&kvrwset.KVWriteHash{
   600  							KeyHash:   []byte(key),
   601  							IsDelete:  vv.Value == nil,
   602  							ValueHash: vv.Value,
   603  						},
   604  					)
   605  				}
   606  			}
   607  		}
   608  		if len(nsu.PublicUpdates)+len(nsu.CollHashUpdates) > 0 {
   609  			su[namespace] = nsu
   610  		}
   611  	}
   612  	return su
   613  }
   614  
   615  func (txmgr *LockBasedTxMgr) updateStateListeners() {
   616  	for _, l := range txmgr.current.listeners {
   617  		l.StateCommitDone(txmgr.ledgerid)
   618  	}
   619  }
   620  
   621  func (txmgr *LockBasedTxMgr) reset() {
   622  	txmgr.current = nil
   623  }
   624  
    625  // pvtdataPurgeMgr wraps the actual purge manager and an additional flag 'usedOnce'.
    626  // For the usage of this flag, see the relevant comments in the Commit() function above.
   627  type pvtdataPurgeMgr struct {
   628  	pvtstatepurgemgmt.PurgeMgr
   629  	usedOnce bool
   630  }