github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/core/ledger/kvledger/txmgmt/txmgr/lockbased_txmgr.go (about)

     1  /*
     2  Copyright hechain. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package txmgr
     8  
     9  import (
    10  	"bytes"
    11  	"sync"
    12  
    13  	"github.com/golang/protobuf/proto"
    14  	"github.com/hechain20/hechain/common/flogging"
    15  	"github.com/hechain20/hechain/common/ledger/snapshot"
    16  	"github.com/hechain20/hechain/core/ledger"
    17  	"github.com/hechain20/hechain/core/ledger/internal/version"
    18  	"github.com/hechain20/hechain/core/ledger/kvledger/bookkeeping"
    19  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/privacyenabledstate"
    20  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt"
    21  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/queryutil"
    22  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/rwsetutil"
    23  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/validation"
    24  	"github.com/hechain20/hechain/core/ledger/pvtdatapolicy"
    25  	"github.com/hechain20/hechain/core/ledger/util"
    26  	"github.com/hyperledger/fabric-protos-go/common"
    27  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    28  	"github.com/hyperledger/fabric-protos-go/ledger/rwset/kvrwset"
    29  	"github.com/pkg/errors"
    30  )
    31  
    32  var logger = flogging.MustGetLogger("lockbasedtxmgr")
    33  
    34  // LockBasedTxMgr is a simple implementation of the interface `txmgmt.TxMgr`.
    35  // This implementation uses a read-write lock to prevent conflicts between transaction simulation and committing.
    36  type LockBasedTxMgr struct {
    37  	ledgerid            string
    38  	db                  *privacyenabledstate.DB
    39  	pvtdataPurgeMgr     *pvtdataPurgeMgr
    40  	commitBatchPreparer *validation.CommitBatchPreparer
    41  	stateListeners      []ledger.StateListener
    42  	ccInfoProvider      ledger.DeployedChaincodeInfoProvider
    43  	commitRWLock        sync.RWMutex
    44  	oldBlockCommit      sync.Mutex
    45  	current             *current
    46  	hashFunc            rwsetutil.HashFunc
    47  }
    48  
    49  // pvtdataPurgeMgr wraps the actual purge manager and an additional flag 'usedOnce'.
    50  // For the usage of this flag, see the relevant comments in the txmgr.Commit() function below.
    51  type pvtdataPurgeMgr struct {
    52  	*pvtstatepurgemgmt.PurgeMgr
    53  	usedOnce bool
    54  }
    55  
    56  type current struct {
    57  	block     *common.Block
    58  	batch     *privacyenabledstate.UpdateBatch
    59  	listeners []ledger.StateListener
    60  }
    61  
    62  func (c *current) blockNum() uint64 {
    63  	return c.block.Header.Number
    64  }
    65  
    66  func (c *current) maxTxNumber() uint64 {
    67  	return uint64(len(c.block.Data.Data)) - 1
    68  }
    69  
    70  // Initializer captures the dependencies for tx manager
    71  type Initializer struct {
    72  	LedgerID            string
    73  	DB                  *privacyenabledstate.DB
    74  	StateListeners      []ledger.StateListener
    75  	BtlPolicy           pvtdatapolicy.BTLPolicy
    76  	BookkeepingProvider *bookkeeping.Provider
    77  	CCInfoProvider      ledger.DeployedChaincodeInfoProvider
    78  	CustomTxProcessors  map[common.HeaderType]ledger.CustomTxProcessor
    79  	HashFunc            rwsetutil.HashFunc
    80  }
    81  
    82  // NewLockBasedTxMgr constructs a new instance of LockBasedTxMgr
    83  func NewLockBasedTxMgr(initializer *Initializer) (*LockBasedTxMgr, error) {
    84  	if initializer.HashFunc == nil {
    85  		return nil, errors.New("create new lock based TxMgr failed: passed in nil ledger hasher")
    86  	}
    87  
    88  	if err := initializer.DB.Open(); err != nil {
    89  		return nil, err
    90  	}
    91  	txmgr := &LockBasedTxMgr{
    92  		ledgerid:       initializer.LedgerID,
    93  		db:             initializer.DB,
    94  		stateListeners: initializer.StateListeners,
    95  		ccInfoProvider: initializer.CCInfoProvider,
    96  		hashFunc:       initializer.HashFunc,
    97  	}
    98  	pvtstatePurgeMgr, err := pvtstatepurgemgmt.InstantiatePurgeMgr(
    99  		initializer.LedgerID,
   100  		initializer.DB,
   101  		initializer.BtlPolicy,
   102  		initializer.BookkeepingProvider)
   103  	if err != nil {
   104  		return nil, err
   105  	}
   106  	txmgr.pvtdataPurgeMgr = &pvtdataPurgeMgr{pvtstatePurgeMgr, false}
   107  	txmgr.commitBatchPreparer = validation.NewCommitBatchPreparer(
   108  		txmgr,
   109  		initializer.DB,
   110  		initializer.CustomTxProcessors,
   111  		initializer.HashFunc)
   112  	return txmgr, nil
   113  }
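
        // Example (sketch): how a caller such as kvledger might wire up an Initializer and
        // construct the transaction manager. The variable names used below (stateDB, listeners,
        // btlPolicy, bkProvider, ccInfoProvider, hashFunc) are assumed placeholders for
        // dependencies the caller is expected to have prepared; HashFunc must be non-nil.
        //
        //	mgr, err := NewLockBasedTxMgr(&Initializer{
        //		LedgerID:            "mychannel",
        //		DB:                  stateDB,        // *privacyenabledstate.DB
        //		StateListeners:      listeners,      // []ledger.StateListener
        //		BtlPolicy:           btlPolicy,      // pvtdatapolicy.BTLPolicy
        //		BookkeepingProvider: bkProvider,     // *bookkeeping.Provider
        //		CCInfoProvider:      ccInfoProvider, // ledger.DeployedChaincodeInfoProvider
        //		CustomTxProcessors:  nil,
        //		HashFunc:            hashFunc,       // rwsetutil.HashFunc
        //	})
        //	if err != nil {
        //		// handle construction error
        //	}
        //	defer mgr.Shutdown()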
   114  
   115  // GetLastSavepoint returns the block height recorded in the savepoint,
   116  // or nil if no savepoint is found
   117  func (txmgr *LockBasedTxMgr) GetLastSavepoint() (*version.Height, error) {
   118  	return txmgr.db.GetLatestSavePoint()
   119  }
   120  
   121  // NewQueryExecutor implements method in interface `txmgmt.TxMgr`
   122  func (txmgr *LockBasedTxMgr) NewQueryExecutor(txid string) (ledger.QueryExecutor, error) {
   123  	qe := newQueryExecutor(txmgr, txid, nil, true, txmgr.hashFunc)
   124  	txmgr.commitRWLock.RLock()
   125  	return qe, nil
   126  }
   127  
   128  // NewQueryExecutorNoCollChecks is a workaround to make the initialization of the lifecycle cache
   129  // work. The issue is that in the current lifecycle code the cache is initialized via the Initialize
   130  // function of a state listener, which gets invoked during ledger opening. This invocation eventually
   131  // leads to a call to DeployedChaincodeInfoProvider, which in turn needs the channel config in order
   132  // to verify the name of the implicit collection. However, the channel config is loaded only after the
   133  // ledger is opened. So, as a workaround, we skip the collection-name check in this function
   134  // by supplying a relaxed query executor - this is perfectly safe otherwise.
   135  // As a proper fix, the initialization of other components should take place outside the ledger by
   136  // explicitly querying the ledger state so that the sequence of initialization is explicitly controlled.
   137  // However, that needs a bigger refactoring of the code.
   138  func (txmgr *LockBasedTxMgr) NewQueryExecutorNoCollChecks() (ledger.QueryExecutor, error) {
   139  	qe := newQueryExecutor(txmgr, "", nil, false, txmgr.hashFunc)
   140  	txmgr.commitRWLock.RLock()
   141  	return qe, nil
   142  }
   143  
   144  // NewTxSimulator implements method in interface `txmgmt.TxMgr`
   145  func (txmgr *LockBasedTxMgr) NewTxSimulator(txid string) (ledger.TxSimulator, error) {
   146  	logger.Debugf("constructing new tx simulator")
   147  	s, err := newTxSimulator(txmgr, txid, txmgr.hashFunc)
   148  	if err != nil {
   149  		return nil, err
   150  	}
   151  	txmgr.commitRWLock.RLock()
   152  	return s, nil
   153  }
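
        // Example (sketch): the expected simulate-then-release pattern (mgr is an assumed
        // *LockBasedTxMgr). NewTxSimulator takes a read lock on commitRWLock, so the caller must
        // call Done() on the simulator (per the ledger.TxSimulator interface) to release it before
        // a block commit can take the write lock. The namespace "mycc" and the key below are
        // assumed placeholders.
        //
        //	sim, err := mgr.NewTxSimulator("tx1")
        //	if err != nil {
        //		// handle error
        //	}
        //	defer sim.Done() // expected to release the read lock taken above
        //	val, _ := sim.GetState("mycc", "counter")
        //	_ = sim.SetState("mycc", "counter", append(val, '+'))
        //	simRes, _ := sim.GetTxSimulationResults() // read-write set handed to endorsement
        //	_ = simRes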
   154  
   155  // ValidateAndPrepare implements method in interface `txmgmt.TxMgr`
   156  func (txmgr *LockBasedTxMgr) ValidateAndPrepare(blockAndPvtdata *ledger.BlockAndPvtData, doMVCCValidation bool) (
   157  	[]*validation.TxStatInfo, []byte, error,
   158  ) {
   159  	// Among ValidateAndPrepare(), PrepareForExpiringKeys(), and
   160  	// RemoveStaleAndCommitPvtDataOfOldBlocks(), we can allow only one
   161  	// function to execute at a time. The reason is that each function calls
   162  	// LoadCommittedVersions(), which would clear the existing entries in the
   163  	// transient buffer and load new entries (such a transient buffer is not
   164  	// applicable for goleveldb). As a result, these three functions can
   165  	// interleave and nullify the optimization provided by the bulk read API.
   166  	// Once the ledger cache (FAB-103) is introduced and the existing
   167  	// LoadCommittedVersions() is refactored to return a map, we can allow
   168  	// these three functions to execute in parallel.
   169  	logger.Debugf("Waiting for purge mgr to finish the background job of computing expiring keys for the block")
   170  	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
   171  	txmgr.oldBlockCommit.Lock()
   172  	defer txmgr.oldBlockCommit.Unlock()
   173  	logger.Debug("lock acquired on oldBlockCommit for validating read set version against the committed version")
   174  
   175  	block := blockAndPvtdata.Block
   176  	logger.Debugf("Validating new block with num trans = [%d]", len(block.Data.Data))
   177  	batch, txstatsInfo, err := txmgr.commitBatchPreparer.ValidateAndPrepareBatch(blockAndPvtdata, doMVCCValidation)
   178  	if err != nil {
   179  		txmgr.reset()
   180  		return nil, nil, err
   181  	}
   182  	txmgr.current = &current{block: block, batch: batch}
   183  	if err := txmgr.invokeNamespaceListeners(); err != nil {
   184  		txmgr.reset()
   185  		return nil, nil, err
   186  	}
   187  
   188  	updateBytes, err := deterministicBytesForPubAndHashUpdates(batch)
   189  	return txstatsInfo, updateBytes, err
   190  }
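
        // Example (sketch): the validate-then-commit sequence a caller is expected to follow for a
        // regular block (CommitLostBlock below uses the same pattern). mgr is an assumed
        // *LockBasedTxMgr and blockAndPvtdata an assumed *ledger.BlockAndPvtData supplied by the caller.
        //
        //	txStats, updateBytes, err := mgr.ValidateAndPrepare(blockAndPvtdata, true)
        //	if err != nil {
        //		// the prepared batch has already been discarded via reset()
        //	}
        //	_ = txStats     // per-transaction validation statistics
        //	_ = updateBytes // deterministic bytes of the pub/hashed updates (see deterministicBytesForPubAndHashUpdates)
        //	if err := mgr.Commit(); err != nil {
        //		// handle commit error
        //	}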
   191  
   192  // RemoveStaleAndCommitPvtDataOfOldBlocks implements method in interface `txmgmt.TxMgr`
   193  // The following six operations are performed:
   194  // (1) constructs the unique pvt data from the passed reconciledPvtdata
   195  // (2) acquires a lock on oldBlockCommit
   196  // (3) checks for stale pvtData by comparing [version, valueHash] and removes the stale data
   197  // (4) creates an update batch from the non-stale pvtData
   198  // (5) updates the BTL bookkeeping managed by the purge manager and updates the expiring keys
   199  // (6) commits the non-stale pvt data to the stateDB
   200  // This function assumes that the passed input contains only transactions that had been
   201  // marked "Valid". In the current design, kvledger (the single consumer of this function)
   202  // filters out the data of "invalid" transactions and supplies the data for "valid" transactions only.
   203  func (txmgr *LockBasedTxMgr) RemoveStaleAndCommitPvtDataOfOldBlocks(reconciledPvtdata map[uint64][]*ledger.TxPvtData) error {
   204  	// (0) Among ValidateAndPrepare(), PrepareForExpiringKeys(), and
   205  	// RemoveStaleAndCommitPvtDataOfOldBlocks(), we can allow only one
   206  	// function to execute at a time. The reason is that each function calls
   207  	// LoadCommittedVersions(), which would clear the existing entries in the
   208  	// transient buffer and load new entries (such a transient buffer is not
   209  	// applicable for goleveldb). As a result, these three functions can
   210  	// interleave and nullify the optimization provided by the bulk read API.
   211  	// Once the ledger cache (FAB-103) is introduced and the existing
   212  	// LoadCommittedVersions() is refactored to return a map, we can allow
   213  	// these three functions to execute in parallel. However, we cannot remove
   214  	// the lock on oldBlockCommit as it is also used to avoid interleaving
   215  	// between Commit() and the execution of this function, for correctness.
   216  	logger.Debug("Waiting for purge mgr to finish the background job of computing expiring keys for the block")
   217  	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
   218  	txmgr.oldBlockCommit.Lock()
   219  	defer txmgr.oldBlockCommit.Unlock()
   220  	logger.Debug("lock acquired on oldBlockCommit for committing pvtData of old blocks to state database")
   221  
   222  	// (1) as the reconciledPvtdata can contain multiple versions of pvtData for
   223  	// a given <ns, coll, key>, we need to find duplicate tuples with different
   224  	// versions and use the one with the higher version
   225  	logger.Debug("Constructing unique pvtData by removing duplicate entries")
   226  	uniquePvtData, err := constructUniquePvtData(reconciledPvtdata)
   227  	if len(uniquePvtData) == 0 || err != nil {
   228  		return err
   229  	}
   230  
   231  	// (3) remove the pvt data that does not match the hashed
   232  	// value stored in the public state
   233  	logger.Debug("Finding and removing stale pvtData")
   234  	if err := uniquePvtData.findAndRemoveStalePvtData(txmgr.db); err != nil {
   235  		return err
   236  	}
   237  
   238  	// (4) create the update batch from the uniquePvtData
   239  	batch := uniquePvtData.transformToUpdateBatch()
   240  
   241  	// (5) update bookkeeping in the purge manager and update toPurgeList
   242  	// (i.e., the list of expiring keys). As the expiring keys would have
   243  	// been computed during the last PrepareForExpiringKeys invoked from Commit(),
   244  	// we need to update the list. This is because RemoveStaleAndCommitPvtDataOfOldBlocks
   245  	// may have added new data that might be eligible for expiry during the
   246  	// next regular block commit.
   247  	logger.Debug("Updating expiry info in the purge manager")
   248  	if err := txmgr.pvtdataPurgeMgr.UpdateExpiryInfoOfPvtDataOfOldBlocks(batch.PvtUpdates); err != nil {
   249  		return err
   250  	}
   251  
   252  	// (6) commit the pvt data to the stateDB
   253  	logger.Debug("Committing updates to state database")
   254  	return txmgr.db.ApplyPrivacyAwareUpdates(batch, nil)
   255  }
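
        // Example (sketch): the expected shape of the reconciledPvtdata argument (mgr is an assumed
        // *LockBasedTxMgr). The map is keyed by block number, and each ledger.TxPvtData carries the
        // transaction's position within that block (SeqInBlock) together with its private write set.
        // The literal values and the txPvtRWSet variables are assumed placeholders.
        //
        //	reconciledPvtdata := map[uint64][]*ledger.TxPvtData{
        //		8: {{SeqInBlock: 2, WriteSet: txPvtRWSetBlk8Tx2}}, // *rwset.TxPvtReadWriteSet
        //		9: {{SeqInBlock: 0, WriteSet: txPvtRWSetBlk9Tx0}},
        //	}
        //	err := mgr.RemoveStaleAndCommitPvtDataOfOldBlocks(reconciledPvtdata)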
   256  
   257  type uniquePvtDataMap map[privacyenabledstate.HashedCompositeKey]*privacyenabledstate.PvtKVWrite
   258  
   259  func constructUniquePvtData(reconciledPvtdata map[uint64][]*ledger.TxPvtData) (uniquePvtDataMap, error) {
   260  	uniquePvtData := make(uniquePvtDataMap)
   261  	// go over the reconciledPvtdata to find duplicate <ns, coll, key>
   262  	// in the pvtWrites and use the one with the higher version number
   263  	for blkNum, blockPvtData := range reconciledPvtdata {
   264  		if err := uniquePvtData.updateUsingBlockPvtdata(blockPvtData, blkNum); err != nil {
   265  			return nil, err
   266  		}
   267  	} // for each block
   268  	return uniquePvtData, nil
   269  }
   270  
   271  func (uniquePvtData uniquePvtDataMap) updateUsingBlockPvtdata(blockPvtData []*ledger.TxPvtData, blkNum uint64) error {
   272  	for _, txPvtData := range blockPvtData {
   273  		ver := version.NewHeight(blkNum, txPvtData.SeqInBlock)
   274  		if err := uniquePvtData.updateUsingTxPvtData(txPvtData, ver); err != nil {
   275  			return err
   276  		}
   277  	} // for each tx
   278  	return nil
   279  }
   280  
   281  func (uniquePvtData uniquePvtDataMap) updateUsingTxPvtData(txPvtData *ledger.TxPvtData, ver *version.Height) error {
   282  	for _, nsPvtData := range txPvtData.WriteSet.NsPvtRwset {
   283  		if err := uniquePvtData.updateUsingNsPvtData(nsPvtData, ver); err != nil {
   284  			return err
   285  		}
   286  	} // for each ns
   287  	return nil
   288  }
   289  
   290  func (uniquePvtData uniquePvtDataMap) updateUsingNsPvtData(nsPvtData *rwset.NsPvtReadWriteSet, ver *version.Height) error {
   291  	for _, collPvtData := range nsPvtData.CollectionPvtRwset {
   292  		if err := uniquePvtData.updateUsingCollPvtData(collPvtData, nsPvtData.Namespace, ver); err != nil {
   293  			return err
   294  		}
   295  	} // for each coll
   296  	return nil
   297  }
   298  
   299  func (uniquePvtData uniquePvtDataMap) updateUsingCollPvtData(collPvtData *rwset.CollectionPvtReadWriteSet,
   300  	ns string, ver *version.Height) error {
   301  	kvRWSet := &kvrwset.KVRWSet{}
   302  	if err := proto.Unmarshal(collPvtData.Rwset, kvRWSet); err != nil {
   303  		return err
   304  	}
   305  
   306  	hashedCompositeKey := privacyenabledstate.HashedCompositeKey{
   307  		Namespace:      ns,
   308  		CollectionName: collPvtData.CollectionName,
   309  	}
   310  
   311  	for _, kvWrite := range kvRWSet.Writes { // for each kv pair
   312  		hashedCompositeKey.KeyHash = string(util.ComputeStringHash(kvWrite.Key))
   313  		uniquePvtData.updateUsingPvtWrite(kvWrite, hashedCompositeKey, ver)
   314  	} // for each kv pair
   315  
   316  	return nil
   317  }
   318  
   319  func (uniquePvtData uniquePvtDataMap) updateUsingPvtWrite(pvtWrite *kvrwset.KVWrite,
   320  	hashedCompositeKey privacyenabledstate.HashedCompositeKey, ver *version.Height) {
   321  	pvtData, ok := uniquePvtData[hashedCompositeKey]
   322  	if !ok || pvtData.Version.Compare(ver) < 0 {
   323  		uniquePvtData[hashedCompositeKey] =
   324  			&privacyenabledstate.PvtKVWrite{
   325  				Key:      pvtWrite.Key,
   326  				IsDelete: rwsetutil.IsKVWriteDelete(pvtWrite),
   327  				Value:    pvtWrite.Value,
   328  				Version:  ver,
   329  			}
   330  	}
   331  }
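
        // Example (sketch): the version precedence applied above. Heights compare first by block
        // number and then by tx number, so a write observed at (block 10, tx 1) replaces one
        // previously recorded at (block 9, tx 25) for the same <ns, coll, keyHash>.
        //
        //	older := version.NewHeight(9, 25)
        //	newer := version.NewHeight(10, 1)
        //	_ = older.Compare(newer) // -1, so the existing entry is overwritten by the newer write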
   332  
   333  func (uniquePvtData uniquePvtDataMap) findAndRemoveStalePvtData(db *privacyenabledstate.DB) error {
   334  	// (1) load all committed versions
   335  	if err := uniquePvtData.loadCommittedVersionIntoCache(db); err != nil {
   336  		return err
   337  	}
   338  
   339  	// (2) find and remove the stale data
   340  	for hashedCompositeKey, pvtWrite := range uniquePvtData {
   341  		isStale, err := checkIfPvtWriteIsStale(&hashedCompositeKey, pvtWrite, db)
   342  		if err != nil {
   343  			return err
   344  		}
   345  		if isStale {
   346  			delete(uniquePvtData, hashedCompositeKey)
   347  		}
   348  	}
   349  	return nil
   350  }
   351  
   352  func (uniquePvtData uniquePvtDataMap) loadCommittedVersionIntoCache(db *privacyenabledstate.DB) error {
   353  	// Note that ClearCachedVersions would not be called till we validate and commit this
   354  	// pvt data of old blocks. This is because the cache is cleared only while the exclusive
   355  	// lock is held, and we have already acquired it before reaching here.
   356  	var hashedCompositeKeys []*privacyenabledstate.HashedCompositeKey
   357  	for hashedCompositeKey := range uniquePvtData {
   358  		// tempKey ensures a different pointer is added to the slice for each key
   359  		tempKey := hashedCompositeKey
   360  		hashedCompositeKeys = append(hashedCompositeKeys, &tempKey)
   361  	}
   362  
   363  	err := db.LoadCommittedVersionsOfPubAndHashedKeys(nil, hashedCompositeKeys)
   364  	if err != nil {
   365  		return err
   366  	}
   367  	return nil
   368  }
   369  
   370  func checkIfPvtWriteIsStale(hashedKey *privacyenabledstate.HashedCompositeKey,
   371  	kvWrite *privacyenabledstate.PvtKVWrite, db *privacyenabledstate.DB) (bool, error) {
   372  	ns := hashedKey.Namespace
   373  	coll := hashedKey.CollectionName
   374  	keyHashBytes := []byte(hashedKey.KeyHash)
   375  	committedVersion, err := db.GetKeyHashVersion(ns, coll, keyHashBytes)
   376  	if err != nil {
   377  		return true, err
   378  	}
   379  
   380  	// for a deleted hashedKey, we would get a nil committed version. Note that
   381  	// the hashedKey was deleted because it either expired or was deleted by
   382  	// the chaincode itself.
   383  	if committedVersion == nil {
   384  		return !kvWrite.IsDelete, nil
   385  	}
   386  
   387  	/*
   388  		TODO: FAB-12922
   389  		In the first round, we need to check the version of the passed pvtData
   390  		against the version of the pvtdata stored in the stateDB. In the second round,
   391  		for the remaining pvtData, we need to check for staleness using the hashed
   392  		version. In the third round, for the still remaining pvtdata, we need
   393  		to check against the hashed values. In each phase, we would need to
   394  		perform a bulk load of the relevant data from the stateDB.
   395  		committedPvtData, err := db.GetPrivateData(ns, coll, kvWrite.Key)
   396  		if err != nil {
   397  			return false, err
   398  		}
   399  		if committedPvtData.Version.Compare(kvWrite.Version) > 0 {
   400  			return false, nil
   401  		}
   402  	*/
   403  	if version.AreSame(committedVersion, kvWrite.Version) {
   404  		return false, nil
   405  	}
   406  
   407  	// due to metadata updates, we could get a version
   408  	// mismatch between the pvt kv write and the committed
   409  	// hashedKey. In this case, we must compare the hash
   410  	// of the value. If the hash matches, we should update
   411  	// the version number in the pvt kv write and treat
   412  	// the write as not stale
   413  	vv, err := db.GetValueHash(ns, coll, keyHashBytes)
   414  	if err != nil {
   415  		return true, err
   416  	}
   417  	if bytes.Equal(vv.Value, util.ComputeHash(kvWrite.Value)) {
   418  		// if the hash of the value matches, update the version
   419  		// and treat the write as not stale
   420  		kvWrite.Version = vv.Version // side effect
   421  		// (checkIfPvtWriteIsStale should not be updating the state)
   422  		return false, nil
   423  	}
   424  	return true, nil
   425  }
   426  
   427  func (uniquePvtData uniquePvtDataMap) transformToUpdateBatch() *privacyenabledstate.UpdateBatch {
   428  	batch := privacyenabledstate.NewUpdateBatch()
   429  	for hashedCompositeKey, pvtWrite := range uniquePvtData {
   430  		ns := hashedCompositeKey.Namespace
   431  		coll := hashedCompositeKey.CollectionName
   432  		if pvtWrite.IsDelete {
   433  			batch.PvtUpdates.Delete(ns, coll, pvtWrite.Key, pvtWrite.Version)
   434  		} else {
   435  			batch.PvtUpdates.Put(ns, coll, pvtWrite.Key, pvtWrite.Value, pvtWrite.Version)
   436  		}
   437  	}
   438  	return batch
   439  }
   440  
   441  func (txmgr *LockBasedTxMgr) invokeNamespaceListeners() error {
   442  	for _, listener := range txmgr.stateListeners {
   443  		stateUpdatesForListener := extractStateUpdates(txmgr.current.batch, listener.InterestedInNamespaces())
   444  		if len(stateUpdatesForListener) == 0 {
   445  			continue
   446  		}
   447  		txmgr.current.listeners = append(txmgr.current.listeners, listener)
   448  
   449  		committedStateQueryExecuter := &queryutil.QECombiner{
   450  			QueryExecuters: []queryutil.QueryExecuter{txmgr.db},
   451  		}
   452  
   453  		postCommitQueryExecuter := &queryutil.QECombiner{
   454  			QueryExecuters: []queryutil.QueryExecuter{
   455  				&queryutil.UpdateBatchBackedQueryExecuter{
   456  					UpdateBatch:      txmgr.current.batch.PubUpdates.UpdateBatch,
   457  					HashUpdatesBatch: txmgr.current.batch.HashUpdates,
   458  				},
   459  				txmgr.db,
   460  			},
   461  		}
   462  
   463  		trigger := &ledger.StateUpdateTrigger{
   464  			LedgerID:                    txmgr.ledgerid,
   465  			StateUpdates:                stateUpdatesForListener,
   466  			CommittingBlockNum:          txmgr.current.blockNum(),
   467  			CommittedStateQueryExecutor: committedStateQueryExecuter,
   468  			PostCommitQueryExecutor:     postCommitQueryExecuter,
   469  		}
   470  		if err := listener.HandleStateUpdates(trigger); err != nil {
   471  			return err
   472  		}
   473  		logger.Debugf("Invoking listener for state changes:%s", listener.Name())
   474  	}
   475  	return nil
   476  }
   477  
   478  // Shutdown implements method in interface `txmgmt.TxMgr`
   479  func (txmgr *LockBasedTxMgr) Shutdown() {
   480  	// wait for the background goroutine to finish; otherwise, a timing issue can cause a nil pointer inside the goleveldb code
   481  	// see FAB-11974
   482  	txmgr.pvtdataPurgeMgr.WaitForPrepareToFinish()
   483  	txmgr.db.Close()
   484  }
   485  
   486  // Commit implements method in interface `txmgmt.TxMgr`
   487  func (txmgr *LockBasedTxMgr) Commit() error {
   488  	// we need to acquire a lock on oldBlockCommit for the following two reasons:
   489  	// (1) DeleteExpiredAndUpdateBookkeeping() would perform an incorrect operation if
   490  	//     toPurgeList were updated by RemoveStaleAndCommitPvtDataOfOldBlocks().
   491  	// (2) RemoveStaleAndCommitPvtDataOfOldBlocks computes the update
   492  	//     batch based on the current state, and if we allow regular block commits at the
   493  	//     same time, the former may overwrite newer versions of the data and we may
   494  	//     end up with an incorrect update batch.
   495  	txmgr.oldBlockCommit.Lock()
   496  	defer txmgr.oldBlockCommit.Unlock()
   497  	logger.Debug("lock acquired on oldBlockCommit for committing regular updates to state database")
   498  
   499  	// When using the purge manager for the first block commit after peer start, the asynchronous function
   500  	// 'PrepareForExpiringKeys' is invoked inline. However, for subsequent block commits, this function is invoked
   501  	// in advance for the next block.
   502  	if !txmgr.pvtdataPurgeMgr.usedOnce {
   503  		txmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum())
   504  		txmgr.pvtdataPurgeMgr.usedOnce = true
   505  	}
   506  	defer func() {
   507  		txmgr.pvtdataPurgeMgr.PrepareForExpiringKeys(txmgr.current.blockNum() + 1)
   508  		logger.Debugf("launched the background routine for preparing keys to purge with the next block")
   509  		txmgr.reset()
   510  	}()
   511  
   512  	logger.Debugf("Committing updates to state database")
   513  	if txmgr.current == nil {
   514  		panic("validateAndPrepare() method should have been called before calling commit()")
   515  	}
   516  
   517  	if err := txmgr.pvtdataPurgeMgr.UpdateExpiryInfo(
   518  		txmgr.current.batch.PvtUpdates, txmgr.current.batch.HashUpdates); err != nil {
   519  		return err
   520  	}
   521  
   522  	if err := txmgr.pvtdataPurgeMgr.AddExpiredEntriesToUpdateBatch(
   523  		txmgr.current.batch.PvtUpdates, txmgr.current.batch.HashUpdates); err != nil {
   524  		return err
   525  	}
   526  
   527  	commitHeight := version.NewHeight(txmgr.current.blockNum(), txmgr.current.maxTxNumber())
   528  	txmgr.commitRWLock.Lock()
   529  	logger.Debugf("Write lock acquired for committing updates to state database")
   530  	if err := txmgr.db.ApplyPrivacyAwareUpdates(txmgr.current.batch, commitHeight); err != nil {
   531  		txmgr.commitRWLock.Unlock()
   532  		return err
   533  	}
   534  	txmgr.commitRWLock.Unlock()
   535  	// we should clear the cache only while holding the lock on oldBlockCommit, as the
   536  	// cache is used by the old pvtData committer to load the versions of the
   537  	// hashedKeys. Also, note that PrepareForExpiringKeys uses the cache.
   538  	txmgr.clearCache()
   539  	logger.Debugf("Updates committed to state database and the write lock is released")
   540  
   541  	// the purge manager should be notified after committing to the statedb (in this call, the purge mgr removes the expiry entries from its schedule)
   542  	if err := txmgr.pvtdataPurgeMgr.BlockCommitDone(); err != nil {
   543  		return err
   544  	}
   545  	// In the case of an error, state listeners will not receive this call; instead, the ledger causes a peer panic upon receiving
   546  	// an error from this function
   547  	txmgr.updateStateListeners()
   548  	return nil
   549  }
   550  
   551  // Rollback implements method in interface `txmgmt.TxMgr`
   552  func (txmgr *LockBasedTxMgr) Rollback() {
   553  	txmgr.reset()
   554  }
   555  
   556  // clearCache empties the cache maintained by the statedb implementation
   557  func (txmgr *LockBasedTxMgr) clearCache() {
   558  	if txmgr.db.IsBulkOptimizable() {
   559  		txmgr.db.ClearCachedVersions()
   560  	}
   561  }
   562  
   563  // ShouldRecover implements method in interface kvledger.Recoverer
   564  func (txmgr *LockBasedTxMgr) ShouldRecover(lastAvailableBlock uint64) (bool, uint64, error) {
   565  	savepoint, err := txmgr.GetLastSavepoint()
   566  	if err != nil {
   567  		return false, 0, err
   568  	}
   569  	if savepoint == nil {
   570  		return true, 0, nil
   571  	}
   572  	return savepoint.BlockNum != lastAvailableBlock, savepoint.BlockNum + 1, nil
   573  }
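
        // Example (sketch): how the recovery decision maps to concrete values (mgr is an assumed
        // *LockBasedTxMgr). With an assumed savepoint at block 7 and a block store whose
        // lastAvailableBlock is 10, the state database lags behind, so ShouldRecover reports
        // (true, 8) and blocks 8..10 are recommitted via CommitLostBlock.
        //
        //	needsRecovery, firstBlock, _ := mgr.ShouldRecover(10)
        //	// needsRecovery == true, firstBlock == 8 when the savepoint height is {BlockNum: 7}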
   574  
   575  // Name returns the name of the database that manages all active states.
   576  func (txmgr *LockBasedTxMgr) Name() string {
   577  	return "state"
   578  }
   579  
   580  // CommitLostBlock implements method in interface kvledger.Recoverer
   581  func (txmgr *LockBasedTxMgr) CommitLostBlock(blockAndPvtdata *ledger.BlockAndPvtData) error {
   582  	block := blockAndPvtdata.Block
   583  	logger.Debugf("Constructing updateSet for the block %d", block.Header.Number)
   584  	if _, _, err := txmgr.ValidateAndPrepare(blockAndPvtdata, false); err != nil {
   585  		return err
   586  	}
   587  
   588  	// log every 1000th block at Info level so that statedb rebuild progress can be tracked in production envs.
   589  	if block.Header.Number%1000 == 0 {
   590  		logger.Infof("Recommitting block [%d] to state database", block.Header.Number)
   591  	} else {
   592  		logger.Debugf("Recommitting block [%d] to state database", block.Header.Number)
   593  	}
   594  
   595  	return txmgr.Commit()
   596  }
   597  
   598  // ExportPubStateAndPvtStateHashes simply delegates the call to the statedb for exporting the data for a snapshot.
   599  // It is assumed that the consumer would invoke this function when the commits are paused
   600  func (txmgr *LockBasedTxMgr) ExportPubStateAndPvtStateHashes(dir string, newHashFunc snapshot.NewHashFunc) (map[string][]byte, error) {
   601  	// no need to acquire any lock in this function, as the commits would be paused
   602  	return txmgr.db.ExportPubStateAndPvtStateHashes(dir, newHashFunc)
   603  }
   604  
   605  func extractStateUpdates(batch *privacyenabledstate.UpdateBatch, namespaces []string) ledger.StateUpdates {
   606  	su := make(ledger.StateUpdates)
   607  	for _, namespace := range namespaces {
   608  		nsu := &ledger.KVStateUpdates{}
   609  		// include public updates
   610  		for key, versionedValue := range batch.PubUpdates.GetUpdates(namespace) {
   611  			nsu.PublicUpdates = append(nsu.PublicUpdates,
   612  				&kvrwset.KVWrite{
   613  					Key:      key,
   614  					IsDelete: versionedValue.Value == nil,
   615  					Value:    versionedValue.Value,
   616  				},
   617  			)
   618  		}
   619  		// include colls hashes updates
   620  		if hashUpdates, ok := batch.HashUpdates.UpdateMap[namespace]; ok {
   621  			nsu.CollHashUpdates = make(map[string][]*kvrwset.KVWriteHash)
   622  			for _, collName := range hashUpdates.GetCollectionNames() {
   623  				for key, vv := range hashUpdates.GetUpdates(collName) {
   624  					nsu.CollHashUpdates[collName] = append(
   625  						nsu.CollHashUpdates[collName],
   626  						&kvrwset.KVWriteHash{
   627  							KeyHash:   []byte(key),
   628  							IsDelete:  vv.Value == nil,
   629  							ValueHash: vv.Value,
   630  						},
   631  					)
   632  				}
   633  			}
   634  		}
   635  		if len(nsu.PublicUpdates)+len(nsu.CollHashUpdates) > 0 {
   636  			su[namespace] = nsu
   637  		}
   638  	}
   639  	return su
   640  }
   641  
   642  func (txmgr *LockBasedTxMgr) updateStateListeners() {
   643  	for _, l := range txmgr.current.listeners {
   644  		l.StateCommitDone(txmgr.ledgerid)
   645  	}
   646  }
   647  
   648  func (txmgr *LockBasedTxMgr) reset() {
   649  	txmgr.current = nil
   650  }