github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/core/transientstore/store.go

/*
Copyright hechain. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package transientstore

import (
	"path/filepath"

	"github.com/golang/protobuf/proto"
	"github.com/hechain20/hechain/common/flogging"
	"github.com/hechain20/hechain/common/ledger/util/leveldbhelper"
	"github.com/hechain20/hechain/common/util"
	"github.com/hechain20/hechain/core/ledger"
	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
	"github.com/hyperledger/fabric-protos-go/transientstore"
	"github.com/pkg/errors"
	"github.com/syndtr/goleveldb/leveldb/iterator"
)

var logger = flogging.MustGetLogger("transientstore")

var (
	emptyValue = []byte{}
	nilByte    = byte('\x00')
	// ErrStoreEmpty is used to indicate that there are no entries in the transient store
	ErrStoreEmpty = errors.New("Transient store is empty")
	// systemNamespace is the name of a db used for storage bookkeeping metadata.
	systemNamespace          = ""
	underDeletionKey         = []byte("UNDER_DELETION")
	transientStorageLockName = "transientStoreFileLock"
)
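
// Bookkeeping note: the provider keeps a single record in the systemNamespace
// db, keyed by underDeletionKey, whose value is a marshaled
// PendingDeleteStorageList (a proto message defined elsewhere in this package,
// whose List field holds the ledger IDs awaiting deletion). See
// markStorageListForDelete and getStorageMarkedForDeletion below.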
    35  
//////////////////////////////////////////////
// Interfaces and data types
/////////////////////////////////////////////

// StoreProvider provides an instance of a transient Store
type StoreProvider interface {
	OpenStore(ledgerID string) (*Store, error)
	Close()
}

// RWSetScanner provides an iterator for EndorserPvtSimulationResults
type RWSetScanner interface {
	// Next returns the next EndorserPvtSimulationResults from the RWSetScanner.
	// It returns (nil, nil) when no further data remains, and an error on failure.
	Next() (*EndorserPvtSimulationResults, error)
	// Close frees the resources associated with this RWSetScanner
	Close()
}
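
// A minimal drain loop over an RWSetScanner might look like the following
// (illustrative sketch only; acquiring the scanner and the process function
// are assumptions, not part of this file):
//
//	defer scanner.Close()
//	for {
//		res, err := scanner.Next()
//		if err != nil {
//			return err
//		}
//		if res == nil {
//			break // iterator exhausted
//		}
//		process(res)
//	}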
    55  
// EndorserPvtSimulationResults captures the details of the simulation results specific to an endorser
type EndorserPvtSimulationResults struct {
	ReceivedAtBlockHeight          uint64
	PvtSimulationResultsWithConfig *transientstore.TxPvtReadWriteSetWithConfigInfo
}

//////////////////////////////////////////////
// Implementation
/////////////////////////////////////////////

// storeProvider encapsulates a leveldb provider which is used to store
// private write sets of simulated transactions, and implements the
// StoreProvider interface.
type storeProvider struct {
	dbProvider *leveldbhelper.Provider
	fileLock   *leveldbhelper.FileLock
}

// Store manages the transient storage of a single ledger, backed by a
// leveldb handle.
type Store struct {
	db       *leveldbhelper.DBHandle
	ledgerID string
}

// RwsetScanner iterates over the results of a transient store range query
type RwsetScanner struct {
	txid   string
	dbItr  iterator.Iterator
	filter ledger.PvtNsCollFilter
}

// NewStoreProvider instantiates a StoreProvider
func NewStoreProvider(path string) (StoreProvider, error) {
	// Ensure the routine is invoked while the peer is down.
	lockPath := filepath.Join(filepath.Dir(path), transientStorageLockName)
	lock := leveldbhelper.NewFileLock(lockPath)
	if err := lock.Lock(); err != nil {
		return nil, errors.WithMessage(err, "as another peer node command is executing,"+
			" wait for that command to complete its execution or terminate it before retrying")
	}

	provider, err := newStoreProvider(path, lock)
	if err != nil {
		lock.Unlock()
		return nil, errors.WithMessagef(err, "could not construct storage provider in folder [%s]", path)
	}

	return provider, nil
}
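
// A typical provider lifecycle, as an illustrative sketch (the storage path
// and channel name below are assumptions, not values defined in this file):
//
//	provider, err := NewStoreProvider("/var/hyperledger/production/transientstore")
//	if err != nil {
//		return err
//	}
//	defer provider.Close() // closes the db and releases the file lock
//
//	store, err := provider.OpenStore("mychannel")
//	if err != nil {
//		return err
//	}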
   105  
// Private method used to unwind a dependency between the package-level Drop and NewStoreProvider routines.
// This routine must be invoked while holding the newStoreProvider file lock.
func newStoreProvider(providerPath string, fileLock *leveldbhelper.FileLock) (*storeProvider, error) {
	logger.Debugw("opening provider", "providerPath", providerPath)

	if !fileLock.IsLocked() {
		panic("newStoreProvider invoked without holding 'fileLock'")
	}

	dbProvider, err := leveldbhelper.NewProvider(&leveldbhelper.Conf{DBPath: providerPath})
	if err != nil {
		return nil, errors.WithMessage(err, "could not open dbprovider")
	}

	provider := &storeProvider{dbProvider: dbProvider, fileLock: fileLock}

	// Purge any databases marked for deletion. This may occur at the next peer init after a
	// transient storage deletion failed due to a crash or system error.
	if err = provider.processPendingStorageDeletions(); err != nil {
		return nil, errors.WithMessagef(err, "processing pending storage deletions in folder [%s]", providerPath)
	}

	return provider, nil
}

// OpenStore returns a Store handle for the given ledger ID
func (provider *storeProvider) OpenStore(ledgerID string) (*Store, error) {
	dbHandle := provider.dbProvider.GetDBHandle(ledgerID)
	return &Store{db: dbHandle, ledgerID: ledgerID}, nil
}

// Close closes the StoreProvider
func (provider *storeProvider) Close() {
	if provider.dbProvider != nil {
		provider.dbProvider.Close()
	}
	if provider.fileLock != nil {
		provider.fileLock.Unlock()
	}
}

// delete the transient storage for a given ledger.
func (provider *storeProvider) deleteStore(ledgerID string) error {
	return provider.dbProvider.Drop(ledgerID)
}

func (provider *storeProvider) markStorageForDelete(ledgerID string) error {
	marked, err := provider.getStorageMarkedForDeletion()
	if err != nil {
		return errors.WithMessage(err, "while listing delete marked storage")
	}

	// don't update if the storage is already marked for deletion.
	for _, l := range marked.List {
		if ledgerID == l {
			logger.Infow("Transient storage was already marked for delete", "ledgerID", ledgerID)
			return nil
		}
	}

	marked.List = append(marked.List, ledgerID)

	err = provider.markStorageListForDelete(marked)
	if err != nil {
		return errors.WithMessagef(err, "while updating storage list %v for deletion", marked)
	}

	return nil
}

// Write the UNDER_DELETION ledger set as a proto.Message in the transient system catalog.
func (provider *storeProvider) markStorageListForDelete(deleteList *PendingDeleteStorageList) error {
	b, err := proto.Marshal(deleteList)
	if err != nil {
		return errors.WithMessage(err, "error while marshaling PendingDeleteStorageList")
	}

	db := provider.dbProvider.GetDBHandle(systemNamespace)
	defer db.Close()

	if err = db.Put(underDeletionKey, b, true); err != nil {
		return errors.WithMessage(err, "writing delete list to system storage")
	}

	return nil
}

// Find the set of ledgers tagged as UNDER_DELETION.
func (provider *storeProvider) getStorageMarkedForDeletion() (*PendingDeleteStorageList, error) {
	deleteList := &PendingDeleteStorageList{}

	db := provider.dbProvider.GetDBHandle(systemNamespace)
	defer db.Close()

	val, err := db.Get(underDeletionKey)
	if err != nil {
		return nil, errors.WithMessage(err, "retrieving storage marked for deletion")
	}

	// no storage was previously marked for deletion
	if val == nil {
		return deleteList, nil
	}

	if err = proto.Unmarshal(val, deleteList); err != nil {
		return nil, errors.WithMessagef(err, "unmarshalling proto delete list: %s", string(val))
	}

	return deleteList, nil
}

// Remove a ledger ID from the list of transient storages currently marked for delete.
func (provider *storeProvider) clearStorageDeletionStatus(ledgerID string) error {
	dl, err := provider.getStorageMarkedForDeletion()
	if err != nil {
		return errors.WithMessagef(err, "clearing storage flag for ledger [%s]", ledgerID)
	}

	newDeletes := &PendingDeleteStorageList{}

	// retain all entries other than the one to be cleared.
	for _, l := range dl.List {
		if ledgerID == l {
			continue
		}
		newDeletes.List = append(newDeletes.List, l)
	}

	// Nothing to do: ledgerID was not in the current delete list
	if len(dl.List) == len(newDeletes.List) {
		return nil
	}

	if err = provider.markStorageListForDelete(newDeletes); err != nil {
		return errors.WithMessagef(err, "subtracting [%s] from delete tag list", ledgerID)
	}

	return nil
}

// Delete any transient storages that are marked as UNDER_DELETION. This routine may be called
// at provider construction to purge any partially deleted transient stores.
func (provider *storeProvider) processPendingStorageDeletions() error {
	dl, err := provider.getStorageMarkedForDeletion()
	if err != nil {
		return errors.WithMessage(err, "processing pending deletion list")
	}

	for _, l := range dl.List {
		err = provider.deleteStore(l)
		if err != nil {
			return errors.WithMessagef(err, "processing delete for storage [%s]", l)
		}

		err = provider.clearStorageDeletionStatus(l)
		if err != nil {
			return errors.WithMessagef(err, "clearing deletion status for storage [%s]", l)
		}
	}

	return nil
}
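
// Together, markStorageForDelete, deleteStore, and clearStorageDeletionStatus
// form a simple write-ahead deletion protocol: the UNDER_DELETION marker is
// durably written before the drop and removed only after the drop succeeds,
// so a crash at any point leaves behind a marker that
// processPendingStorageDeletions replays at the next provider construction.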
   268  
// Drop removes a transient storage associated with an input channel/ledger.
// This function must be invoked while the peer is shut down. To recover from partial deletion
// due to a crash, the storage will be marked with an UNDER_DELETION status in a system
// namespace db. At the next peer startup, transient storage marked with UNDER_DELETION will
// be scrubbed from the system.
func Drop(providerPath, ledgerID string) error {
	logger.Infow("Dropping ledger from transient storage", "ledgerID", ledgerID, "providerPath", providerPath)

	// Ensure the routine is invoked while the peer is down.
	lockPath := filepath.Join(filepath.Dir(providerPath), transientStorageLockName)
	lock := leveldbhelper.NewFileLock(lockPath)
	if err := lock.Lock(); err != nil {
		return errors.New("as another peer node command is executing," +
			" wait for that command to complete its execution or terminate it before retrying")
	}
	defer lock.Unlock()

	// Set up a StoreProvider
	provider, err := newStoreProvider(providerPath, lock)
	if err != nil {
		return errors.WithMessagef(err, "constructing provider from path [%s]", providerPath)
	}
	defer provider.Close()

	// Mark the storage as UNDER_DELETION so that it can be purged if an error occurs during the drop
	err = provider.markStorageForDelete(ledgerID)
	if err != nil {
		return errors.WithMessagef(err, "marking storage [%s] for deletion", ledgerID)
	}

	// actually delete the storage.
	if err = provider.deleteStore(ledgerID); err != nil {
		return errors.WithMessagef(err, "dropping ledger [%s] from transient storage", ledgerID)
	}

	// reset the deletion flag
	if err = provider.clearStorageDeletionStatus(ledgerID); err != nil {
		return errors.WithMessagef(err, "clearing deletion state for transient storage [%s]", ledgerID)
	}

	logger.Infow("Successfully dropped ledger from transient storage", "ledgerID", ledgerID)

	return nil
}
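
// Illustrative offline usage, as a sketch (the path and channel name are
// assumptions): Drop is intended for administrative tooling that runs while
// the peer process is stopped.
//
//	if err := Drop("/var/hyperledger/production/transientstore", "mychannel"); err != nil {
//		log.Fatalf("failed to drop transient storage: %s", err)
//	}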
   313  
// Persist stores the private write set of a transaction along with the collection config
// in the transient store, keyed by txid and the block height at which the private data was received
func (s *Store) Persist(txid string, blockHeight uint64,
	privateSimulationResultsWithConfig *transientstore.TxPvtReadWriteSetWithConfigInfo) error {
	logger.Debugf("Persisting private data to transient store for txid [%s] at block height [%d]", txid, blockHeight)

	dbBatch := s.db.NewUpdateBatch()

	// Create compositeKey with appropriate prefix, txid, uuid and blockHeight.
	// Because the txid may have multiple private write sets persisted from different
	// endorsers (via Gossip), we append a uuid to the txid to avoid collisions.
	uuid := util.GenerateUUID()
	compositeKeyPvtRWSet := createCompositeKeyForPvtRWSet(txid, uuid, blockHeight)
	privateSimulationResultsWithConfigBytes, err := proto.Marshal(privateSimulationResultsWithConfig)
	if err != nil {
		return err
	}

	// Note that some rwset.TxPvtReadWriteSet may exist in the transient store immediately after
	// upgrading the peer to v1.2. In order to differentiate between the new proto and the old proto while
	// retrieving, a nil byte is prepended to the new proto, i.e., privateSimulationResultsWithConfigBytes,
	// as a marshaled message can never start with a nil byte. In v1.3, we can avoid prepending the
	// nil byte.
	value := append([]byte{nilByte}, privateSimulationResultsWithConfigBytes...)
	dbBatch.Put(compositeKeyPvtRWSet, value)

	// Create two indexes: (i) by txid, and (ii) by height

	// Create compositeKey for the purge index by height with appropriate prefix, blockHeight,
	// txid, uuid and store the compositeKey (purge index) with an empty value. Note that
	// the purge index is used by PurgeBelowHeight() to remove, per BTL policy, orphan entries
	// in the transient store that are not removed by PurgeByTxids(). (Orphan entries arise
	// from transactions that get endorsed but are never submitted by the client for commit.)
	compositeKeyPurgeIndexByHeight := createCompositeKeyForPurgeIndexByHeight(blockHeight, txid, uuid)
	dbBatch.Put(compositeKeyPurgeIndexByHeight, emptyValue)

	// Create compositeKey for the purge index by txid with appropriate prefix, txid, uuid,
	// blockHeight and store the compositeKey (purge index) with an empty value.
	// Though compositeKeyPvtRWSet itself could be used to purge private write sets by txid,
	// we create a separate composite key with an empty value. The reason is that
	// if we used compositeKeyPvtRWSet, we would unnecessarily read the (potentially large) private
	// write set associated with the key from the db. Note that this purge index is used by
	// PurgeByTxids() to remove non-orphan entries in the transient store.
	// Note: We could create compositeKeyPurgeIndexByTxid by just replacing the prefix of compositeKeyPvtRWSet
	// with purgeIndexByTxidPrefix. For code readability and to be expressive, we use
	// createCompositeKeyForPurgeIndexByTxid() instead.
	compositeKeyPurgeIndexByTxid := createCompositeKeyForPurgeIndexByTxid(txid, uuid, blockHeight)
	dbBatch.Put(compositeKeyPurgeIndexByTxid, emptyValue)

	return s.db.WriteBatch(dbBatch, true)
}
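
// For reference, a single Persist call therefore writes three entries. The
// layout below is symbolic; the exact byte encoding lives in this package's
// composite key helpers:
//
//	pvtRWSet      | txid | uuid | blockHeight  ->  0x00 + marshaled TxPvtReadWriteSetWithConfigInfo
//	purgeByHeight | blockHeight | txid | uuid  ->  empty value
//	purgeByTxid   | txid | uuid | blockHeight  ->  empty value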
   365  
// GetTxPvtRWSetByTxid returns an iterator because the txid may have multiple private
// write sets persisted from different endorsers.
func (s *Store) GetTxPvtRWSetByTxid(txid string, filter ledger.PvtNsCollFilter) (RWSetScanner, error) {
	logger.Debugf("Getting private data from transient store for transaction %s", txid)

	// Construct startKey and endKey to do a range query
	startKey := createTxidRangeStartKey(txid)
	endKey := createTxidRangeEndKey(txid)

	iter, err := s.db.GetIterator(startKey, endKey)
	if err != nil {
		return nil, err
	}
	return &RwsetScanner{txid, iter, filter}, nil
}
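
// Illustrative retrieval sketch (the txid value is an assumption; a nil
// filter is assumed to leave the write sets untrimmed). Pair it with the
// drain loop shown after the RWSetScanner interface above:
//
//	scanner, err := store.GetTxPvtRWSetByTxid("abcd1234", nil)
//	if err != nil {
//		return err
//	}
//	defer scanner.Close()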
   381  
// PurgeByTxids removes private write sets of a given set of transactions from the
// transient store. PurgeByTxids() is expected to be called by the coordinator after
// committing a block to the ledger.
func (s *Store) PurgeByTxids(txids []string) error {
	logger.Debug("Purging private data from transient store for committed txids")

	dbBatch := s.db.NewUpdateBatch()

	for _, txid := range txids {
		// Construct startKey and endKey to do a range query
		startKey := createPurgeIndexByTxidRangeStartKey(txid)
		endKey := createPurgeIndexByTxidRangeEndKey(txid)

		iter, err := s.db.GetIterator(startKey, endKey)
		if err != nil {
			return err
		}

		// Extract the txid and uuid from each result and remove the entry from the
		// transient store (both the write set and the corresponding indexes).
		for iter.Next() {
			// For each entry, remove the private read-write set and corresponding indexes

			// Remove private write set
			compositeKeyPurgeIndexByTxid := iter.Key()
			// Note: We could create compositeKeyPvtRWSet by just replacing the prefix of compositeKeyPurgeIndexByTxid
			// with prwsetPrefix. For code readability and to be expressive, we split and create again.
			uuid, blockHeight, err := splitCompositeKeyOfPurgeIndexByTxid(compositeKeyPurgeIndexByTxid)
			if err != nil {
				// release the iterator before bailing out
				iter.Release()
				return err
			}
			compositeKeyPvtRWSet := createCompositeKeyForPvtRWSet(txid, uuid, blockHeight)
			dbBatch.Delete(compositeKeyPvtRWSet)

			// Remove purge index -- purgeIndexByHeight
			compositeKeyPurgeIndexByHeight := createCompositeKeyForPurgeIndexByHeight(blockHeight, txid, uuid)
			dbBatch.Delete(compositeKeyPurgeIndexByHeight)

			// Remove purge index -- purgeIndexByTxid
			dbBatch.Delete(compositeKeyPurgeIndexByTxid)
		}
		iter.Release()
	}
	// If the peer fails before/while writing the batch to goleveldb, these entries will be
	// removed later by PurgeBelowHeight() as per the BTL policy
	return s.db.WriteBatch(dbBatch, true)
}
   429  
// PurgeBelowHeight removes private write sets persisted at block heights lower than
// a given maxBlockNumToRetain. In other words, it only retains private write sets
// that were persisted at block height maxBlockNumToRetain or higher. Though the private
// write sets stored in the transient store are removed by the coordinator using PurgeByTxids()
// after a successful block commit, PurgeBelowHeight() is still required to remove orphan entries
// (a transaction that gets endorsed may never be submitted by the client for commit)
func (s *Store) PurgeBelowHeight(maxBlockNumToRetain uint64) error {
	logger.Debugf("Purging orphaned private data from transient store received prior to block [%d]", maxBlockNumToRetain)

	// Do a range query with 0 as startKey and maxBlockNumToRetain-1 as endKey
	startKey := createPurgeIndexByHeightRangeStartKey(0)
	endKey := createPurgeIndexByHeightRangeEndKey(maxBlockNumToRetain - 1)
	iter, err := s.db.GetIterator(startKey, endKey)
	if err != nil {
		return err
	}

	dbBatch := s.db.NewUpdateBatch()

	// Extract the txid and uuid from each result and remove the entry from the
	// transient store (both the write set and the corresponding indexes).
	for iter.Next() {
		// For each entry, remove the private read-write set and corresponding indexes

		// Remove private write set
		compositeKeyPurgeIndexByHeight := iter.Key()
		txid, uuid, blockHeight, err := splitCompositeKeyOfPurgeIndexByHeight(compositeKeyPurgeIndexByHeight)
		if err != nil {
			// release the iterator before bailing out
			iter.Release()
			return err
		}
		logger.Debugf("Purging from transient store private data simulated at block [%d]: txid [%s] uuid [%s]", blockHeight, txid, uuid)

		compositeKeyPvtRWSet := createCompositeKeyForPvtRWSet(txid, uuid, blockHeight)
		dbBatch.Delete(compositeKeyPvtRWSet)

		// Remove purge index -- purgeIndexByTxid
		compositeKeyPurgeIndexByTxid := createCompositeKeyForPurgeIndexByTxid(txid, uuid, blockHeight)
		dbBatch.Delete(compositeKeyPurgeIndexByTxid)

		// Remove purge index -- purgeIndexByHeight
		dbBatch.Delete(compositeKeyPurgeIndexByHeight)
	}
	iter.Release()

	return s.db.WriteBatch(dbBatch, true)
}
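
// One plausible retention policy, as an illustrative sketch (retention is an
// assumed configuration value, e.g. the peer's transient block retention
// setting): after committing a block at height h, purge entries that fell out
// of the retention window.
//
//	if h > retention && h%retention == 0 {
//		if err := store.PurgeBelowHeight(h - retention); err != nil {
//			logger.Warningf("could not purge transient store below height %d: %s", h-retention, err)
//		}
//	}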
   476  
// GetMinTransientBlkHt returns the lowest block height remaining in the transient store
func (s *Store) GetMinTransientBlkHt() (uint64, error) {
	// The current approach performs a range query on purgeIndex with startKey
	// as 0 (i.e., blockHeight) and returns the first key, which denotes
	// the lowest block height remaining in the transient store. An alternative approach
	// is to explicitly store the minBlockHeight in the transient store.
	startKey := createPurgeIndexByHeightRangeStartKey(0)
	iter, err := s.db.GetIterator(startKey, nil)
	if err != nil {
		return 0, err
	}
	defer iter.Release()
	// Fetch the minimum transient block height
	if iter.Next() {
		dbKey := iter.Key()
		_, _, blockHeight, err := splitCompositeKeyOfPurgeIndexByHeight(dbKey)
		return blockHeight, err
	}
	// Returning an error may not be the right thing to do here. Maybe
	// return a bool instead; -1 is not possible because the first
	// return value is an unsigned int.
	return 0, ErrStoreEmpty
}
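
// Callers should treat ErrStoreEmpty as a sentinel rather than a failure
// (illustrative sketch):
//
//	minHt, err := store.GetMinTransientBlkHt()
//	switch {
//	case err == ErrStoreEmpty:
//		// nothing buffered; nothing to do
//	case err != nil:
//		return err
//	default:
//		_ = minHt // lowest block height still held in the transient store
//	}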
   500  
// Shutdown is a no-op; the underlying db is shared and closed by the provider.
func (s *Store) Shutdown() {
	// do nothing because a shared db is used
}

// Next moves the iterator to the next key/value pair.
// It returns <nil, nil> when the iterator is exhausted.
func (scanner *RwsetScanner) Next() (*EndorserPvtSimulationResults, error) {
	if !scanner.dbItr.Next() {
		return nil, nil
	}
	dbKey := scanner.dbItr.Key()
	dbVal := scanner.dbItr.Value()
	_, blockHeight, err := splitCompositeKeyOfPvtRWSet(dbKey)
	if err != nil {
		return nil, err
	}

	txPvtRWSet := &rwset.TxPvtReadWriteSet{}
	txPvtRWSetWithConfig := &transientstore.TxPvtReadWriteSetWithConfigInfo{}

	var filteredTxPvtRWSet *rwset.TxPvtReadWriteSet
	if dbVal[0] == nilByte {
		// new proto, i.e., TxPvtReadWriteSetWithConfigInfo
		if err := proto.Unmarshal(dbVal[1:], txPvtRWSetWithConfig); err != nil {
			return nil, err
		}

		// Trim the tx rwset based on the current collection filter;
		// filteredTxPvtRWSet is nil if the transient store entry for this txid does not contain data for the collection
		filteredTxPvtRWSet = trimPvtWSet(txPvtRWSetWithConfig.GetPvtRwset(), scanner.filter)
		configs, err := trimPvtCollectionConfigs(txPvtRWSetWithConfig.CollectionConfigs, scanner.filter)
		if err != nil {
			return nil, err
		}
		txPvtRWSetWithConfig.CollectionConfigs = configs
	} else {
		// old proto, i.e., TxPvtReadWriteSet
		if err := proto.Unmarshal(dbVal, txPvtRWSet); err != nil {
			return nil, err
		}
		filteredTxPvtRWSet = trimPvtWSet(txPvtRWSet, scanner.filter)
	}

	txPvtRWSetWithConfig.PvtRwset = filteredTxPvtRWSet

	return &EndorserPvtSimulationResults{
		ReceivedAtBlockHeight:          blockHeight,
		PvtSimulationResultsWithConfig: txPvtRWSetWithConfig,
	}, nil
}

// Close releases the resources held by the iterator
func (scanner *RwsetScanner) Close() {
	scanner.dbItr.Release()
}