github.com/defanghe/fabric@v2.1.1+incompatible/core/ledger/pvtdatastorage/store_impl.go (about)

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package pvtdatastorage
     8  
     9  import (
    10  	"fmt"
    11  	"sync"
    12  	"sync/atomic"
    13  	"time"
    14  
    15  	"github.com/golang/protobuf/proto"
    16  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    17  	"github.com/hyperledger/fabric/common/flogging"
    18  	"github.com/hyperledger/fabric/common/ledger/util/leveldbhelper"
    19  	"github.com/hyperledger/fabric/core/ledger"
    20  	"github.com/hyperledger/fabric/core/ledger/pvtdatapolicy"
    21  	"github.com/willf/bitset"
    22  )
    23  
// logger is the package-level logger for the pvtdatastorage package.
var logger = flogging.MustGetLogger("pvtdatastorage")

// provider implements the `Provider` interface returned by NewProvider.
// It holds a shared leveldb provider (one physical DB, one logical DB
// handle per ledger) and the private data configuration applied to every
// store opened through it.
type provider struct {
	dbProvider *leveldbhelper.Provider
	pvtData    *PrivateDataConfig
}
    30  
// store implements the `Store` interface for a single ledger (channel).
// It persists private write-sets along with the expiry bookkeeping and
// missing-data bitmaps in a dedicated leveldb handle.
type store struct {
	db              *leveldbhelper.DBHandle
	ledgerid        string
	btlPolicy       pvtdatapolicy.BTLPolicy // set later via Init(); used to compute data expiry
	batchesInterval int
	maxBatchSize    int
	purgeInterval   uint64 // a purge is scheduled when blockNum % purgeInterval == 0 (see performPurgeIfScheduled)

	isEmpty            bool   // true until the first block is committed
	lastCommittedBlock uint64 // written with atomic.StoreUint64 in Commit; readers load it atomically
	purgerLock         sync.Mutex // serializes the background purger and collection-eligibility processing
	collElgProcSync    *collElgProcSync
	// After committing the pvtdata of old blocks,
	// the `isLastUpdatedOldBlocksSet` is set to true.
	// Once the stateDB is updated with these pvtdata,
	// the `isLastUpdatedOldBlocksSet` is set to false.
	// isLastUpdatedOldBlocksSet is mainly used during the
	// recovery process. During the peer startup, if the
	// isLastUpdatedOldBlocksSet is set to true, the pvtdata
	// in the stateDB needs to be updated before finishing the
	// recovery operation.
	isLastUpdatedOldBlocksSet bool
}
    54  
// blkTranNumKey is an encoded key addressed by block and transaction number
// (encoding presumably defined in kv_encoding.go — confirm there).
type blkTranNumKey []byte

// dataEntry pairs a dataKey with the corresponding collection's private write-set.
type dataEntry struct {
	key   *dataKey
	value *rwset.CollectionPvtReadWriteSet
}

// expiryEntry pairs an expiryKey with the bookkeeping of which data expires at that height.
type expiryEntry struct {
	key   *expiryKey
	value *ExpiryData
}

// expiryKey identifies, for pvtdata committed at committingBlk, the block
// height (expiringBlk) at which that data expires per the BTL policy.
type expiryKey struct {
	expiringBlk   uint64
	committingBlk uint64
}

// nsCollBlk identifies a <namespace, collection> within a given block.
type nsCollBlk struct {
	ns, coll string
	blkNum   uint64
}

// dataKey identifies the private write-set of one transaction for one collection.
type dataKey struct {
	nsCollBlk
	txNum uint64
}

// missingDataKey identifies the missing-data bitmap for a <ns, coll, blkNum>.
// isEligible distinguishes entries the peer is eligible to receive from
// ineligible ones (processCollElgEvents flips ineligible entries to eligible).
type missingDataKey struct {
	nsCollBlk
	isEligible bool
}

// storeEntries carries all entries produced while preparing a block's commit
// (see prepareStoreEntries, used by Commit).
type storeEntries struct {
	dataEntries        []*dataEntry
	expiryEntries      []*expiryEntry
	missingDataEntries map[missingDataKey]*bitset.BitSet
}

// lastUpdatedOldBlocksList keeps the list of last updated blocks
// and is stored as the value of lastUpdatedOldBlocksKey (defined in kv_encoding.go)
type lastUpdatedOldBlocksList []uint64

// entriesForPvtDataOfOldBlocks accumulates the updates performed while
// committing pvtdata of old (already committed) blocks.
type entriesForPvtDataOfOldBlocks struct {
	// for each <ns, coll, blkNum, txNum>, store the dataEntry, i.e., pvtData
	dataEntries map[dataKey]*rwset.CollectionPvtReadWriteSet
	// store the retrieved (& updated) expiryData in expiryEntries
	expiryEntries map[expiryKey]*ExpiryData
	// for each <ns, coll, blkNum>, store the retrieved (& updated) bitmap in the missingDataEntries
	missingDataEntries map[nsCollBlk]*bitset.BitSet
}
   105  
   106  //////// Provider functions  /////////////
   107  //////////////////////////////////////////
   108  
   109  // NewProvider instantiates a StoreProvider
   110  func NewProvider(conf *PrivateDataConfig) (Provider, error) {
   111  	dbProvider, err := leveldbhelper.NewProvider(&leveldbhelper.Conf{DBPath: conf.StorePath})
   112  	if err != nil {
   113  		return nil, err
   114  	}
   115  	return &provider{
   116  		dbProvider: dbProvider,
   117  		pvtData:    conf,
   118  	}, nil
   119  }
   120  
   121  // OpenStore returns a handle to a store
   122  func (p *provider) OpenStore(ledgerid string) (Store, error) {
   123  	dbHandle := p.dbProvider.GetDBHandle(ledgerid)
   124  	s := &store{
   125  		db:              dbHandle,
   126  		ledgerid:        ledgerid,
   127  		batchesInterval: p.pvtData.BatchesInterval,
   128  		maxBatchSize:    p.pvtData.MaxBatchSize,
   129  		purgeInterval:   uint64(p.pvtData.PurgeInterval),
   130  		collElgProcSync: &collElgProcSync{
   131  			notification: make(chan bool, 1),
   132  			procComplete: make(chan bool, 1),
   133  		},
   134  	}
   135  	if err := s.initState(); err != nil {
   136  		return nil, err
   137  	}
   138  	s.launchCollElgProc()
   139  	logger.Debugf("Pvtdata store opened. Initial state: isEmpty [%t], lastCommittedBlock [%d]",
   140  		s.isEmpty, s.lastCommittedBlock)
   141  	return s, nil
   142  }
   143  
// Close closes the underlying leveldb provider and thereby all stores
// opened from this provider.
func (p *provider) Close() {
	p.dbProvider.Close()
}
   148  
   149  //////// store functions  ////////////////
   150  //////////////////////////////////////////
   151  
// initState restores the store's in-memory state (isEmpty,
// lastCommittedBlock, isLastUpdatedOldBlocksSet) from the database and
// rolls forward any commit that was left pending at the previous shutdown.
func (s *store) initState() error {
	var err error
	var blist lastUpdatedOldBlocksList
	if s.isEmpty, s.lastCommittedBlock, err = s.getLastCommittedBlockNum(); err != nil {
		return err
	}

	// TODO: FAB-16298 -- the concept of pendingBatch is no longer valid
	// for pvtdataStore. We can remove it v2.1. We retain the concept in
	// v2.0 to allow rolling upgrade from v142 to v2.0
	batchPending, err := s.hasPendingCommit()
	if err != nil {
		return err
	}

	if batchPending {
		// finalize the interrupted commit: advance the last-committed marker
		// and drop the pending marker in a single synced write batch
		committingBlockNum := s.nextBlockNum()
		batch := leveldbhelper.NewUpdateBatch()
		batch.Put(lastCommittedBlkkey, encodeLastCommittedBlockVal(committingBlockNum))
		batch.Delete(pendingCommitKey)
		if err := s.db.WriteBatch(batch, true); err != nil {
			return err
		}
		s.isEmpty = false
		s.lastCommittedBlock = committingBlockNum
	}

	if blist, err = s.getLastUpdatedOldBlocksList(); err != nil {
		return err
	}
	// a non-empty list means pvtdata of old blocks was committed here but the
	// stateDB may not yet reflect it — recovery must complete before normal use
	if len(blist) > 0 {
		s.isLastUpdatedOldBlocksSet = true
	} // false if not set

	return nil
}
   188  
// Init sets the BTL (block-to-live) policy used to compute when private
// data expires; it must be called before committing or querying pvtdata.
func (s *store) Init(btlPolicy pvtdatapolicy.BTLPolicy) {
	s.btlPolicy = btlPolicy
}
   192  
// Commit implements the function in the interface `Store`. It persists the
// pvtdata of the next expected block — data entries, expiry entries, and
// missing-data bitmaps — plus the last-committed-block marker in one synced
// write batch, then schedules a purge if the purge interval boundary is hit.
func (s *store) Commit(blockNum uint64, pvtData []*ledger.TxPvtData, missingPvtData ledger.TxMissingPvtDataMap) error {
	expectedBlockNum := s.nextBlockNum()
	if expectedBlockNum != blockNum {
		return &ErrIllegalArgs{fmt.Sprintf("Expected block number=%d, received block number=%d", expectedBlockNum, blockNum)}
	}

	batch := leveldbhelper.NewUpdateBatch()
	var err error
	var keyBytes, valBytes []byte

	// derive the data/expiry/missing-data entries for this block
	storeEntries, err := prepareStoreEntries(blockNum, pvtData, s.btlPolicy, missingPvtData)
	if err != nil {
		return err
	}

	for _, dataEntry := range storeEntries.dataEntries {
		keyBytes = encodeDataKey(dataEntry.key)
		if valBytes, err = encodeDataValue(dataEntry.value); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}

	for _, expiryEntry := range storeEntries.expiryEntries {
		keyBytes = encodeExpiryKey(expiryEntry.key)
		if valBytes, err = encodeExpiryValue(expiryEntry.value); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}

	for missingDataKey, missingDataValue := range storeEntries.missingDataEntries {
		keyBytes = encodeMissingDataKey(&missingDataKey)
		if valBytes, err = encodeMissingDataValue(missingDataValue); err != nil {
			return err
		}
		batch.Put(keyBytes, valBytes)
	}

	committingBlockNum := s.nextBlockNum()
	logger.Debugf("Committing private data for block [%d]", committingBlockNum)
	batch.Put(lastCommittedBlkkey, encodeLastCommittedBlockVal(committingBlockNum))
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}

	s.isEmpty = false
	// stored atomically because readers (e.g. GetPvtDataByBlockNum) load it
	// without holding any lock
	atomic.StoreUint64(&s.lastCommittedBlock, committingBlockNum)
	logger.Debugf("Committed private data for block [%d]", committingBlockNum)
	s.performPurgeIfScheduled(committingBlockNum)
	return nil
}
   246  
   247  // CommitPvtDataOfOldBlocks commits the pvtData (i.e., previously missing data) of old blocks.
   248  // The parameter `blocksPvtData` refers a list of old block's pvtdata which are missing in the pvtstore.
   249  // Given a list of old block's pvtData, `CommitPvtDataOfOldBlocks` performs the following four
   250  // operations
   251  // (1) construct dataEntries for all pvtData
   252  // (2) construct update entries (i.e., dataEntries, expiryEntries, missingDataEntries)
   253  //     from the above created data entries
   254  // (3) create a db update batch from the update entries
   255  // (4) commit the update batch to the pvtStore
   256  func (s *store) CommitPvtDataOfOldBlocks(blocksPvtData map[uint64][]*ledger.TxPvtData) error {
   257  	if s.isLastUpdatedOldBlocksSet {
   258  		return &ErrIllegalCall{`The lastUpdatedOldBlocksList is set. It means that the
   259  		stateDB may not be in sync with the pvtStore`}
   260  	}
   261  
   262  	// (1) construct dataEntries for all pvtData
   263  	dataEntries := constructDataEntriesFromBlocksPvtData(blocksPvtData)
   264  
   265  	// (2) construct update entries (i.e., dataEntries, expiryEntries, missingDataEntries) from the above created data entries
   266  	logger.Debugf("Constructing pvtdatastore entries for pvtData of [%d] old blocks", len(blocksPvtData))
   267  	updateEntries, err := s.constructUpdateEntriesFromDataEntries(dataEntries)
   268  	if err != nil {
   269  		return err
   270  	}
   271  
   272  	// (3) create a db update batch from the update entries
   273  	logger.Debug("Constructing update batch from pvtdatastore entries")
   274  	batch, err := constructUpdateBatchFromUpdateEntries(updateEntries)
   275  	if err != nil {
   276  		return err
   277  	}
   278  
   279  	// (4) commit the update batch to the pvtStore
   280  	logger.Debug("Committing the update batch to pvtdatastore")
   281  	if err := s.commitBatch(batch); err != nil {
   282  		return err
   283  	}
   284  
   285  	return nil
   286  }
   287  
   288  func constructDataEntriesFromBlocksPvtData(blocksPvtData map[uint64][]*ledger.TxPvtData) []*dataEntry {
   289  	// construct dataEntries for all pvtData
   290  	var dataEntries []*dataEntry
   291  	for blkNum, pvtData := range blocksPvtData {
   292  		// prepare the dataEntries for the pvtData
   293  		dataEntries = append(dataEntries, prepareDataEntries(blkNum, pvtData)...)
   294  	}
   295  	return dataEntries
   296  }
   297  
// constructUpdateEntriesFromDataEntries builds the full set of store updates
// implied by the given data entries: the entries themselves, the updated
// expiry bookkeeping, and the updated missing-data bitmaps. Entries whose
// expiry/missing-data records have already been purged are skipped.
func (s *store) constructUpdateEntriesFromDataEntries(dataEntries []*dataEntry) (*entriesForPvtDataOfOldBlocks, error) {
	updateEntries := &entriesForPvtDataOfOldBlocks{
		dataEntries:        make(map[dataKey]*rwset.CollectionPvtReadWriteSet),
		expiryEntries:      make(map[expiryKey]*ExpiryData),
		missingDataEntries: make(map[nsCollBlk]*bitset.BitSet)}

	// for each data entry, first, get the expiryData and missingData from the pvtStore.
	// Second, update the expiryData and missingData as per the data entry. Finally, add
	// the data entry along with the updated expiryData and missingData to the update entries
	for _, dataEntry := range dataEntries {
		// get the expiryBlk number to construct the expiryKey
		expiryKey, err := s.constructExpiryKeyFromDataEntry(dataEntry)
		if err != nil {
			return nil, err
		}

		// get the existing expiryData entry
		var expiryData *ExpiryData
		if !neverExpires(expiryKey.expiringBlk) {
			if expiryData, err = s.getExpiryDataFromUpdateEntriesOrStore(updateEntries, expiryKey); err != nil {
				return nil, err
			}
			if expiryData == nil {
				// data entry is already expired
				// and purged (a rare scenario)
				continue
			}
		}

		// get the existing missingData entry
		var missingData *bitset.BitSet
		nsCollBlk := dataEntry.key.nsCollBlk
		if missingData, err = s.getMissingDataFromUpdateEntriesOrStore(updateEntries, nsCollBlk); err != nil {
			return nil, err
		}
		if missingData == nil {
			// data entry is already expired
			// and purged (a rare scenario)
			continue
		}

		updateEntries.addDataEntry(dataEntry)
		if expiryData != nil { // would be nil for the never expiring entry
			expiryEntry := &expiryEntry{&expiryKey, expiryData}
			updateEntries.updateAndAddExpiryEntry(expiryEntry, dataEntry.key)
		}
		updateEntries.updateAndAddMissingDataEntry(missingData, dataEntry.key)
	}
	return updateEntries, nil
}
   348  
   349  func (s *store) constructExpiryKeyFromDataEntry(dataEntry *dataEntry) (expiryKey, error) {
   350  	// get the expiryBlk number to construct the expiryKey
   351  	nsCollBlk := dataEntry.key.nsCollBlk
   352  	expiringBlk, err := s.btlPolicy.GetExpiringBlock(nsCollBlk.ns, nsCollBlk.coll, nsCollBlk.blkNum)
   353  	if err != nil {
   354  		return expiryKey{}, err
   355  	}
   356  	return expiryKey{expiringBlk, nsCollBlk.blkNum}, nil
   357  }
   358  
   359  func (s *store) getExpiryDataFromUpdateEntriesOrStore(updateEntries *entriesForPvtDataOfOldBlocks, expiryKey expiryKey) (*ExpiryData, error) {
   360  	expiryData, ok := updateEntries.expiryEntries[expiryKey]
   361  	if !ok {
   362  		var err error
   363  		expiryData, err = s.getExpiryDataOfExpiryKey(&expiryKey)
   364  		if err != nil {
   365  			return nil, err
   366  		}
   367  	}
   368  	return expiryData, nil
   369  }
   370  
   371  func (s *store) getMissingDataFromUpdateEntriesOrStore(updateEntries *entriesForPvtDataOfOldBlocks, nsCollBlk nsCollBlk) (*bitset.BitSet, error) {
   372  	missingData, ok := updateEntries.missingDataEntries[nsCollBlk]
   373  	if !ok {
   374  		var err error
   375  		missingDataKey := &missingDataKey{nsCollBlk, true}
   376  		missingData, err = s.getBitmapOfMissingDataKey(missingDataKey)
   377  		if err != nil {
   378  			return nil, err
   379  		}
   380  	}
   381  	return missingData, nil
   382  }
   383  
   384  func (updateEntries *entriesForPvtDataOfOldBlocks) addDataEntry(dataEntry *dataEntry) {
   385  	dataKey := dataKey{dataEntry.key.nsCollBlk, dataEntry.key.txNum}
   386  	updateEntries.dataEntries[dataKey] = dataEntry.value
   387  }
   388  
   389  func (updateEntries *entriesForPvtDataOfOldBlocks) updateAndAddExpiryEntry(expiryEntry *expiryEntry, dataKey *dataKey) {
   390  	txNum := dataKey.txNum
   391  	nsCollBlk := dataKey.nsCollBlk
   392  	// update
   393  	expiryEntry.value.addPresentData(nsCollBlk.ns, nsCollBlk.coll, txNum)
   394  	// we cannot delete entries from MissingDataMap as
   395  	// we keep only one entry per missing <ns-col>
   396  	// irrespective of the number of txNum.
   397  
   398  	// add
   399  	expiryKey := expiryKey{expiryEntry.key.expiringBlk, expiryEntry.key.committingBlk}
   400  	updateEntries.expiryEntries[expiryKey] = expiryEntry.value
   401  }
   402  
   403  func (updateEntries *entriesForPvtDataOfOldBlocks) updateAndAddMissingDataEntry(missingData *bitset.BitSet, dataKey *dataKey) {
   404  
   405  	txNum := dataKey.txNum
   406  	nsCollBlk := dataKey.nsCollBlk
   407  	// update
   408  	missingData.Clear(uint(txNum))
   409  	// add
   410  	updateEntries.missingDataEntries[nsCollBlk] = missingData
   411  }
   412  
   413  func constructUpdateBatchFromUpdateEntries(updateEntries *entriesForPvtDataOfOldBlocks) (*leveldbhelper.UpdateBatch, error) {
   414  	batch := leveldbhelper.NewUpdateBatch()
   415  
   416  	// add the following four types of entries to the update batch: (1) new data entries
   417  	// (i.e., pvtData), (2) updated expiry entries, (3) updated missing data entries, and
   418  	// (4) updated block list
   419  
   420  	// (1) add new data entries to the batch
   421  	if err := addNewDataEntriesToUpdateBatch(batch, updateEntries); err != nil {
   422  		return nil, err
   423  	}
   424  
   425  	// (2) add updated expiryEntry to the batch
   426  	if err := addUpdatedExpiryEntriesToUpdateBatch(batch, updateEntries); err != nil {
   427  		return nil, err
   428  	}
   429  
   430  	// (3) add updated missingData to the batch
   431  	if err := addUpdatedMissingDataEntriesToUpdateBatch(batch, updateEntries); err != nil {
   432  		return nil, err
   433  	}
   434  
   435  	return batch, nil
   436  }
   437  
   438  func addNewDataEntriesToUpdateBatch(batch *leveldbhelper.UpdateBatch, entries *entriesForPvtDataOfOldBlocks) error {
   439  	var keyBytes, valBytes []byte
   440  	var err error
   441  	for dataKey, pvtData := range entries.dataEntries {
   442  		keyBytes = encodeDataKey(&dataKey)
   443  		if valBytes, err = encodeDataValue(pvtData); err != nil {
   444  			return err
   445  		}
   446  		batch.Put(keyBytes, valBytes)
   447  	}
   448  	return nil
   449  }
   450  
   451  func addUpdatedExpiryEntriesToUpdateBatch(batch *leveldbhelper.UpdateBatch, entries *entriesForPvtDataOfOldBlocks) error {
   452  	var keyBytes, valBytes []byte
   453  	var err error
   454  	for expiryKey, expiryData := range entries.expiryEntries {
   455  		keyBytes = encodeExpiryKey(&expiryKey)
   456  		if valBytes, err = encodeExpiryValue(expiryData); err != nil {
   457  			return err
   458  		}
   459  		batch.Put(keyBytes, valBytes)
   460  	}
   461  	return nil
   462  }
   463  
   464  func addUpdatedMissingDataEntriesToUpdateBatch(batch *leveldbhelper.UpdateBatch, entries *entriesForPvtDataOfOldBlocks) error {
   465  	var keyBytes, valBytes []byte
   466  	var err error
   467  	for nsCollBlk, missingData := range entries.missingDataEntries {
   468  		keyBytes = encodeMissingDataKey(&missingDataKey{nsCollBlk, true})
   469  		// if the missingData is empty, we need to delete the missingDataKey
   470  		if missingData.None() {
   471  			batch.Delete(keyBytes)
   472  			continue
   473  		}
   474  		if valBytes, err = encodeMissingDataValue(missingData); err != nil {
   475  			return err
   476  		}
   477  		batch.Put(keyBytes, valBytes)
   478  	}
   479  	return nil
   480  }
   481  
   482  func (s *store) commitBatch(batch *leveldbhelper.UpdateBatch) error {
   483  	// commit the batch to the store
   484  	if err := s.db.WriteBatch(batch, true); err != nil {
   485  		return err
   486  	}
   487  
   488  	return nil
   489  }
   490  
   491  // TODO FAB-16293 -- GetLastUpdatedOldBlocksPvtData() can be removed either in v2.0 or in v2.1.
   492  // If we decide to rebuild stateDB in v2.0, by default, the rebuild logic would take
   493  // care of synching stateDB with pvtdataStore without calling GetLastUpdatedOldBlocksPvtData().
   494  // Hence, it can be safely removed. Suppose if we decide not to rebuild stateDB in v2.0,
   495  // we can remove this function in v2.1.
   496  // GetLastUpdatedOldBlocksPvtData implements the function in the interface `Store`
   497  func (s *store) GetLastUpdatedOldBlocksPvtData() (map[uint64][]*ledger.TxPvtData, error) {
   498  	if !s.isLastUpdatedOldBlocksSet {
   499  		return nil, nil
   500  	}
   501  
   502  	updatedBlksList, err := s.getLastUpdatedOldBlocksList()
   503  	if err != nil {
   504  		return nil, err
   505  	}
   506  
   507  	blksPvtData := make(map[uint64][]*ledger.TxPvtData)
   508  	for _, blkNum := range updatedBlksList {
   509  		if blksPvtData[blkNum], err = s.GetPvtDataByBlockNum(blkNum, nil); err != nil {
   510  			return nil, err
   511  		}
   512  	}
   513  	return blksPvtData, nil
   514  }
   515  
   516  func (s *store) getLastUpdatedOldBlocksList() ([]uint64, error) {
   517  	var v []byte
   518  	var err error
   519  	if v, err = s.db.Get(lastUpdatedOldBlocksKey); err != nil {
   520  		return nil, err
   521  	}
   522  	if v == nil {
   523  		return nil, nil
   524  	}
   525  
   526  	var updatedBlksList []uint64
   527  	buf := proto.NewBuffer(v)
   528  	numBlks, err := buf.DecodeVarint()
   529  	if err != nil {
   530  		return nil, err
   531  	}
   532  	for i := 0; i < int(numBlks); i++ {
   533  		blkNum, err := buf.DecodeVarint()
   534  		if err != nil {
   535  			return nil, err
   536  		}
   537  		updatedBlksList = append(updatedBlksList, blkNum)
   538  	}
   539  	return updatedBlksList, nil
   540  }
   541  
   542  // TODO FAB-16294 -- ResetLastUpdatedOldBlocksList() can be removed in v2.1.
   543  // From v2.0 onwards, we do not store the last updatedBlksList. Only to support
   544  // the rolling upgrade from v142 to v2.0, we retain the ResetLastUpdatedOldBlocksList()
   545  // in v2.0.
   546  
// ResetLastUpdatedOldBlocksList implements the function in the interface `Store`.
// It deletes the persisted last-updated-blocks list (with a synced write) and
// clears the in-memory recovery flag.
func (s *store) ResetLastUpdatedOldBlocksList() error {
	batch := leveldbhelper.NewUpdateBatch()
	batch.Delete(lastUpdatedOldBlocksKey)
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}
	s.isLastUpdatedOldBlocksSet = false
	return nil
}
   557  
// GetPvtDataByBlockNum implements the function in the interface `Store`.
// If the store is empty or the last committed block number is smaller than the
// requested block number, an 'ErrOutOfRange' is thrown. Expired entries and
// entries not matching the filter are skipped; the remaining write-sets are
// grouped per transaction via txPvtdataAssembler.
func (s *store) GetPvtDataByBlockNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	logger.Debugf("Get private data for block [%d], filter=%#v", blockNum, filter)
	if s.isEmpty {
		return nil, &ErrOutOfRange{"The store is empty"}
	}
	// loaded atomically because Commit may advance it concurrently
	lastCommittedBlock := atomic.LoadUint64(&s.lastCommittedBlock)
	if blockNum > lastCommittedBlock {
		return nil, &ErrOutOfRange{fmt.Sprintf("Last committed block=%d, block requested=%d", lastCommittedBlock, blockNum)}
	}
	startKey, endKey := getDataKeysForRangeScanByBlockNum(blockNum)
	logger.Debugf("Querying private data storage for write sets using startKey=%#v, endKey=%#v", startKey, endKey)
	itr := s.db.GetIterator(startKey, endKey)
	defer itr.Release()

	var blockPvtdata []*ledger.TxPvtData
	var currentTxNum uint64
	var currentTxWsetAssember *txPvtdataAssembler
	firstItr := true

	for itr.Next() {
		dataKeyBytes := itr.Key()
		// legacy v1.1-format keys take a separate retrieval path
		v11Fmt, err := v11Format(dataKeyBytes)
		if err != nil {
			return nil, err
		}
		if v11Fmt {
			return v11RetrievePvtdata(itr, filter)
		}
		dataValueBytes := itr.Value()
		dataKey, err := decodeDatakey(dataKeyBytes)
		if err != nil {
			return nil, err
		}
		expired, err := isExpired(dataKey.nsCollBlk, s.btlPolicy, lastCommittedBlock)
		if err != nil {
			return nil, err
		}
		if expired || !passesFilter(dataKey, filter) {
			continue
		}
		dataValue, err := decodeDataValue(dataValueBytes)
		if err != nil {
			return nil, err
		}

		if firstItr {
			currentTxNum = dataKey.txNum
			currentTxWsetAssember = newTxPvtdataAssembler(blockNum, currentTxNum)
			firstItr = false
		}

		// keys arrive ordered by txNum; a change of txNum closes the
		// previous transaction's assembler and starts a new one
		if dataKey.txNum != currentTxNum {
			blockPvtdata = append(blockPvtdata, currentTxWsetAssember.getTxPvtdata())
			currentTxNum = dataKey.txNum
			currentTxWsetAssember = newTxPvtdataAssembler(blockNum, currentTxNum)
		}
		currentTxWsetAssember.add(dataKey.ns, dataValue)
	}
	// flush the last in-progress transaction, if any
	if currentTxWsetAssember != nil {
		blockPvtdata = append(blockPvtdata, currentTxWsetAssember.getTxPvtdata())
	}
	return blockPvtdata, nil
}
   624  
// GetMissingPvtDataInfoForMostRecentBlocks implements the function in the interface `Store`.
// It scans the eligible missing-data entries (most recent blocks first, per the
// range-scan keys) and reports, for at most maxBlock blocks, which transactions
// miss which <ns, coll> private data.
func (s *store) GetMissingPvtDataInfoForMostRecentBlocks(maxBlock int) (ledger.MissingPvtDataInfo, error) {
	// we assume that this function would be called by the gossip only after processing the
	// last retrieved missing pvtdata info and committing the same.
	if maxBlock < 1 {
		return nil, nil
	}

	missingPvtDataInfo := make(ledger.MissingPvtDataInfo)
	numberOfBlockProcessed := 0
	lastProcessedBlock := uint64(0)
	isMaxBlockLimitReached := false
	// as we are not acquiring a read lock, new blocks can get committed while we
	// construct the MissingPvtDataInfo. As a result, lastCommittedBlock can get
	// changed. To ensure consistency, we atomically load the lastCommittedBlock value
	lastCommittedBlock := atomic.LoadUint64(&s.lastCommittedBlock)

	startKey, endKey := createRangeScanKeysForEligibleMissingDataEntries(lastCommittedBlock)
	dbItr := s.db.GetIterator(startKey, endKey)
	defer dbItr.Release()

	for dbItr.Next() {
		missingDataKeyBytes := dbItr.Key()
		missingDataKey := decodeMissingDataKey(missingDataKeyBytes)

		if isMaxBlockLimitReached && (missingDataKey.blkNum != lastProcessedBlock) {
			// ensures that exactly maxBlock number
			// of blocks' entries are processed
			break
		}

		// check whether the entry is expired. If so, move to the next item.
		// As we may use the old lastCommittedBlock value, there is a possibility that
		// this missing data is actually expired but we may get the stale information.
		// Though it may leads to extra work of pulling the expired data, it will not
		// affect the correctness. Further, as we try to fetch the most recent missing
		// data (less possibility of expiring now), such scenario would be rare. In the
		// best case, we can load the latest lastCommittedBlock value here atomically to
		// make this scenario very rare.
		lastCommittedBlock = atomic.LoadUint64(&s.lastCommittedBlock)
		expired, err := isExpired(missingDataKey.nsCollBlk, s.btlPolicy, lastCommittedBlock)
		if err != nil {
			return nil, err
		}
		if expired {
			continue
		}

		// check for an existing entry for the blkNum in the MissingPvtDataInfo.
		// If no such entry exists, create one. Also, keep track of the number of
		// processed block due to maxBlock limit.
		if _, ok := missingPvtDataInfo[missingDataKey.blkNum]; !ok {
			numberOfBlockProcessed++
			if numberOfBlockProcessed == maxBlock {
				isMaxBlockLimitReached = true
				// as there can be more than one entry for this block,
				// we cannot `break` here
				lastProcessedBlock = missingDataKey.blkNum
			}
		}

		valueBytes := dbItr.Value()
		bitmap, err := decodeMissingDataValue(valueBytes)
		if err != nil {
			return nil, err
		}

		// for each transaction which misses private data, make an entry in missingBlockPvtDataInfo
		for index, isSet := bitmap.NextSet(0); isSet; index, isSet = bitmap.NextSet(index + 1) {
			txNum := uint64(index)
			missingPvtDataInfo.Add(missingDataKey.blkNum, txNum, missingDataKey.ns, missingDataKey.coll)
		}
	}

	return missingPvtDataInfo, nil
}
   701  
   702  // ProcessCollsEligibilityEnabled implements the function in the interface `Store`
   703  func (s *store) ProcessCollsEligibilityEnabled(committingBlk uint64, nsCollMap map[string][]string) error {
   704  	key := encodeCollElgKey(committingBlk)
   705  	m := newCollElgInfo(nsCollMap)
   706  	val, err := encodeCollElgVal(m)
   707  	if err != nil {
   708  		return err
   709  	}
   710  	batch := leveldbhelper.NewUpdateBatch()
   711  	batch.Put(key, val)
   712  	if err = s.db.WriteBatch(batch, true); err != nil {
   713  		return err
   714  	}
   715  	s.collElgProcSync.notify()
   716  	return nil
   717  }
   718  
   719  func (s *store) performPurgeIfScheduled(latestCommittedBlk uint64) {
   720  	if latestCommittedBlk%s.purgeInterval != 0 {
   721  		return
   722  	}
   723  	go func() {
   724  		s.purgerLock.Lock()
   725  		logger.Debugf("Purger started: Purging expired private data till block number [%d]", latestCommittedBlk)
   726  		defer s.purgerLock.Unlock()
   727  		err := s.purgeExpiredData(0, latestCommittedBlk)
   728  		if err != nil {
   729  			logger.Warningf("Could not purge data from pvtdata store:%s", err)
   730  		}
   731  		logger.Debug("Purger finished")
   732  	}()
   733  }
   734  
   735  func (s *store) purgeExpiredData(minBlkNum, maxBlkNum uint64) error {
   736  	batch := leveldbhelper.NewUpdateBatch()
   737  	expiryEntries, err := s.retrieveExpiryEntries(minBlkNum, maxBlkNum)
   738  	if err != nil || len(expiryEntries) == 0 {
   739  		return err
   740  	}
   741  	for _, expiryEntry := range expiryEntries {
   742  		// this encoding could have been saved if the function retrieveExpiryEntries also returns the encoded expiry keys.
   743  		// However, keeping it for better readability
   744  		batch.Delete(encodeExpiryKey(expiryEntry.key))
   745  		dataKeys, missingDataKeys := deriveKeys(expiryEntry)
   746  		for _, dataKey := range dataKeys {
   747  			batch.Delete(encodeDataKey(dataKey))
   748  		}
   749  		for _, missingDataKey := range missingDataKeys {
   750  			batch.Delete(encodeMissingDataKey(missingDataKey))
   751  		}
   752  		s.db.WriteBatch(batch, false)
   753  	}
   754  	logger.Infof("[%s] - [%d] Entries purged from private data storage till block number [%d]", s.ledgerid, len(expiryEntries), maxBlkNum)
   755  	return nil
   756  }
   757  
   758  func (s *store) retrieveExpiryEntries(minBlkNum, maxBlkNum uint64) ([]*expiryEntry, error) {
   759  	startKey, endKey := getExpiryKeysForRangeScan(minBlkNum, maxBlkNum)
   760  	logger.Debugf("retrieveExpiryEntries(): startKey=%#v, endKey=%#v", startKey, endKey)
   761  	itr := s.db.GetIterator(startKey, endKey)
   762  	defer itr.Release()
   763  
   764  	var expiryEntries []*expiryEntry
   765  	for itr.Next() {
   766  		expiryKeyBytes := itr.Key()
   767  		expiryValueBytes := itr.Value()
   768  		expiryKey, err := decodeExpiryKey(expiryKeyBytes)
   769  		if err != nil {
   770  			return nil, err
   771  		}
   772  		expiryValue, err := decodeExpiryValue(expiryValueBytes)
   773  		if err != nil {
   774  			return nil, err
   775  		}
   776  		expiryEntries = append(expiryEntries, &expiryEntry{key: expiryKey, value: expiryValue})
   777  	}
   778  	return expiryEntries, nil
   779  }
   780  
// launchCollElgProc starts the long-running goroutine that converts
// missing-data entries from ineligible to eligible whenever a
// collection-eligibility event is recorded. The goroutine runs for the
// lifetime of the process (there is no stop signal).
func (s *store) launchCollElgProc() {
	go func() {
		s.processCollElgEvents() // process collection eligibility events when store is opened - in case there is an unprocessed events from previous run
		for {
			logger.Debugf("Waiting for collection eligibility event")
			s.collElgProcSync.waitForNotification()
			s.processCollElgEvents()
			s.collElgProcSync.done()
		}
	}()
}
   792  
// processCollElgEvents drains all pending collection eligibility events stored
// in the db and, for each (namespace, collection) named in an event, rewrites
// the corresponding ineligible missing-data entries as eligible ones (the key
// is re-encoded with isEligible=true; the value is preserved). Work is
// accumulated in a leveldb batch that is flushed whenever it exceeds
// maxBatchSize; between flushes purgerLock is released during a short sleep so
// that other users of the lock can make progress.
func (s *store) processCollElgEvents() {
	logger.Debugf("Starting to process collection eligibility events")
	s.purgerLock.Lock()
	defer s.purgerLock.Unlock()
	collElgStartKey, collElgEndKey := createRangeScanKeysForCollElg()
	eventItr := s.db.GetIterator(collElgStartKey, collElgEndKey)
	defer eventItr.Release()
	batch := leveldbhelper.NewUpdateBatch()
	totalEntriesConverted := 0

	// one iteration per stored eligibility event
	for eventItr.Next() {
		collElgKey, collElgVal := eventItr.Key(), eventItr.Value()
		blkNum := decodeCollElgKey(collElgKey)
		CollElgInfo, err := decodeCollElgVal(collElgVal)
		logger.Debugf("Processing collection eligibility event [blkNum=%d], CollElgInfo=%s", blkNum, CollElgInfo)
		if err != nil {
			// a malformed event is logged and skipped rather than aborting the
			// whole run; its key is NOT deleted, so it will be seen again
			logger.Errorf("This error is not expected %s", err)
			continue
		}
		for ns, colls := range CollElgInfo.NsCollMap {
			var coll string
			for _, coll = range colls.Entries {
				logger.Infof("Converting missing data entries from ineligible to eligible for [ns=%s, coll=%s]", ns, coll)
				startKey, endKey := createRangeScanKeysForIneligibleMissingData(blkNum, ns, coll)
				collItr := s.db.GetIterator(startKey, endKey)
				collEntriesConverted := 0

				for collItr.Next() { // each entry
					originalKey, originalVal := collItr.Key(), collItr.Value()
					// re-encode the key with the eligibility flag flipped
					modifiedKey := decodeMissingDataKey(originalKey)
					modifiedKey.isEligible = true
					batch.Delete(originalKey)
					// defensive copy of the value before buffering it in the
					// batch — presumably the iterator reuses its backing slice
					// on the next call to Next(); confirm against leveldbhelper
					copyVal := make([]byte, len(originalVal))
					copy(copyVal, originalVal)
					batch.Put(encodeMissingDataKey(modifiedKey), copyVal)
					collEntriesConverted++
					if batch.Len() > s.maxBatchSize {
						// flush the accumulated batch and start a fresh one
						s.db.WriteBatch(batch, true)
						batch = leveldbhelper.NewUpdateBatch()
						// time.Duration(n) is n nanoseconds here; multiplying
						// by time.Millisecond below turns it into n ms, which
						// matches the "%d milliseconds" wording of the log
						sleepTime := time.Duration(s.batchesInterval)
						logger.Infof("Going to sleep for %d milliseconds between batches. Entries for [ns=%s, coll=%s] converted so far = %d",
							sleepTime, ns, coll, collEntriesConverted)
						// yield purgerLock while sleeping so other holders can
						// run between batches; reacquired before continuing
						s.purgerLock.Unlock()
						time.Sleep(sleepTime * time.Millisecond)
						s.purgerLock.Lock()
					}
				} // entry loop

				collItr.Release()
				logger.Infof("Converted all [%d] entries for [ns=%s, coll=%s]", collEntriesConverted, ns, coll)
				totalEntriesConverted += collEntriesConverted
			} // coll loop
		} // ns loop
		batch.Delete(collElgKey) // delete the collection eligibility event key as well
	} // event loop

	// final flush covers any remainder plus the event-key deletions
	s.db.WriteBatch(batch, true)
	logger.Debugf("Converted [%d] ineligible missing data entries to eligible", totalEntriesConverted)
}
   852  
   853  // LastCommittedBlockHeight implements the function in the interface `Store`
   854  func (s *store) LastCommittedBlockHeight() (uint64, error) {
   855  	if s.isEmpty {
   856  		return 0, nil
   857  	}
   858  	return atomic.LoadUint64(&s.lastCommittedBlock) + 1, nil
   859  }
   860  
// Shutdown implements the function in the interface `Store`.
// NOTE(review): intentionally a no-op — the db handle appears to be owned by
// the provider (see provider.dbProvider in this file) and closed there;
// confirm nothing else needs releasing per-store.
func (s *store) Shutdown() {
	// do nothing
}
   865  
   866  func (s *store) nextBlockNum() uint64 {
   867  	if s.isEmpty {
   868  		return 0
   869  	}
   870  	return atomic.LoadUint64(&s.lastCommittedBlock) + 1
   871  }
   872  
   873  // TODO: FAB-16298 -- the concept of pendingBatch is no longer valid
   874  // for pvtdataStore. We can remove it v2.1. We retain the concept in
   875  // v2.0 to allow rolling upgrade from v142 to v2.0
   876  func (s *store) hasPendingCommit() (bool, error) {
   877  	var v []byte
   878  	var err error
   879  	if v, err = s.db.Get(pendingCommitKey); err != nil {
   880  		return false, err
   881  	}
   882  	return v != nil, nil
   883  }
   884  
   885  func (s *store) getLastCommittedBlockNum() (bool, uint64, error) {
   886  	var v []byte
   887  	var err error
   888  	if v, err = s.db.Get(lastCommittedBlkkey); v == nil || err != nil {
   889  		return true, 0, err
   890  	}
   891  	return false, decodeLastCommittedBlockVal(v), nil
   892  }
   893  
// collElgProcSync coordinates the background collection eligibility
// processing goroutine with its notifiers:
//   - notification carries "an event is pending" signals (see notify);
//   - procComplete carries "a processing round finished" signals (see done).
// Both channels are used with non-blocking sends, so duplicate signals are
// dropped rather than queued.
type collElgProcSync struct {
	notification, procComplete chan bool
}
   897  
   898  func (sync *collElgProcSync) notify() {
   899  	select {
   900  	case sync.notification <- true:
   901  		logger.Debugf("Signaled to collection eligibility processing routine")
   902  	default: //noop
   903  		logger.Debugf("Previous signal still pending. Skipping new signal")
   904  	}
   905  }
   906  
   907  func (sync *collElgProcSync) waitForNotification() {
   908  	<-sync.notification
   909  }
   910  
   911  func (sync *collElgProcSync) done() {
   912  	select {
   913  	case sync.procComplete <- true:
   914  	default:
   915  	}
   916  }
   917  
   918  func (sync *collElgProcSync) waitForDone() {
   919  	<-sync.procComplete
   920  }
   921  
   922  func (s *store) getBitmapOfMissingDataKey(missingDataKey *missingDataKey) (*bitset.BitSet, error) {
   923  	var v []byte
   924  	var err error
   925  	if v, err = s.db.Get(encodeMissingDataKey(missingDataKey)); err != nil {
   926  		return nil, err
   927  	}
   928  	if v == nil {
   929  		return nil, nil
   930  	}
   931  	return decodeMissingDataValue(v)
   932  }
   933  
   934  func (s *store) getExpiryDataOfExpiryKey(expiryKey *expiryKey) (*ExpiryData, error) {
   935  	var v []byte
   936  	var err error
   937  	if v, err = s.db.Get(encodeExpiryKey(expiryKey)); err != nil {
   938  		return nil, err
   939  	}
   940  	if v == nil {
   941  		return nil, nil
   942  	}
   943  	return decodeExpiryValue(v)
   944  }