github.com/osdi23p228/fabric@v0.0.0-20221218062954-77808885f5db/core/ledger/pvtdatastorage/store.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package pvtdatastorage

import (
	"fmt"
	"sync"
	"sync/atomic"
	"time"

	"github.com/golang/protobuf/proto"
	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
	"github.com/osdi23p228/fabric/common/flogging"
	"github.com/osdi23p228/fabric/common/ledger/util/leveldbhelper"
	"github.com/osdi23p228/fabric/core/ledger"
	"github.com/osdi23p228/fabric/core/ledger/pvtdatapolicy"
	"github.com/willf/bitset"
)

var (
	logger = flogging.MustGetLogger("pvtdatastorage")
)

// Provider provides a handle to a specific 'Store' that in turn manages
// private write sets for a ledger
type Provider struct {
	dbProvider *leveldbhelper.Provider
	pvtData    *PrivateDataConfig
}

// PrivateDataConfig encapsulates the configuration for private data storage on the ledger
type PrivateDataConfig struct {
	// PrivateDataConfig is used to configure a private data storage provider
	*ledger.PrivateDataConfig
	// StorePath is the filesystem path for private data storage.
	// It is internally computed by the ledger component,
	// so it is not in ledger.PrivateDataConfig and not exposed to other components.
	StorePath string
}

// Store manages the permanent storage of private write sets for a ledger
type Store struct {
	db              *leveldbhelper.DBHandle
	ledgerid        string
	btlPolicy       pvtdatapolicy.BTLPolicy
	batchesInterval int
	maxBatchSize    int
	purgeInterval   uint64

	isEmpty            bool
	lastCommittedBlock uint64
	purgerLock         sync.Mutex
	collElgProcSync    *collElgProcSync
	// isLastUpdatedOldBlocksSet is set to true after committing the
	// pvtdata of old blocks, and is reset to false once the stateDB
	// has been updated with that pvtdata. It is mainly used during
	// the recovery process: during peer startup, if
	// isLastUpdatedOldBlocksSet is true, the pvtdata in the stateDB
	// needs to be updated before the recovery operation can finish.
	isLastUpdatedOldBlocksSet bool

	deprioritizedDataReconcilerInterval time.Duration
	accessDeprioMissingDataAfter        time.Time
}

type blkTranNumKey []byte

type dataEntry struct {
	key   *dataKey
	value *rwset.CollectionPvtReadWriteSet
}

type expiryEntry struct {
	key   *expiryKey
	value *ExpiryData
}

type expiryKey struct {
	expiringBlk   uint64
	committingBlk uint64
}

type nsCollBlk struct {
	ns, coll string
	blkNum   uint64
}

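// dataKey identifies the pvtdata written for a single namespace/collection by
// transaction txNum of block blkNum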
type dataKey struct {
	nsCollBlk
	txNum uint64
}

type missingDataKey struct {
	nsCollBlk
}

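// storeEntries groups the entries prepared for committing a block's pvtdata:
// the pvtdata itself, its expiry bookkeeping, and the bitmaps of eligible and
// ineligible missing data (see prepareStoreEntries)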
type storeEntries struct {
	dataEntries             []*dataEntry
	expiryEntries           []*expiryEntry
	elgMissingDataEntries   map[missingDataKey]*bitset.BitSet
	inelgMissingDataEntries map[missingDataKey]*bitset.BitSet
}

// lastUpdatedOldBlocksList keeps the list of last updated blocks
// and is stored as the value of lastUpdatedOldBlocksKey (defined in kv_encoding.go)
type lastUpdatedOldBlocksList []uint64

//////// Provider functions  /////////////
//////////////////////////////////////////

// NewProvider instantiates a private data storage Provider
func NewProvider(conf *PrivateDataConfig) (*Provider, error) {
	dbProvider, err := leveldbhelper.NewProvider(&leveldbhelper.Conf{DBPath: conf.StorePath})
	if err != nil {
		return nil, err
	}
	return &Provider{
		dbProvider: dbProvider,
		pvtData:    conf,
	}, nil
}

// OpenStore returns a handle to a store
func (p *Provider) OpenStore(ledgerid string) (*Store, error) {
	dbHandle := p.dbProvider.GetDBHandle(ledgerid)
	s := &Store{
		db:                                  dbHandle,
		ledgerid:                            ledgerid,
		batchesInterval:                     p.pvtData.BatchesInterval,
		maxBatchSize:                        p.pvtData.MaxBatchSize,
		purgeInterval:                       uint64(p.pvtData.PurgeInterval),
		deprioritizedDataReconcilerInterval: p.pvtData.DeprioritizedDataReconcilerInterval,
		accessDeprioMissingDataAfter:        time.Now().Add(p.pvtData.DeprioritizedDataReconcilerInterval),
		collElgProcSync: &collElgProcSync{
			notification: make(chan bool, 1),
			procComplete: make(chan bool, 1),
		},
	}
	if err := s.initState(); err != nil {
		return nil, err
	}
	s.launchCollElgProc()
	logger.Debugf("Pvtdata store opened. Initial state: isEmpty [%t], lastCommittedBlock [%d]",
		s.isEmpty, s.lastCommittedBlock)
	return s, nil
}

// Close closes the store
func (p *Provider) Close() {
	p.dbProvider.Close()
}
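
// A minimal usage sketch (hypothetical caller code; `ledgerConf`, `btlPolicy`,
// `blockNum`, `pvtData`, and `missingPvtData` are placeholders supplied by the
// caller, and the store path shown is illustrative only):
//
//	provider, err := NewProvider(&PrivateDataConfig{
//		PrivateDataConfig: ledgerConf, // *ledger.PrivateDataConfig
//		StorePath:         "/var/hyperledger/ledgersData/pvtdataStore",
//	})
//	if err != nil {
//		// handle error
//	}
//	defer provider.Close()
//
//	store, err := provider.OpenStore("ledger1")
//	if err != nil {
//		// handle error
//	}
//	store.Init(btlPolicy) // must be called before the store is used
//
//	// blocks must be committed strictly in order
//	if err := store.Commit(blockNum, pvtData, missingPvtData); err != nil {
//		// handle error
//	}
//	txsPvtData, err := store.GetPvtDataByBlockNum(blockNum, nil) // nil filter returns everything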

//////// store functions  ////////////////
//////////////////////////////////////////

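// initState loads isEmpty and lastCommittedBlock from the db, finalizes any
// pending batch left behind by a pre-v2.0 store, and restores the
// isLastUpdatedOldBlocksSet flag from the lastUpdatedOldBlocksList entry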
func (s *Store) initState() error {
	var err error
	var blist lastUpdatedOldBlocksList
	if s.isEmpty, s.lastCommittedBlock, err = s.getLastCommittedBlockNum(); err != nil {
		return err
	}

	// TODO: FAB-16298 -- the concept of pendingBatch is no longer valid
	// for pvtdataStore. We can remove it in v2.1. We retain the concept in
	// v2.0 to allow a rolling upgrade from v142 to v2.0
	batchPending, err := s.hasPendingCommit()
	if err != nil {
		return err
	}

	if batchPending {
		committingBlockNum := s.nextBlockNum()
		batch := s.db.NewUpdateBatch()
		batch.Put(lastCommittedBlkkey, encodeLastCommittedBlockVal(committingBlockNum))
		batch.Delete(pendingCommitKey)
		if err := s.db.WriteBatch(batch, true); err != nil {
			return err
		}
		s.isEmpty = false
		s.lastCommittedBlock = committingBlockNum
	}

	if blist, err = s.getLastUpdatedOldBlocksList(); err != nil {
		return err
	}
	if len(blist) > 0 {
		s.isLastUpdatedOldBlocksSet = true
	} // false if not set

	return nil
}

// Init initializes the store. This function is expected to be invoked before using the store
func (s *Store) Init(btlPolicy pvtdatapolicy.BTLPolicy) {
	s.btlPolicy = btlPolicy
}

// Commit commits the pvt data as well as both the eligible and ineligible
// missing private data --- `eligible` denotes that the missing private data belongs to a collection
// for which this peer is a member; `ineligible` denotes that the missing private data belongs to a
// collection for which this peer is not a member.
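// Blocks must be committed in strictly increasing order: blockNum must equal the
// next expected block number (lastCommittedBlock+1, or 0 for an empty store),
// otherwise an ErrIllegalArgs is returned.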
func (s *Store) Commit(blockNum uint64, pvtData []*ledger.TxPvtData, missingPvtData ledger.TxMissingPvtDataMap) error {
	expectedBlockNum := s.nextBlockNum()
	if expectedBlockNum != blockNum {
		return &ErrIllegalArgs{fmt.Sprintf("Expected block number=%d, received block number=%d", expectedBlockNum, blockNum)}
	}

	batch := s.db.NewUpdateBatch()
	var err error
	var key, val []byte

	storeEntries, err := prepareStoreEntries(blockNum, pvtData, s.btlPolicy, missingPvtData)
	if err != nil {
		return err
	}

	for _, dataEntry := range storeEntries.dataEntries {
		key = encodeDataKey(dataEntry.key)
		if val, err = encodeDataValue(dataEntry.value); err != nil {
			return err
		}
		batch.Put(key, val)
	}

	for _, expiryEntry := range storeEntries.expiryEntries {
		key = encodeExpiryKey(expiryEntry.key)
		if val, err = encodeExpiryValue(expiryEntry.value); err != nil {
			return err
		}
		batch.Put(key, val)
	}

	for missingDataKey, missingDataValue := range storeEntries.elgMissingDataEntries {
		key = encodeElgPrioMissingDataKey(&missingDataKey)

		if val, err = encodeMissingDataValue(missingDataValue); err != nil {
			return err
		}
		batch.Put(key, val)
	}

	for missingDataKey, missingDataValue := range storeEntries.inelgMissingDataEntries {
		key = encodeInelgMissingDataKey(&missingDataKey)

		if val, err = encodeMissingDataValue(missingDataValue); err != nil {
			return err
		}
		batch.Put(key, val)
	}

	committingBlockNum := s.nextBlockNum()
	logger.Debugf("Committing private data for block [%d]", committingBlockNum)
	batch.Put(lastCommittedBlkkey, encodeLastCommittedBlockVal(committingBlockNum))
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}

	s.isEmpty = false
	atomic.StoreUint64(&s.lastCommittedBlock, committingBlockNum)
	logger.Debugf("Committed private data for block [%d]", committingBlockNum)
	s.performPurgeIfScheduled(committingBlockNum)
	return nil
}

// GetLastUpdatedOldBlocksPvtData returns the pvtdata of blocks listed in `lastUpdatedOldBlocksList`
// TODO FAB-16293 -- GetLastUpdatedOldBlocksPvtData() can be removed either in v2.0 or in v2.1.
// If we decide to rebuild stateDB in v2.0, by default, the rebuild logic would take
// care of syncing stateDB with pvtdataStore without calling GetLastUpdatedOldBlocksPvtData().
// Hence, it can be safely removed. If we decide not to rebuild stateDB in v2.0,
// we can remove this function in v2.1.
func (s *Store) GetLastUpdatedOldBlocksPvtData() (map[uint64][]*ledger.TxPvtData, error) {
	if !s.isLastUpdatedOldBlocksSet {
		return nil, nil
	}

	updatedBlksList, err := s.getLastUpdatedOldBlocksList()
	if err != nil {
		return nil, err
	}

	blksPvtData := make(map[uint64][]*ledger.TxPvtData)
	for _, blkNum := range updatedBlksList {
		if blksPvtData[blkNum], err = s.GetPvtDataByBlockNum(blkNum, nil); err != nil {
			return nil, err
		}
	}
	return blksPvtData, nil
}

func (s *Store) getLastUpdatedOldBlocksList() ([]uint64, error) {
	var v []byte
	var err error
	if v, err = s.db.Get(lastUpdatedOldBlocksKey); err != nil {
		return nil, err
	}
	if v == nil {
		return nil, nil
	}

	var updatedBlksList []uint64
	buf := proto.NewBuffer(v)
	numBlks, err := buf.DecodeVarint()
	if err != nil {
		return nil, err
	}
	for i := 0; i < int(numBlks); i++ {
		blkNum, err := buf.DecodeVarint()
		if err != nil {
			return nil, err
		}
		updatedBlksList = append(updatedBlksList, blkNum)
	}
	return updatedBlksList, nil
}

// TODO FAB-16294 -- ResetLastUpdatedOldBlocksList() can be removed in v2.1.
// From v2.0 onwards, we do not store the last updatedBlksList. Only to support
// the rolling upgrade from v142 to v2.0, we retain the ResetLastUpdatedOldBlocksList()
// in v2.0.

// ResetLastUpdatedOldBlocksList removes the `lastUpdatedOldBlocksList` entry from the store
func (s *Store) ResetLastUpdatedOldBlocksList() error {
	batch := s.db.NewUpdateBatch()
	batch.Delete(lastUpdatedOldBlocksKey)
	if err := s.db.WriteBatch(batch, true); err != nil {
		return err
	}
	s.isLastUpdatedOldBlocksSet = false
	return nil
}

// GetPvtDataByBlockNum returns only the pvt data corresponding to the given block number.
// The pvt data is filtered by the list of 'ns/collections' supplied in the filter.
// A nil filter does not filter any results.
func (s *Store) GetPvtDataByBlockNum(blockNum uint64, filter ledger.PvtNsCollFilter) ([]*ledger.TxPvtData, error) {
	logger.Debugf("Get private data for block [%d], filter=%#v", blockNum, filter)
	if s.isEmpty {
		return nil, &ErrOutOfRange{"The store is empty"}
	}
	lastCommittedBlock := atomic.LoadUint64(&s.lastCommittedBlock)
	if blockNum > lastCommittedBlock {
		return nil, &ErrOutOfRange{fmt.Sprintf("Last committed block=%d, block requested=%d", lastCommittedBlock, blockNum)}
	}
	startKey, endKey := getDataKeysForRangeScanByBlockNum(blockNum)
	logger.Debugf("Querying private data storage for write sets using startKey=%#v, endKey=%#v", startKey, endKey)
	itr, err := s.db.GetIterator(startKey, endKey)
	if err != nil {
		return nil, err
	}
	defer itr.Release()

	var blockPvtdata []*ledger.TxPvtData
	var currentTxNum uint64
	var currentTxWsetAssember *txPvtdataAssembler
	firstItr := true

	for itr.Next() {
		dataKeyBytes := itr.Key()
		v11Fmt, err := v11Format(dataKeyBytes)
		if err != nil {
			return nil, err
		}
		if v11Fmt {
			return v11RetrievePvtdata(itr, filter)
		}
		dataValueBytes := itr.Value()
		dataKey, err := decodeDatakey(dataKeyBytes)
		if err != nil {
			return nil, err
		}
		expired, err := isExpired(dataKey.nsCollBlk, s.btlPolicy, lastCommittedBlock)
		if err != nil {
			return nil, err
		}
		if expired || !passesFilter(dataKey, filter) {
			continue
		}
		dataValue, err := decodeDataValue(dataValueBytes)
		if err != nil {
			return nil, err
		}

		if firstItr {
			currentTxNum = dataKey.txNum
			currentTxWsetAssember = newTxPvtdataAssembler(blockNum, currentTxNum)
			firstItr = false
		}

		if dataKey.txNum != currentTxNum {
			blockPvtdata = append(blockPvtdata, currentTxWsetAssember.getTxPvtdata())
			currentTxNum = dataKey.txNum
			currentTxWsetAssember = newTxPvtdataAssembler(blockNum, currentTxNum)
		}
		currentTxWsetAssember.add(dataKey.ns, dataValue)
	}
	if currentTxWsetAssember != nil {
		blockPvtdata = append(blockPvtdata, currentTxWsetAssember.getTxPvtdata())
	}
	return blockPvtdata, nil
}

// GetMissingPvtDataInfoForMostRecentBlocks returns the missing private data information for the
// most recent `maxBlock` blocks which miss at least one private data item of an eligible collection.
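// Requests are normally served from the prioritized missing-data list; once every
// deprioritizedDataReconcilerInterval, a single request is served from the
// deprioritized list instead.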
func (s *Store) GetMissingPvtDataInfoForMostRecentBlocks(maxBlock int) (ledger.MissingPvtDataInfo, error) {
	// we assume that this function is called by the gossip module only after
	// processing and committing the previously retrieved missing pvtdata info
	if maxBlock < 1 {
		return nil, nil
	}

	if time.Now().After(s.accessDeprioMissingDataAfter) {
		s.accessDeprioMissingDataAfter = time.Now().Add(s.deprioritizedDataReconcilerInterval)
		logger.Debug("fetching missing pvtdata entries from the deprioritized list")
		return s.getMissingData(elgDeprioritizedMissingDataGroup, maxBlock)
	}

	logger.Debug("fetching missing pvtdata entries from the prioritized list")
	return s.getMissingData(elgPrioritizedMissingDataGroup, maxBlock)
}

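// getMissingData scans the given eligible missing-data group (prioritized or
// deprioritized) and returns missing pvtdata info for at most maxBlock blocks,
// skipping entries that have already expired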
func (s *Store) getMissingData(group []byte, maxBlock int) (ledger.MissingPvtDataInfo, error) {
	missingPvtDataInfo := make(ledger.MissingPvtDataInfo)
	numberOfBlockProcessed := 0
	lastProcessedBlock := uint64(0)
	isMaxBlockLimitReached := false

	// as we are not acquiring a read lock, new blocks can get committed while we
	// construct the MissingPvtDataInfo. As a result, lastCommittedBlock can get
	// changed. To ensure consistency, we atomically load the lastCommittedBlock value
	lastCommittedBlock := atomic.LoadUint64(&s.lastCommittedBlock)

	startKey, endKey := createRangeScanKeysForElgMissingData(lastCommittedBlock, group)
	dbItr, err := s.db.GetIterator(startKey, endKey)
	if err != nil {
		return nil, err
	}
	defer dbItr.Release()

	for dbItr.Next() {
		missingDataKeyBytes := dbItr.Key()
		missingDataKey := decodeElgMissingDataKey(missingDataKeyBytes)

		if isMaxBlockLimitReached && (missingDataKey.blkNum != lastProcessedBlock) {
			// ensures that exactly maxBlock number
			// of blocks' entries are processed
			break
		}

		// check whether the entry is expired. If so, move to the next item.
		// As we may use an old lastCommittedBlock value, there is a possibility that
		// this missing data has actually expired but we see stale information.
		// Though it may lead to the extra work of pulling expired data, it does not
		// affect correctness. Further, as we try to fetch the most recent missing
		// data (which is less likely to have expired), such a scenario would be rare.
		// To make it rarer still, we reload the latest lastCommittedBlock value here
		// atomically.
		lastCommittedBlock = atomic.LoadUint64(&s.lastCommittedBlock)
		expired, err := isExpired(missingDataKey.nsCollBlk, s.btlPolicy, lastCommittedBlock)
		if err != nil {
			return nil, err
		}
		if expired {
			continue
		}

		// check for an existing entry for the blkNum in the MissingPvtDataInfo.
		// If no such entry exists, create one. Also, keep track of the number of
		// blocks processed because of the maxBlock limit.
		if _, ok := missingPvtDataInfo[missingDataKey.blkNum]; !ok {
			numberOfBlockProcessed++
			if numberOfBlockProcessed == maxBlock {
				isMaxBlockLimitReached = true
				// as there can be more than one entry for this block,
				// we cannot `break` here
				lastProcessedBlock = missingDataKey.blkNum
			}
		}

		valueBytes := dbItr.Value()
		bitmap, err := decodeMissingDataValue(valueBytes)
		if err != nil {
			return nil, err
		}

		// for each transaction which misses private data, make an entry in missingPvtDataInfo
		for index, isSet := bitmap.NextSet(0); isSet; index, isSet = bitmap.NextSet(index + 1) {
			txNum := uint64(index)
			missingPvtDataInfo.Add(missingDataKey.blkNum, txNum, missingDataKey.ns, missingDataKey.coll)
		}
	}

	return missingPvtDataInfo, nil
}

// ProcessCollsEligibilityEnabled notifies the store when the peer becomes eligible to receive data for an
// existing collection. Parameter 'committingBlk' refers to the block number that contains the corresponding
// collection upgrade transaction and the parameter 'nsCollMap' contains the collections for which the peer
// is now eligible to receive pvt data
func (s *Store) ProcessCollsEligibilityEnabled(committingBlk uint64, nsCollMap map[string][]string) error {
	key := encodeCollElgKey(committingBlk)
	m := newCollElgInfo(nsCollMap)
	val, err := encodeCollElgVal(m)
	if err != nil {
		return err
	}
	batch := s.db.NewUpdateBatch()
	batch.Put(key, val)
	if err = s.db.WriteBatch(batch, true); err != nil {
		return err
	}
	s.collElgProcSync.notify()
	return nil
}

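// performPurgeIfScheduled launches a background purge of expired pvtdata once
// every purgeInterval blocks; purgerLock serializes it with the collection
// eligibility processing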
func (s *Store) performPurgeIfScheduled(latestCommittedBlk uint64) {
	if latestCommittedBlk%s.purgeInterval != 0 {
		return
	}
	go func() {
		s.purgerLock.Lock()
		logger.Debugf("Purger started: Purging expired private data till block number [%d]", latestCommittedBlk)
		defer s.purgerLock.Unlock()
		err := s.purgeExpiredData(0, latestCommittedBlk)
		if err != nil {
			logger.Warningf("Could not purge data from pvtdata store:%s", err)
		}
		logger.Debug("Purger finished")
	}()
}

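// purgeExpiredData removes the expiry entries retrieved for the given block
// range, together with the data keys and missing-data keys (prioritized,
// deprioritized, and ineligible) derived from them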
func (s *Store) purgeExpiredData(minBlkNum, maxBlkNum uint64) error {
	expiryEntries, err := s.retrieveExpiryEntries(minBlkNum, maxBlkNum)
	if err != nil || len(expiryEntries) == 0 {
		return err
	}

	batch := s.db.NewUpdateBatch()
	for _, expiryEntry := range expiryEntries {
		batch.Delete(encodeExpiryKey(expiryEntry.key))
		dataKeys, missingDataKeys := deriveKeys(expiryEntry)

		for _, dataKey := range dataKeys {
			batch.Delete(encodeDataKey(dataKey))
		}

		for _, missingDataKey := range missingDataKeys {
			batch.Delete(
				encodeElgPrioMissingDataKey(missingDataKey),
			)
			batch.Delete(
				encodeElgDeprioMissingDataKey(missingDataKey),
			)
			batch.Delete(
				encodeInelgMissingDataKey(missingDataKey),
			)
		}

		if err := s.db.WriteBatch(batch, false); err != nil {
			return err
		}
		batch.Reset()
	}

	logger.Infof("[%s] - [%d] Entries purged from private data storage till block number [%d]", s.ledgerid, len(expiryEntries), maxBlkNum)
	return nil
}

func (s *Store) retrieveExpiryEntries(minBlkNum, maxBlkNum uint64) ([]*expiryEntry, error) {
	startKey, endKey := getExpiryKeysForRangeScan(minBlkNum, maxBlkNum)
	logger.Debugf("retrieveExpiryEntries(): startKey=%#v, endKey=%#v", startKey, endKey)
	itr, err := s.db.GetIterator(startKey, endKey)
	if err != nil {
		return nil, err
	}
	defer itr.Release()

	var expiryEntries []*expiryEntry
	for itr.Next() {
		expiryKeyBytes := itr.Key()
		expiryValueBytes := itr.Value()
		expiryKey, err := decodeExpiryKey(expiryKeyBytes)
		if err != nil {
			return nil, err
		}
		expiryValue, err := decodeExpiryValue(expiryValueBytes)
		if err != nil {
			return nil, err
		}
		expiryEntries = append(expiryEntries, &expiryEntry{key: expiryKey, value: expiryValue})
	}
	return expiryEntries, nil
}

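// launchCollElgProc starts a background goroutine that processes collection
// eligibility events: once at startup, to drain any events left over from a
// previous run, and then each time ProcessCollsEligibilityEnabled signals a
// new event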
func (s *Store) launchCollElgProc() {
	go func() {
		// process collection eligibility events when the store is opened -
		// in case there are unprocessed events from a previous run
		if err := s.processCollElgEvents(); err != nil {
			logger.Errorw("failed to process collection eligibility events", "err", err)
		}
		for {
			logger.Debugf("Waiting for collection eligibility event")
			s.collElgProcSync.waitForNotification()
			if err := s.processCollElgEvents(); err != nil {
				logger.Errorw("failed to process collection eligibility events", "err", err)
			}
			s.collElgProcSync.done()
		}
	}()
}

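// processCollElgEvents converts ineligible missing-data entries into prioritized
// eligible ones for the namespaces/collections recorded in pending collection
// eligibility events; writes are flushed whenever the batch grows beyond
// maxBatchSize, with a sleep of batchesInterval milliseconds between batches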
func (s *Store) processCollElgEvents() error {
	logger.Debugf("Starting to process collection eligibility events")
	s.purgerLock.Lock()
	defer s.purgerLock.Unlock()
	collElgStartKey, collElgEndKey := createRangeScanKeysForCollElg()
	eventItr, err := s.db.GetIterator(collElgStartKey, collElgEndKey)
	if err != nil {
		return err
	}
	defer eventItr.Release()
	batch := s.db.NewUpdateBatch()
	totalEntriesConverted := 0

	for eventItr.Next() {
		collElgKey, collElgVal := eventItr.Key(), eventItr.Value()
		blkNum := decodeCollElgKey(collElgKey)
		CollElgInfo, err := decodeCollElgVal(collElgVal)
		if err != nil {
			logger.Errorf("This error is not expected %s", err)
			continue
		}
		logger.Debugf("Processing collection eligibility event [blkNum=%d], CollElgInfo=%s", blkNum, CollElgInfo)
		for ns, colls := range CollElgInfo.NsCollMap {
			var coll string
			for _, coll = range colls.Entries {
				logger.Infof("Converting missing data entries from ineligible to eligible for [ns=%s, coll=%s]", ns, coll)
				startKey, endKey := createRangeScanKeysForInelgMissingData(blkNum, ns, coll)
				collItr, err := s.db.GetIterator(startKey, endKey)
				if err != nil {
					return err
				}
				collEntriesConverted := 0

				for collItr.Next() { // each entry
					originalKey, originalVal := collItr.Key(), collItr.Value()
					modifiedKey := decodeInelgMissingDataKey(originalKey)
					batch.Delete(originalKey)
					copyVal := make([]byte, len(originalVal))
					copy(copyVal, originalVal)
					batch.Put(
						encodeElgPrioMissingDataKey(modifiedKey),
						copyVal,
					)
					collEntriesConverted++
					if batch.Len() > s.maxBatchSize {
						s.db.WriteBatch(batch, true)
						batch.Reset()
						sleepTime := time.Duration(s.batchesInterval)
						logger.Infof("Going to sleep for %d milliseconds between batches. Entries for [ns=%s, coll=%s] converted so far = %d",
							sleepTime, ns, coll, collEntriesConverted)
						s.purgerLock.Unlock()
						time.Sleep(sleepTime * time.Millisecond)
						s.purgerLock.Lock()
					}
				} // entry loop

				collItr.Release()
				logger.Infof("Converted all [%d] entries for [ns=%s, coll=%s]", collEntriesConverted, ns, coll)
				totalEntriesConverted += collEntriesConverted
			} // coll loop
		} // ns loop
		batch.Delete(collElgKey) // delete the collection eligibility event key as well
	} // event loop

	s.db.WriteBatch(batch, true)
	logger.Debugf("Converted [%d] ineligible missing data entries to eligible", totalEntriesConverted)
	return nil
}

// LastCommittedBlockHeight returns the height of the last committed block
func (s *Store) LastCommittedBlockHeight() (uint64, error) {
	if s.isEmpty {
		return 0, nil
	}
	return atomic.LoadUint64(&s.lastCommittedBlock) + 1, nil
}

func (s *Store) nextBlockNum() uint64 {
	if s.isEmpty {
		return 0
	}
	return atomic.LoadUint64(&s.lastCommittedBlock) + 1
}

// TODO: FAB-16298 -- the concept of pendingBatch is no longer valid
// for pvtdataStore. We can remove it in v2.1. We retain the concept in
// v2.0 to allow a rolling upgrade from v142 to v2.0
func (s *Store) hasPendingCommit() (bool, error) {
	var v []byte
	var err error
	if v, err = s.db.Get(pendingCommitKey); err != nil {
		return false, err
	}
	return v != nil, nil
}

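// getLastCommittedBlockNum returns (isEmpty, lastCommittedBlockNum, err);
// isEmpty is true when no block has been committed to the store yet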
func (s *Store) getLastCommittedBlockNum() (bool, uint64, error) {
	var v []byte
	var err error
	if v, err = s.db.Get(lastCommittedBlkkey); v == nil || err != nil {
		return true, 0, err
	}
	return false, decodeLastCommittedBlockVal(v), nil
}

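// collElgProcSync coordinates the background collection eligibility goroutine
// with ProcessCollsEligibilityEnabled: notify() posts a non-blocking signal that
// a new eligibility event has been written, waitForNotification() blocks until
// such a signal arrives, and done()/waitForDone() signal and await the completion
// of a processing round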
type collElgProcSync struct {
	notification, procComplete chan bool
}

func (c *collElgProcSync) notify() {
	select {
	case c.notification <- true:
		logger.Debugf("Signaled to collection eligibility processing routine")
	default: //noop
		logger.Debugf("Previous signal still pending. Skipping new signal")
	}
}

func (c *collElgProcSync) waitForNotification() {
	<-c.notification
}

func (c *collElgProcSync) done() {
	select {
	case c.procComplete <- true:
	default:
	}
}

func (c *collElgProcSync) waitForDone() {
	<-c.procComplete
}

// ErrIllegalCall is returned by a store implementation if the store does not expect a call to Prepare/Commit/Rollback/InitLastCommittedBlock
type ErrIllegalCall struct {
	msg string
}

func (err *ErrIllegalCall) Error() string {
	return err.msg
}

// ErrIllegalArgs is returned by a store implementation if the args passed are not allowed
type ErrIllegalArgs struct {
	msg string
}

func (err *ErrIllegalArgs) Error() string {
	return err.msg
}

// ErrOutOfRange is returned for a request for data that is not yet committed
type ErrOutOfRange struct {
	msg string
}

func (err *ErrOutOfRange) Error() string {
	return err.msg
}