github.com/true-sqn/fabric@v2.1.1+incompatible/core/ledger/kvledger/txmgmt/pvtstatepurgemgmt/purge_mgr.go (about)

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package pvtstatepurgemgmt
     8  
     9  import (
    10  	"math"
    11  	"sync"
    12  
    13  	"github.com/hyperledger/fabric/core/ledger/kvledger/bookkeeping"
    14  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/privacyenabledstate"
    15  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/statedb"
    16  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/version"
    17  	"github.com/hyperledger/fabric/core/ledger/pvtdatapolicy"
    18  	"github.com/hyperledger/fabric/core/ledger/util"
    19  )
    20  
    21  // PurgeMgr manages purging of the expired pvtdata
// PurgeMgr manages purging of the expired pvtdata
type PurgeMgr interface {
	// PrepareForExpiringKeys gives a chance to the PurgeMgr to do background work in advance if any
	PrepareForExpiringKeys(expiringAtBlk uint64)
	// WaitForPrepareToFinish holds the caller till the background goroutine launched by 'PrepareForExpiringKeys' is finished
	WaitForPrepareToFinish()
	// DeleteExpiredAndUpdateBookkeeping updates the bookkeeping and modifies the update batch by adding the deletes for the expired pvtdata
	// (both the pvt writes and the corresponding hashed writes for the committing block)
	DeleteExpiredAndUpdateBookkeeping(
		pvtUpdates *privacyenabledstate.PvtUpdateBatch,
		hashedUpdates *privacyenabledstate.HashedUpdateBatch) error
	// UpdateBookkeepingForPvtDataOfOldBlocks updates the existing expiry entries in the bookkeeper with the given pvtUpdates
	// (used when missing pvtdata of previously committed blocks is supplied later)
	UpdateBookkeepingForPvtDataOfOldBlocks(pvtUpdates *privacyenabledstate.PvtUpdateBatch) error
	// BlockCommitDone is a callback to the PurgeMgr when the block is committed to the ledger;
	// it clears the now-stale expiry entries from the bookkeeper
	BlockCommitDone() error
}
    36  
// keyAndVersion captures, for a hashed key scheduled for purge, the raw key
// (if known) and the block in which the key's current value was committed.
type keyAndVersion struct {
	key             string // raw pvt key; empty when only the key hash is known
	committingBlock uint64 // block that committed the version recorded in the expiry entry
	purgeKeyOnly    bool   // true when only the pvt key (not the hashed key) should be purged
}
    42  
// expiryInfoMap indexes purge candidates by their hashed composite key.
type expiryInfoMap map[privacyenabledstate.HashedCompositeKey]*keyAndVersion
    44  
// workingset is the precomputed state for one expiring block: what to delete
// from the statedb and what to clear from the bookkeeper's expiry schedule.
type workingset struct {
	toPurge             expiryInfoMap    // keys whose current version expires at 'expiringBlk'
	toClearFromSchedule []*expiryInfoKey // expiry entries to remove from the bookkeeper after commit
	expiringBlk         uint64           // the block whose commit triggers the expirations
	err                 error            // error encountered while preparing; surfaced at use time
}
    51  
// purgeMgr is the default implementation of PurgeMgr.
type purgeMgr struct {
	btlPolicy pvtdatapolicy.BTLPolicy // block-to-live policy used to compute expiry heights
	db        privacyenabledstate.DB  // state database that holds the pvt/hashed data
	expKeeper expiryKeeper            // bookkeeper persisting the expiry schedule

	// lock is held by the background preparation goroutine for its full duration;
	// waitGrp only signals that the goroutine has acquired the lock.
	lock    *sync.Mutex
	waitGrp *sync.WaitGroup

	workingset *workingset // result of the latest PrepareForExpiringKeys; nil before first prepare
}
    62  
    63  // InstantiatePurgeMgr instantiates a PurgeMgr.
    64  func InstantiatePurgeMgr(ledgerid string, db privacyenabledstate.DB, btlPolicy pvtdatapolicy.BTLPolicy, bookkeepingProvider bookkeeping.Provider) (PurgeMgr, error) {
    65  	return &purgeMgr{
    66  		btlPolicy: btlPolicy,
    67  		db:        db,
    68  		expKeeper: newExpiryKeeper(ledgerid, bookkeepingProvider),
    69  		lock:      &sync.Mutex{},
    70  		waitGrp:   &sync.WaitGroup{},
    71  	}, nil
    72  }
    73  
    74  // PrepareForExpiringKeys implements function in the interface 'PurgeMgr'
// PrepareForExpiringKeys implements function in the interface 'PurgeMgr'
// It computes the working set for 'expiringAtBlk' in a background goroutine
// so that the heavy lookup work overlaps with block processing.
func (p *purgeMgr) PrepareForExpiringKeys(expiringAtBlk uint64) {
	p.waitGrp.Add(1)
	go func() {
		// The ordering here is deliberate: the goroutine first takes the lock
		// and only then signals Done. Hence Wait() below returns as soon as the
		// goroutine HOLDS the mutex (not when it finishes), and any later call
		// to WaitForPrepareToFinish / DeleteExpiredAndUpdateBookkeeping will
		// block on the mutex until preparation completes.
		p.lock.Lock()
		p.waitGrp.Done()
		defer p.lock.Unlock()
		p.workingset = p.prepareWorkingsetFor(expiringAtBlk)
	}()
	// Returns once the background goroutine owns the lock.
	p.waitGrp.Wait()
}
    85  
    86  // WaitForPrepareToFinish implements function in the interface 'PurgeMgr'
// WaitForPrepareToFinish implements function in the interface 'PurgeMgr'
// The seemingly empty Lock/Unlock pair is an intentional barrier: the goroutine
// launched by PrepareForExpiringKeys holds the mutex for the whole preparation,
// so acquiring (and immediately releasing) it here blocks the caller exactly
// until that preparation has finished.
func (p *purgeMgr) WaitForPrepareToFinish() {
	p.lock.Lock()
	p.lock.Unlock()
}
    91  
// UpdateBookkeepingForPvtDataOfOldBlocks implements function in the interface 'PurgeMgr'.
// It is invoked when pvtdata that was missing at the time of the original block
// commit is supplied later: the expiry entries recorded then (keyed by hash only)
// are augmented with the now-available raw keys so that the pvt values also get
// purged when they expire.
func (p *purgeMgr) UpdateBookkeepingForPvtDataOfOldBlocks(pvtUpdates *privacyenabledstate.PvtUpdateBatch) error {
	// Recompute the expiry schedule entries for the supplied pvt writes.
	builder := newExpiryScheduleBuilder(p.btlPolicy)
	pvtUpdateCompositeKeyMap := pvtUpdates.ToCompositeKeyMap()
	for k, vv := range pvtUpdateCompositeKeyMap {
		builder.add(k.Namespace, k.CollectionName, k.Key, util.ComputeStringHash(k.Key), vv)
	}

	var updatedList []*expiryInfo
	for _, toAdd := range builder.getExpiryInfo() {
		// Fetch the existing entry written at the original (hash-only) commit.
		toUpdate, err := p.expKeeper.retrieveByExpiryKey(toAdd.expiryInfoKey)
		if err != nil {
			return err
		}
		// Though we could update the existing entry (as there should be one due
		// to only the keyHash of this pvtUpdateKey), for simplicity and to be less
		// expensive, we append a new entry
		toUpdate.pvtdataKeys.addAll(toAdd.pvtdataKeys)
		updatedList = append(updatedList, toUpdate)
	}

	// As the expiring keys list might have been constructed after the last
	// regular block commit, we need to update the list. This is because,
	// some of the old pvtData which are being committed might get expired
	// during the next regular block commit. As a result, the corresponding
	// hashedKey in the expiring keys list would be missing the pvtData.
	p.addMissingPvtDataToWorkingSet(pvtUpdateCompositeKeyMap)

	return p.expKeeper.updateBookkeeping(updatedList, nil)
}
   121  
// addMissingPvtDataToWorkingSet reconciles the already-prepared purge working
// set with pvt keys that are being committed late (old-block pvtdata), so that
// the upcoming purge also deletes the raw pvt values, not just the hashes.
func (p *purgeMgr) addMissingPvtDataToWorkingSet(pvtKeys privacyenabledstate.PvtdataCompositeKeyMap) {
	// Nothing to reconcile if no working set was prepared or nothing expires.
	if p.workingset == nil || len(p.workingset.toPurge) == 0 {
		return
	}

	for k := range pvtKeys {
		hashedCompositeKey := privacyenabledstate.HashedCompositeKey{
			Namespace:      k.Namespace,
			CollectionName: k.CollectionName,
			KeyHash:        string(util.ComputeStringHash(k.Key))}

		toPurgeKey, ok := p.workingset.toPurge[hashedCompositeKey]
		if !ok {
			// corresponding hashedKey is not present in the
			// expiring keys list
			continue
		}

		// if the purgeKeyOnly is set, it means that the version of the pvtKey
		// stored in the stateDB is older than the version of the hashedKey.
		// As a result, only the pvtKey needs to be purged (expiring block height
		// for the recent hashedKey would be higher). If the recent
		// pvtKey of the corresponding hashedKey is being committed, we need to
		// remove the purgeKeyOnly entries from the toPurgeList it is going to be
		// updated by the commit of missing pvtData
		if toPurgeKey.purgeKeyOnly {
			delete(p.workingset.toPurge, hashedCompositeKey)
		} else {
			// Record the raw key so the purge deletes the pvt value too.
			toPurgeKey.key = k.Key
		}
	}
}
   154  
   155  // DeleteExpiredAndUpdateBookkeeping implements function in the interface 'PurgeMgr'
// DeleteExpiredAndUpdateBookkeeping implements function in the interface 'PurgeMgr'
// NOTE(review): this assumes PrepareForExpiringKeys has been invoked for the
// committing block; p.workingset would be nil otherwise — confirm caller contract.
func (p *purgeMgr) DeleteExpiredAndUpdateBookkeeping(
	pvtUpdates *privacyenabledstate.PvtUpdateBatch,
	hashedUpdates *privacyenabledstate.HashedUpdateBatch) error {
	// Taking the lock also waits for the background preparation to finish.
	p.lock.Lock()
	defer p.lock.Unlock()
	if p.workingset.err != nil {
		return p.workingset.err
	}

	// Schedule future expirations for the keys being written by this block.
	listExpiryInfo, err := buildExpirySchedule(p.btlPolicy, pvtUpdates, hashedUpdates)
	if err != nil {
		return err
	}

	// For each key selected for purging, check if the key is not getting updated in the current block,
	// add its deletion in the update batches for pvt and hashed updates
	for compositeHashedKey, keyAndVersion := range p.workingset.toPurge {
		ns := compositeHashedKey.Namespace
		coll := compositeHashedKey.CollectionName
		keyHash := []byte(compositeHashedKey.KeyHash)
		key := keyAndVersion.key
		purgeKeyOnly := keyAndVersion.purgeKeyOnly
		hashUpdated := hashedUpdates.Contains(ns, coll, keyHash)
		pvtKeyUpdated := pvtUpdates.Contains(ns, coll, key)

		logger.Debugf("Checking whether the key [ns=%s, coll=%s, keyHash=%x, purgeKeyOnly=%t] "+
			"is updated in the update batch for the committing block - hashUpdated=%t, and pvtKeyUpdated=%t",
			ns, coll, keyHash, purgeKeyOnly, hashUpdated, pvtKeyUpdated)

		// Deletes are stamped with the highest tx number of the expiring block
		// so they sort after any regular write in the same block.
		expiringTxVersion := version.NewHeight(p.workingset.expiringBlk, math.MaxUint64)
		if !hashUpdated && !purgeKeyOnly {
			logger.Debugf("Adding the hashed key to be purged to the delete list in the update batch")
			hashedUpdates.Delete(ns, coll, keyHash, expiringTxVersion)
		}
		// key may be empty when only the hash is known for this entry.
		if key != "" && !pvtKeyUpdated {
			logger.Debugf("Adding the pvt key to be purged to the delete list in the update batch")
			pvtUpdates.Delete(ns, coll, key, expiringTxVersion)
		}
	}
	return p.expKeeper.updateBookkeeping(listExpiryInfo, nil)
}
   197  
   198  // BlockCommitDone implements function in the interface 'PurgeMgr'
   199  // These orphan entries for purge-schedule can be cleared off in bulk in a separate background routine as well
   200  // If we maintain the following logic (i.e., clear off entries just after block commit), we need a TODO -
   201  // We need to perform a check in the start, because there could be a crash between the block commit and
   202  // invocation to this function resulting in the orphan entry for the deletes scheduled for the last block
   203  // Also, the another way is to club the delete of these entries in the same batch that adds entries for the future expirations -
   204  // however, that requires updating the expiry store by replaying the last block from blockchain in order to sustain a crash between
   205  // entries updates and block commit
   206  func (p *purgeMgr) BlockCommitDone() error {
   207  	defer func() { p.workingset = nil }()
   208  	return p.expKeeper.updateBookkeeping(nil, p.workingset.toClearFromSchedule)
   209  }
   210  
// prepareWorkingsetFor returns a working set for a given expiring block 'expiringAtBlk'.
// This working set contains the pvt data keys that will expire with the commit of block 'expiringAtBlk'.
// An expiry entry is kept in the purge list only if the committed version of the key still
// matches the version recorded in the entry; keys overwritten in later blocks are dropped
// (their newer versions carry their own, later expiry entries).
func (p *purgeMgr) prepareWorkingsetFor(expiringAtBlk uint64) *workingset {
	logger.Debugf("Preparing potential purge list working-set for expiringAtBlk [%d]", expiringAtBlk)
	workingset := &workingset{expiringBlk: expiringAtBlk}
	// Retrieve the keys from bookkeeper
	expiryInfo, err := p.expKeeper.retrieve(expiringAtBlk)
	if err != nil {
		// Errors are recorded in the working set and surfaced when it is used.
		workingset.err = err
		return workingset
	}
	// Transform the keys into the form such that for each hashed key that is eligible for purge appears in 'toPurge'
	toPurge := transformToExpiryInfoMap(expiryInfo)
	// Load the latest versions of the hashed keys
	p.preloadCommittedVersionsInCache(toPurge)
	var expiryInfoKeysToClear []*expiryInfoKey

	if len(toPurge) == 0 {
		logger.Debugf("No expiry entry found for expiringAtBlk [%d]", expiringAtBlk)
		return workingset
	}
	logger.Debugf("Total [%d] expiring entries found. Evaluating whether some of these keys have been overwritten in later blocks...", len(toPurge))

	for purgeEntryK, purgeEntryV := range toPurge {
		logger.Debugf("Evaluating for hashedKey [%s]", purgeEntryK)
		// The bookkeeper entry is cleared after commit regardless of whether the key is purged.
		expiryInfoKeysToClear = append(expiryInfoKeysToClear, &expiryInfoKey{committingBlk: purgeEntryV.committingBlock, expiryBlk: expiringAtBlk})
		currentVersion, err := p.db.GetKeyHashVersion(purgeEntryK.Namespace, purgeEntryK.CollectionName, []byte(purgeEntryK.KeyHash))
		if err != nil {
			workingset.err = err
			return workingset
		}

		if sameVersion(currentVersion, purgeEntryV.committingBlock) {
			logger.Debugf(
				"The version of the hashed key in the committed state and in the expiry entry is same " +
					"hence, keeping the entry in the purge list")
			continue
		}

		logger.Debugf("The version of the hashed key in the committed state and in the expiry entry is different")
		if purgeEntryV.key != "" {
			logger.Debugf("The expiry entry also contains the raw key along with the key hash")
			committedPvtVerVal, err := p.db.GetPrivateData(purgeEntryK.Namespace, purgeEntryK.CollectionName, purgeEntryV.key)
			if err != nil {
				workingset.err = err
				return workingset
			}

			// Hash was overwritten but the pvt value was not: purge only the pvt key
			// (the newer hash version has its own, later expiry entry).
			if sameVersionFromVal(committedPvtVerVal, purgeEntryV.committingBlock) {
				logger.Debugf(
					"The version of the pvt key in the committed state and in the expiry entry is same" +
						"Including only key in the purge list and not the hashed key")
				purgeEntryV.purgeKeyOnly = true
				continue
			}
		}

		// If we reached here, the keyhash and private key (if present, in the expiry entry) have been updated in a later block, therefore remove from current purge list
		logger.Debugf("Removing from purge list - the key hash and key (if present, in the expiry entry)")
		delete(toPurge, purgeEntryK)
	}
	// Final keys to purge from state
	workingset.toPurge = toPurge
	// Keys to clear from bookkeeper
	workingset.toClearFromSchedule = expiryInfoKeysToClear
	return workingset
}
   278  
   279  func (p *purgeMgr) preloadCommittedVersionsInCache(expInfoMap expiryInfoMap) {
   280  	if !p.db.IsBulkOptimizable() {
   281  		return
   282  	}
   283  	var hashedKeys []*privacyenabledstate.HashedCompositeKey
   284  	for k := range expInfoMap {
   285  		hashedKeys = append(hashedKeys, &k)
   286  	}
   287  	p.db.LoadCommittedVersionsOfPubAndHashedKeys(nil, hashedKeys)
   288  }
   289  
   290  func transformToExpiryInfoMap(expiryInfo []*expiryInfo) expiryInfoMap {
   291  	expinfoMap := make(expiryInfoMap)
   292  	for _, expinfo := range expiryInfo {
   293  		for ns, colls := range expinfo.pvtdataKeys.Map {
   294  			for coll, keysAndHashes := range colls.Map {
   295  				for _, keyAndHash := range keysAndHashes.List {
   296  					compositeKey := privacyenabledstate.HashedCompositeKey{Namespace: ns, CollectionName: coll, KeyHash: string(keyAndHash.Hash)}
   297  					expinfoMap[compositeKey] = &keyAndVersion{key: keyAndHash.Key, committingBlock: expinfo.expiryInfoKey.committingBlk}
   298  				}
   299  			}
   300  		}
   301  	}
   302  	return expinfoMap
   303  }
   304  
   305  func sameVersion(version *version.Height, blockNum uint64) bool {
   306  	return version != nil && version.BlockNum == blockNum
   307  }
   308  
   309  func sameVersionFromVal(vv *statedb.VersionedValue, blockNum uint64) bool {
   310  	return vv != nil && sameVersion(vv.Version, blockNum)
   311  }