github.com/lzy4123/fabric@v2.1.1+incompatible/gossip/privdata/pvtdataprovider.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package privdata
     8  
     9  import (
    10  	"bytes"
    11  	"encoding/hex"
    12  	"fmt"
    13  	"time"
    14  
    15  	"github.com/golang/protobuf/proto"
    16  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    17  	"github.com/hyperledger/fabric-protos-go/msp"
    18  	"github.com/hyperledger/fabric-protos-go/peer"
    19  	vsccErrors "github.com/hyperledger/fabric/common/errors"
    20  	"github.com/hyperledger/fabric/common/metrics"
    21  	commonutil "github.com/hyperledger/fabric/common/util"
    22  	pvtdatasc "github.com/hyperledger/fabric/core/common/privdata"
    23  	"github.com/hyperledger/fabric/core/ledger"
    24  	"github.com/hyperledger/fabric/core/transientstore"
    25  	pvtdatacommon "github.com/hyperledger/fabric/gossip/privdata/common"
    26  	"github.com/hyperledger/fabric/gossip/util"
    27  	"github.com/hyperledger/fabric/protoutil"
    28  )
    29  
    30  type sleeper struct {
    31  	sleep func(time.Duration)
    32  }
    33  
    34  func (s sleeper) Sleep(d time.Duration) {
    35  	if s.sleep == nil {
    36  		time.Sleep(d)
    37  		return
    38  	}
    39  	s.sleep(d)
    40  }
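// Illustrative sketch (not part of the original file): because sleep is a plain
// function field, a test in this package could stub it to avoid real delays when
// exercising the retry loop in RetrievePvtdata. The test body below is hypothetical.
//
//	pdp := &PvtdataProvider{ /* ... */ }
//	var slept []time.Duration
//	pdp.sleeper.sleep = func(d time.Duration) { slept = append(slept, d) }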
    41  
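// RetrievedPvtdata encapsulates the private data retrieved for a block, along with
// the bookkeeping needed to purge the corresponding entries from the transient store.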
    42  type RetrievedPvtdata struct {
    43  	blockPvtdata            *ledger.BlockPvtdata
    44  	pvtdataRetrievalInfo    *pvtdataRetrievalInfo
    45  	transientStore          *transientstore.Store
    46  	logger                  util.Logger
    47  	purgeDurationHistogram  metrics.Histogram
    48  	blockNum                uint64
    49  	transientBlockRetention uint64
    50  }
    51  
    52  // GetBlockPvtdata returns the retrieved ledger.BlockPvtdata.
    53  func (r *RetrievedPvtdata) GetBlockPvtdata() *ledger.BlockPvtdata {
    54  	return r.blockPvtdata
    55  }
    56  
    57  // Purge purges private data for transactions in the block from the transient store.
    58  // Transactions older than the retention period are considered orphaned and also purged.
    59  func (r *RetrievedPvtdata) Purge() {
    60  	purgeStart := time.Now()
    61  
    62  	if len(r.blockPvtdata.PvtData) > 0 {
    63  		// Purge all transactions in the block, whether valid or invalid.
    64  		if err := r.transientStore.PurgeByTxids(r.pvtdataRetrievalInfo.txns); err != nil {
    65  			r.logger.Errorf("Purging transactions %v failed: %s", r.pvtdataRetrievalInfo.txns, err)
    66  		}
    67  	}
    68  
    69  	blockNum := r.blockNum
    70  	if blockNum%r.transientBlockRetention == 0 && blockNum > r.transientBlockRetention {
    71  		err := r.transientStore.PurgeBelowHeight(blockNum - r.transientBlockRetention)
    72  		if err != nil {
    73  			r.logger.Errorf("Failed purging data from transient store at block [%d]: %s", blockNum, err)
    74  		}
    75  	}
    76  
    77  	r.purgeDurationHistogram.Observe(time.Since(purgeStart).Seconds())
    78  }
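// Illustrative usage sketch (assumption; the caller is not shown in this file): the
// retrieved private data is typically committed together with the block before the
// transient store is purged. commitBlock and block below are hypothetical.
//
//	retrieved, err := provider.RetrievePvtdata(txPvtdataInfo)
//	if err != nil {
//	    return err
//	}
//	if err := commitBlock(block, retrieved.GetBlockPvtdata()); err != nil {
//	    return err
//	}
//	retrieved.Purge()
//
// As a worked example of the retention logic above: with transientBlockRetention set
// to 1000, committing block 2000 purges transient data below height 1000, block 3000
// purges below height 2000, and so on; block 1000 itself triggers no height-based purge.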
    79  
    80  type eligibilityComputer struct {
    81  	logger                  util.Logger
    82  	storePvtdataOfInvalidTx bool
    83  	channelID               string
    84  	selfSignedData          protoutil.SignedData
    85  	idDeserializerFactory   IdentityDeserializerFactory
    86  }
    87  
    88  // computeEligibility computes the eligibility of private data and
    89  // groups all private data as either eligible-missing or ineligible-missing prior to fetching
    90  func (ec *eligibilityComputer) computeEligibility(mspID string, pvtdataToRetrieve []*ledger.TxPvtdataInfo) (*pvtdataRetrievalInfo, error) {
    91  	sources := make(map[rwSetKey][]*peer.Endorsement)
    92  	eligibleMissingKeys := make(rwsetKeys)
    93  	ineligibleMissingKeys := make(rwsetKeys)
    94  
    95  	var txList []string
    96  	for _, txPvtdata := range pvtdataToRetrieve {
    97  		txID := txPvtdata.TxID
    98  		seqInBlock := txPvtdata.SeqInBlock
    99  		invalid := txPvtdata.Invalid
   100  		txList = append(txList, txID)
   101  		if invalid && !ec.storePvtdataOfInvalidTx {
   102  			ec.logger.Debugf("Skipping Tx [%s] at sequence [%d] because it's invalid.", txID, seqInBlock)
   103  			continue
   104  		}
   105  		deserializer := ec.idDeserializerFactory.GetIdentityDeserializer(ec.channelID)
   106  		for _, colInfo := range txPvtdata.CollectionPvtdataInfo {
   107  			ns := colInfo.Namespace
   108  			col := colInfo.Collection
   109  			hash := colInfo.ExpectedHash
   110  			endorsers := colInfo.Endorsers
   111  			colConfig := colInfo.CollectionConfig
   112  
   113  			policy, err := pvtdatasc.NewSimpleCollection(colConfig, deserializer)
   114  			if err != nil {
   115  				ec.logger.Errorf("Failed to retrieve collection access policy for chaincode [%s], collection name [%s] for txID [%s]: %s.",
   116  					ns, col, txID, err)
   117  				return nil, &vsccErrors.VSCCExecutionFailureError{Err: err}
   118  			}
   119  
   120  			key := rwSetKey{
   121  				txID:       txID,
   122  				seqInBlock: seqInBlock,
   123  				hash:       hex.EncodeToString(hash),
   124  				namespace:  ns,
   125  				collection: col,
   126  			}
   127  
   128  			// First check if mspID is found in the MemberOrgs before falling back to AccessFilter policy evaluation
   129  			memberOrgs := policy.MemberOrgs()
   130  			if _, ok := memberOrgs[mspID]; !ok &&
   131  				!policy.AccessFilter()(ec.selfSignedData) {
   132  				ec.logger.Debugf("Peer is not eligible for collection: chaincode [%s], "+
   133  					"collection name [%s], txID [%s] the policy is [%#v]. Skipping.",
   134  					ns, col, txID, policy)
   135  				ineligibleMissingKeys[key] = rwsetInfo{}
   136  				continue
   137  			}
   138  
   139  			// treat all eligible keys as missing
   140  			eligibleMissingKeys[key] = rwsetInfo{
   141  				invalid: invalid,
   142  			}
   143  			sources[key] = endorsersFromEligibleOrgs(ns, col, endorsers, memberOrgs)
   144  		}
   145  	}
   146  
   147  	return &pvtdataRetrievalInfo{
   148  		sources:               sources,
   149  		txns:                  txList,
   150  		eligibleMissingKeys:   eligibleMissingKeys,
   151  		ineligibleMissingKeys: ineligibleMissingKeys,
   152  	}, nil
   153  }
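// Eligibility sketch (restating the checks above, no additional API): a collection is
// eligible for this peer when its MSP ID appears in the collection's MemberOrgs, or
// when the collection's access filter admits the peer's own signed data; everything
// else is recorded as ineligible-missing so the ledger can mark it accordingly.
//
//	_, member := policy.MemberOrgs()[mspID]
//	eligible := member || policy.AccessFilter()(selfSignedData)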
   154  
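// PvtdataProvider retrieves the private data for a block, trying the prefetched cache
// first, then the local transient store, and finally remote peers.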
   155  type PvtdataProvider struct {
   156  	mspID                                   string
   157  	selfSignedData                          protoutil.SignedData
   158  	logger                                  util.Logger
   159  	listMissingPrivateDataDurationHistogram metrics.Histogram
   160  	fetchDurationHistogram                  metrics.Histogram
   161  	purgeDurationHistogram                  metrics.Histogram
   162  	transientStore                          *transientstore.Store
   163  	pullRetryThreshold                      time.Duration
   164  	prefetchedPvtdata                       util.PvtDataCollections
   165  	transientBlockRetention                 uint64
   166  	channelID                               string
   167  	blockNum                                uint64
   168  	storePvtdataOfInvalidTx                 bool
   169  	skipPullingInvalidTransactions          bool
   170  	idDeserializerFactory                   IdentityDeserializerFactory
   171  	fetcher                                 Fetcher
   172  
   173  	sleeper sleeper
   174  }
   175  
   176  // RetrievePvtdata is passed a list of private data items from a block.
   177  // It determines which private data items this peer is eligible for, and then
   178  // retrieves the private data from the local cache, the local transient store, or remote peers.
   179  func (pdp *PvtdataProvider) RetrievePvtdata(pvtdataToRetrieve []*ledger.TxPvtdataInfo) (*RetrievedPvtdata, error) {
   180  	retrievedPvtdata := &RetrievedPvtdata{
   181  		transientStore:          pdp.transientStore,
   182  		logger:                  pdp.logger,
   183  		purgeDurationHistogram:  pdp.purgeDurationHistogram,
   184  		blockNum:                pdp.blockNum,
   185  		transientBlockRetention: pdp.transientBlockRetention,
   186  	}
   187  
   188  	listMissingStart := time.Now()
   189  	eligibilityComputer := &eligibilityComputer{
   190  		logger:                  pdp.logger,
   191  		storePvtdataOfInvalidTx: pdp.storePvtdataOfInvalidTx,
   192  		channelID:               pdp.channelID,
   193  		selfSignedData:          pdp.selfSignedData,
   194  		idDeserializerFactory:   pdp.idDeserializerFactory,
   195  	}
   196  
   197  	pvtdataRetrievalInfo, err := eligibilityComputer.computeEligibility(pdp.mspID, pvtdataToRetrieve)
   198  	if err != nil {
   199  		return nil, err
   200  	}
   201  	pdp.listMissingPrivateDataDurationHistogram.Observe(time.Since(listMissingStart).Seconds())
   202  
   203  	pvtdata := make(rwsetByKeys)
   204  
   205  	// POPULATE FROM CACHE
   206  	pdp.populateFromCache(pvtdata, pvtdataRetrievalInfo, pvtdataToRetrieve)
   207  	if len(pvtdataRetrievalInfo.eligibleMissingKeys) == 0 {
   208  		pdp.logger.Debug("No missing collection private write sets to fetch from transient store")
   209  		retrievedPvtdata.pvtdataRetrievalInfo = pvtdataRetrievalInfo
   210  		retrievedPvtdata.blockPvtdata = pdp.prepareBlockPvtdata(pvtdata, pvtdataRetrievalInfo)
   211  		return retrievedPvtdata, nil
   212  	}
   213  
   214  	// POPULATE FROM TRANSIENT STORE
   215  	pdp.logger.Debugf("Could not find all collection private write sets in cache for block [%d]", pdp.blockNum)
   216  	pdp.logger.Debugf("Fetching %d collection private write sets from transient store", len(pvtdataRetrievalInfo.eligibleMissingKeys))
   217  	pdp.populateFromTransientStore(pvtdata, pvtdataRetrievalInfo)
   218  	if len(pvtdataRetrievalInfo.eligibleMissingKeys) == 0 {
   219  		pdp.logger.Debug("No missing collection private write sets to fetch from remote peers")
   220  		retrievedPvtdata.pvtdataRetrievalInfo = pvtdataRetrievalInfo
   221  		retrievedPvtdata.blockPvtdata = pdp.prepareBlockPvtdata(pvtdata, pvtdataRetrievalInfo)
   222  		return retrievedPvtdata, nil
   223  	}
   224  
   225  	// POPULATE FROM REMOTE PEERS
   226  	retryThresh := pdp.pullRetryThreshold
   227  	pdp.logger.Debugf("Could not find all collection private write sets in local peer transient store for block [%d]", pdp.blockNum)
   228  	pdp.logger.Debugf("Fetching %d collection private write sets from remote peers for a maximum duration of %s", len(pvtdataRetrievalInfo.eligibleMissingKeys), retryThresh)
   229  	startPull := time.Now()
   230  	for len(pvtdataRetrievalInfo.eligibleMissingKeys) > 0 && time.Since(startPull) < retryThresh {
   231  		if needToRetry := pdp.populateFromRemotePeers(pvtdata, pvtdataRetrievalInfo); !needToRetry {
   232  			break
   233  		}
   234  		// If there are still missing keys, sleep before retry
   235  		pdp.sleeper.Sleep(pullRetrySleepInterval)
   236  	}
   237  	elapsedPull := int64(time.Since(startPull) / time.Millisecond) // duration in ms
   238  	pdp.fetchDurationHistogram.Observe(time.Since(startPull).Seconds())
   239  
   240  	if len(pvtdataRetrievalInfo.eligibleMissingKeys) == 0 {
   241  		pdp.logger.Debugf("Fetched all missing collection private write sets from remote peers for block [%d] (%dms)", pdp.blockNum, elapsedPull)
   242  	} else {
   243  		pdp.logger.Debugf("Could not fetch all missing collection private write sets from remote peers for block [%d]",
   244  			pdp.blockNum)
   245  	}
   246  
   247  	retrievedPvtdata.pvtdataRetrievalInfo = pvtdataRetrievalInfo
   248  	retrievedPvtdata.blockPvtdata = pdp.prepareBlockPvtdata(pvtdata, pvtdataRetrievalInfo)
   249  	return retrievedPvtdata, nil
   250  }
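// Retry-bound sketch (restating the loop above; missingKeys and fetchOnce are shorthand
// for the expressions used there): remote fetching keeps retrying while eligible keys
// are still missing and the elapsed time stays under pullRetryThreshold, sleeping
// pullRetrySleepInterval between attempts, so the worst-case wait is roughly
// pullRetryThreshold plus one sleep interval and one fetch round trip.
//
//	for len(missingKeys) > 0 && time.Since(startPull) < retryThresh {
//	    if needToRetry := fetchOnce(); !needToRetry {
//	        break
//	    }
//	    sleeper.Sleep(pullRetrySleepInterval)
//	}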
   251  
   252  // populateFromCache populates pvtdata with private write sets found in the cache and updates
   253  // pvtdataRetrievalInfo by removing the keys that were satisfied from the cache
   254  func (pdp *PvtdataProvider) populateFromCache(pvtdata rwsetByKeys, pvtdataRetrievalInfo *pvtdataRetrievalInfo, pvtdataToRetrieve []*ledger.TxPvtdataInfo) {
   255  	pdp.logger.Debugf("Attempting to retrieve %d private write sets from cache.", len(pvtdataRetrievalInfo.eligibleMissingKeys))
   256  
   257  	for _, txPvtdata := range pdp.prefetchedPvtdata {
   258  		txID := getTxIDBySeqInBlock(txPvtdata.SeqInBlock, pvtdataToRetrieve)
   259  		// if the txID cannot be matched from the query, the data was never requested, so skip the entire tx
   260  		if txID == "" {
   261  			pdp.logger.Warningf("Found extra data in prefetched pvtdata at sequence [%d]. Skipping.", txPvtdata.SeqInBlock)
   262  			continue
   263  		}
   264  		for _, ns := range txPvtdata.WriteSet.NsPvtRwset {
   265  			for _, col := range ns.CollectionPvtRwset {
   266  				key := rwSetKey{
   267  					txID:       txID,
   268  					seqInBlock: txPvtdata.SeqInBlock,
   269  					collection: col.CollectionName,
   270  					namespace:  ns.Namespace,
   271  					hash:       hex.EncodeToString(commonutil.ComputeSHA256(col.Rwset)),
   272  				}
   273  				// skip if key not originally missing
   274  				if _, missing := pvtdataRetrievalInfo.eligibleMissingKeys[key]; !missing {
   275  					pdp.logger.Warningf("Found extra data in prefetched pvtdata: [%v]. Skipping.", key)
   276  					continue
   277  				}
   278  				// populate the pvtdata with the RW set from the cache
   279  				pvtdata[key] = col.Rwset
   280  				// remove key from missing
   281  				delete(pvtdataRetrievalInfo.eligibleMissingKeys, key)
   282  			} // iterate over collections in the namespace
   283  		} // iterate over the namespaces in the WSet
   284  	} // iterate over cached private data in the block
   285  }
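// Matching note (restating the code above): cached collection rwsets are matched
// against the missing set by recomputing the SHA-256 hash of the raw rwset bytes, so
// an entry whose recomputed hash differs from the ExpectedHash captured during
// eligibility computation stays missing and falls through to the transient store.
//
//	hash := hex.EncodeToString(commonutil.ComputeSHA256(col.Rwset)) // must equal the expected hash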
   286  
   287  // populateFromTransientStore populates pvtdata with private write sets found in the transient store
   288  // and updates pvtdataRetrievalInfo by removing the keys that were satisfied from the transient store
   289  func (pdp *PvtdataProvider) populateFromTransientStore(pvtdata rwsetByKeys, pvtdataRetrievalInfo *pvtdataRetrievalInfo) {
   290  	pdp.logger.Debugf("Attempting to retrieve %d private write sets from transient store.", len(pvtdataRetrievalInfo.eligibleMissingKeys))
   291  
   292  	// Put into pvtdata the RW sets that are missing and are found in the transient store
   293  	for k := range pvtdataRetrievalInfo.eligibleMissingKeys {
   294  		filter := ledger.NewPvtNsCollFilter()
   295  		filter.Add(k.namespace, k.collection)
   296  		iterator, err := pdp.transientStore.GetTxPvtRWSetByTxid(k.txID, filter)
   297  		if err != nil {
   298  			pdp.logger.Warningf("Failed fetching private data from transient store: Failed obtaining iterator from transient store: %s", err)
   299  			return
   300  		}
   301  		defer iterator.Close()
   302  		for {
   303  			res, err := iterator.Next()
   304  			if err != nil {
   305  				pdp.logger.Warningf("Failed fetching private data from transient store: Failed iterating over transient store data: %s", err)
   306  				return
   307  			}
   308  			if res == nil {
   309  				// End of iteration
   310  				break
   311  			}
   312  			if res.PvtSimulationResultsWithConfig == nil {
   313  				pdp.logger.Warningf("Resultset's PvtSimulationResultsWithConfig for txID [%s] is nil. Skipping.", k.txID)
   314  				continue
   315  			}
   316  			simRes := res.PvtSimulationResultsWithConfig
   317  			if simRes.PvtRwset == nil {
   318  				pdp.logger.Warningf("The PvtRwset of PvtSimulationResultsWithConfig for txID [%s] is nil. Skipping.", k.txID)
   319  				continue
   320  			}
   321  			for _, ns := range simRes.PvtRwset.NsPvtRwset {
   322  				for _, col := range ns.CollectionPvtRwset {
   323  					key := rwSetKey{
   324  						txID:       k.txID,
   325  						seqInBlock: k.seqInBlock,
   326  						collection: col.CollectionName,
   327  						namespace:  ns.Namespace,
   328  						hash:       hex.EncodeToString(commonutil.ComputeSHA256(col.Rwset)),
   329  					}
   330  					// skip if not missing
   331  					if _, missing := pvtdataRetrievalInfo.eligibleMissingKeys[key]; !missing {
   332  						continue
   333  					}
   334  					// populate the pvtdata with the RW set from the transient store
   335  					pvtdata[key] = col.Rwset
   336  					// remove key from missing
   337  					delete(pvtdataRetrievalInfo.eligibleMissingKeys, key)
   338  				} // iterating over all collections
   339  			} // iterating over all namespaces
   340  		} // iterating over the TxPvtRWSet results
   341  	}
   342  }
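// Lookup sketch (restating the code above): each missing key is looked up with a
// namespace/collection filter scoped to its transaction, and any iterator error only
// aborts this phase; the remaining keys are then attempted against remote peers.
//
//	filter := ledger.NewPvtNsCollFilter()
//	filter.Add(k.namespace, k.collection)
//	iter, err := transientStore.GetTxPvtRWSetByTxid(k.txID, filter)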
   343  
   344  // populateFromRemotePeers populates pvtdata with private write sets fetched from remote peers and updates
   345  // pvtdataRetrievalInfo by removing the keys that were satisfied by remote peers
   346  func (pdp *PvtdataProvider) populateFromRemotePeers(pvtdata rwsetByKeys, pvtdataRetrievalInfo *pvtdataRetrievalInfo) bool {
   347  	pdp.logger.Debugf("Attempting to retrieve %d private write sets from remote peers.", len(pvtdataRetrievalInfo.eligibleMissingKeys))
   348  
   349  	dig2src := make(map[pvtdatacommon.DigKey][]*peer.Endorsement)
   350  	var skipped int
   351  	for k, v := range pvtdataRetrievalInfo.eligibleMissingKeys {
   352  		if v.invalid && pdp.skipPullingInvalidTransactions {
   353  			pdp.logger.Debugf("Skipping invalid key [%v] because peer is configured to skip pulling rwsets of invalid transactions.", k)
   354  			skipped++
   355  			continue
   356  		}
   357  		pdp.logger.Debugf("Fetching [%v] from remote peers", k)
   358  		dig := pvtdatacommon.DigKey{
   359  			TxId:       k.txID,
   360  			SeqInBlock: k.seqInBlock,
   361  			Collection: k.collection,
   362  			Namespace:  k.namespace,
   363  			BlockSeq:   pdp.blockNum,
   364  		}
   365  		dig2src[dig] = pvtdataRetrievalInfo.sources[k]
   366  	}
   367  
   368  	if len(dig2src) == 0 {
   369  		return false
   370  	}
   371  
   372  	fetchedData, err := pdp.fetcher.fetch(dig2src)
   373  	if err != nil {
   374  		pdp.logger.Warningf("Failed fetching private data from remote peers for dig2src:[%v], err: %s", dig2src, err)
   375  		return true
   376  	}
   377  
   378  	// Iterate over data fetched from remote peers
   379  	for _, element := range fetchedData.AvailableElements {
   380  		dig := element.Digest
   381  		for _, rws := range element.Payload {
   382  			key := rwSetKey{
   383  				txID:       dig.TxId,
   384  				namespace:  dig.Namespace,
   385  				collection: dig.Collection,
   386  				seqInBlock: dig.SeqInBlock,
   387  				hash:       hex.EncodeToString(commonutil.ComputeSHA256(rws)),
   388  			}
   389  			// skip if not missing
   390  			if _, missing := pvtdataRetrievalInfo.eligibleMissingKeys[key]; !missing {
   391  				// the key is not in the missing set; if it was also never fetched earlier, it was never requested, so log and ignore it
   392  				if _, exists := pvtdata[key]; !exists {
   393  					pdp.logger.Debugf("Ignoring [%v] because it was never requested.", key)
   394  				}
   395  				continue
   396  			}
   397  			// populate the pvtdata with the RW set from the remote peer
   398  			pvtdata[key] = rws
   399  			// remove key from missing
   400  			delete(pvtdataRetrievalInfo.eligibleMissingKeys, key)
   401  			pdp.logger.Debugf("Fetched [%v]", key)
   402  		}
   403  	}
   404  	// Iterate over purged data
   405  	for _, dig := range fetchedData.PurgedElements {
   406  		// delete purged key from missing keys
   407  		for missingPvtRWKey := range pvtdataRetrievalInfo.eligibleMissingKeys {
   408  			if missingPvtRWKey.namespace == dig.Namespace &&
   409  				missingPvtRWKey.collection == dig.Collection &&
   410  				missingPvtRWKey.seqInBlock == dig.SeqInBlock &&
   411  				missingPvtRWKey.txID == dig.TxId {
   412  				delete(pvtdataRetrievalInfo.eligibleMissingKeys, missingPvtRWKey)
   413  				pdp.logger.Warningf("Key is missing because it was purged or will soon be purged; "+
   414  					"continuing block commit without [%+v] in the private rwset", missingPvtRWKey)
   415  			}
   416  		}
   417  	}
   418  
   419  	return len(pvtdataRetrievalInfo.eligibleMissingKeys) > skipped
   420  }
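// Return-value sketch (restating the final statement above): a retry is requested only
// when keys remain missing beyond those deliberately skipped for invalid transactions.
//
//	needToRetry := len(pvtdataRetrievalInfo.eligibleMissingKeys) > skipped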
   421  
   422  // prepareBlockPvtdata consolidates the fetched private data as well as ineligible and eligible
   423  // missing private data into a ledger.BlockPvtdata for the PvtdataProvider to return to the consumer
   424  func (pdp *PvtdataProvider) prepareBlockPvtdata(pvtdata rwsetByKeys, pvtdataRetrievalInfo *pvtdataRetrievalInfo) *ledger.BlockPvtdata {
   425  	blockPvtdata := &ledger.BlockPvtdata{
   426  		PvtData:        make(ledger.TxPvtDataMap),
   427  		MissingPvtData: make(ledger.TxMissingPvtDataMap),
   428  	}
   429  
   430  	if len(pvtdataRetrievalInfo.eligibleMissingKeys) == 0 {
   431  		pdp.logger.Infof("Successfully fetched all eligible collection private write sets for block [%d]", pdp.blockNum)
   432  	} else {
   433  		pdp.logger.Warningf("Could not fetch all missing eligible collection private write sets for block [%d]. Will commit block with missing private write sets:[%v]",
   434  			pdp.blockNum, pvtdataRetrievalInfo.eligibleMissingKeys)
   435  	}
   436  
   437  	for seqInBlock, nsRWS := range pvtdata.bySeqsInBlock() {
   438  		// add all found pvtdata to blockPvtdata.PvtData for seqInBlock
   439  		blockPvtdata.PvtData[seqInBlock] = &ledger.TxPvtData{
   440  			SeqInBlock: seqInBlock,
   441  			WriteSet:   nsRWS.toRWSet(),
   442  		}
   443  	}
   444  
   445  	for key := range pvtdataRetrievalInfo.eligibleMissingKeys {
   446  		blockPvtdata.MissingPvtData.Add(key.seqInBlock, key.namespace, key.collection, true)
   447  	}
   448  
   449  	for key := range pvtdataRetrievalInfo.ineligibleMissingKeys {
   450  		blockPvtdata.MissingPvtData.Add(key.seqInBlock, key.namespace, key.collection, false)
   451  	}
   452  
   453  	return blockPvtdata
   454  }
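// Result-shape sketch (hypothetical values, assuming chaincode "mycc" and collection
// "secret"): if tx 0 was fully retrieved and tx 2 is still missing one eligible
// collection, the returned BlockPvtdata looks roughly like
//
//	blockPvtdata.PvtData[0]                                    // reassembled write set for tx 0
//	blockPvtdata.MissingPvtData.Add(2, "mycc", "secret", true) // eligible but missing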
   455  
   456  type pvtdataRetrievalInfo struct {
   457  	sources               map[rwSetKey][]*peer.Endorsement
   458  	txns                  []string
   459  	eligibleMissingKeys   rwsetKeys
   460  	ineligibleMissingKeys rwsetKeys
   461  }
   462  
   463  // rwset types
   464  
   465  type readWriteSets []*readWriteSet
   466  
   467  func (s readWriteSets) toRWSet() *rwset.TxPvtReadWriteSet {
   468  	namespaces := make(map[string]*rwset.NsPvtReadWriteSet)
   469  	dataModel := rwset.TxReadWriteSet_KV
   470  	for _, rws := range s {
   471  		if _, exists := namespaces[rws.namespace]; !exists {
   472  			namespaces[rws.namespace] = &rwset.NsPvtReadWriteSet{
   473  				Namespace: rws.namespace,
   474  			}
   475  		}
   476  		col := &rwset.CollectionPvtReadWriteSet{
   477  			CollectionName: rws.collection,
   478  			Rwset:          rws.rws,
   479  		}
   480  		namespaces[rws.namespace].CollectionPvtRwset = append(namespaces[rws.namespace].CollectionPvtRwset, col)
   481  	}
   482  
   483  	var namespaceSlice []*rwset.NsPvtReadWriteSet
   484  	for _, nsRWset := range namespaces {
   485  		namespaceSlice = append(namespaceSlice, nsRWset)
   486  	}
   487  
   488  	return &rwset.TxPvtReadWriteSet{
   489  		DataModel:  dataModel,
   490  		NsPvtRwset: namespaceSlice,
   491  	}
   492  }
   493  
   494  type readWriteSet struct {
   495  	rwSetKey
   496  	rws []byte
   497  }
   498  
   499  type rwsetByKeys map[rwSetKey][]byte
   500  
   501  func (s rwsetByKeys) bySeqsInBlock() map[uint64]readWriteSets {
   502  	res := make(map[uint64]readWriteSets)
   503  	for k, rws := range s {
   504  		res[k.seqInBlock] = append(res[k.seqInBlock], &readWriteSet{
   505  			rws:      rws,
   506  			rwSetKey: k,
   507  		})
   508  	}
   509  	return res
   510  }
   511  
   512  type rwsetInfo struct {
   513  	invalid bool
   514  }
   515  
   516  type rwsetKeys map[rwSetKey]rwsetInfo
   517  
   518  // String returns a string representation of the rwsetKeys
   519  func (s rwsetKeys) String() string {
   520  	var buffer bytes.Buffer
   521  	for k := range s {
   522  		buffer.WriteString(fmt.Sprintf("%s\n", k.String()))
   523  	}
   524  	return buffer.String()
   525  }
   526  
   527  type rwSetKey struct {
   528  	txID       string
   529  	seqInBlock uint64
   530  	namespace  string
   531  	collection string
   532  	hash       string
   533  }
   534  
   535  // String returns a string representation of the rwSetKey
   536  func (k *rwSetKey) String() string {
   537  	return fmt.Sprintf("txID: %s, seq: %d, namespace: %s, collection: %s, hash: %s", k.txID, k.seqInBlock, k.namespace, k.collection, k.hash)
   538  }
   539  
   540  func getTxIDBySeqInBlock(seqInBlock uint64, pvtdataToRetrieve []*ledger.TxPvtdataInfo) string {
   541  	for _, txPvtdataItem := range pvtdataToRetrieve {
   542  		if txPvtdataItem.SeqInBlock == seqInBlock {
   543  			return txPvtdataItem.TxID
   544  		}
   545  	}
   546  
   547  	return ""
   548  }
   549  
   550  func endorsersFromEligibleOrgs(ns string, col string, endorsers []*peer.Endorsement, orgs map[string]struct{}) []*peer.Endorsement {
   551  	var res []*peer.Endorsement
   552  	for _, e := range endorsers {
   553  		sID := &msp.SerializedIdentity{}
   554  		err := proto.Unmarshal(e.Endorser, sID)
   555  		if err != nil {
   556  			logger.Warning("Failed unmarshalling endorser:", err)
   557  			continue
   558  		}
   559  		if _, ok := orgs[sID.Mspid]; !ok {
   560  			logger.Debug(sID.Mspid, "isn't among the collection's orgs:", orgs, "for namespace", ns, ",collection", col)
   561  			continue
   562  		}
   563  		res = append(res, e)
   564  	}
   565  	return res
   566  }
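// Design note with a small sketch (restating the call site in computeEligibility):
// endorsers from organizations outside the collection's member set are dropped so
// that the remote pull in populateFromRemotePeers prefers peers from orgs that are
// actually entitled to hold the data.
//
//	sources[key] = endorsersFromEligibleOrgs(ns, col, endorsers, policy.MemberOrgs())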