github.com/yimialmonte/fabric@v2.1.1+incompatible/gossip/privdata/coordinator.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package privdata
     8  
     9  import (
    10  	"time"
    11  
    12  	"github.com/hyperledger/fabric-protos-go/common"
    13  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    14  	"github.com/hyperledger/fabric-protos-go/peer"
    15  	protostransientstore "github.com/hyperledger/fabric-protos-go/transientstore"
    16  	"github.com/hyperledger/fabric/common/channelconfig"
    17  	"github.com/hyperledger/fabric/core/committer"
    18  	"github.com/hyperledger/fabric/core/committer/txvalidator"
    19  	"github.com/hyperledger/fabric/core/common/privdata"
    20  	"github.com/hyperledger/fabric/core/ledger"
    21  	"github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
    22  	"github.com/hyperledger/fabric/core/transientstore"
    23  	"github.com/hyperledger/fabric/gossip/metrics"
    24  	privdatacommon "github.com/hyperledger/fabric/gossip/privdata/common"
    25  	"github.com/hyperledger/fabric/gossip/util"
    26  	"github.com/hyperledger/fabric/protoutil"
    27  	"github.com/pkg/errors"
    28  )
    29  
    30  const pullRetrySleepInterval = time.Second
    31  
    32  var logger = util.GetLogger(util.PrivateDataLogger, "")
    33  
    34  //go:generate mockery -dir . -name CollectionStore -case underscore -output mocks/
    35  
    36  // CollectionStore is the local interface used to generate mocks for the foreign interface.
    37  type CollectionStore interface {
    38  	privdata.CollectionStore
    39  }
    40  
    41  //go:generate mockery -dir . -name Committer -case underscore -output mocks/
    42  
    43  // Committer is the local interface used to generate mocks for the foreign interface.
    44  type Committer interface {
    45  	committer.Committer
    46  }
    47  
    48  // Coordinator orchestrates the flow of newly arrived
    49  // blocks and in-flight transient data, and is responsible
    50  // for completing the missing parts of transient data for a given block.
    51  type Coordinator interface {
    52  	// StoreBlock delivers a new block with its underlying private data and
    53  	// returns an error if the block cannot be validated or committed
    54  	StoreBlock(block *common.Block, data util.PvtDataCollections) error
    55  
    56  	// StorePvtData is used to persist private data into the transient store
    57  	StorePvtData(txid string, privData *protostransientstore.TxPvtReadWriteSetWithConfigInfo, blckHeight uint64) error
    58  
    59  	// GetPvtDataAndBlockByNum gets the block by number and also returns all related private data
    60  	// that the requesting peer is eligible for.
    61  	// The order of private data in the PvtDataCollections slice doesn't imply the order of
    62  	// transactions in the block related to these private data; to get the correct placement,
    63  	// read the TxPvtData.SeqInBlock field
    64  	GetPvtDataAndBlockByNum(seqNum uint64, peerAuth protoutil.SignedData) (*common.Block, util.PvtDataCollections, error)
    65  
    66  	// LedgerHeight returns the most recent block sequence number
    67  	LedgerHeight() (uint64, error)
    68  
    69  	// Close shuts down the coordinator service
    70  	Close()
    71  }
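
// Illustrative sketch, not part of the original file: how a caller might drive a
// Coordinator once a new block arrives from ordering or gossip. The function name
// exampleDriveCoordinator is hypothetical; block and pvtData are assumed to be
// supplied by the caller.
func exampleDriveCoordinator(coord Coordinator, block *common.Block, pvtData util.PvtDataCollections) error {
	// Commit the block, letting the coordinator complete any missing private data.
	if err := coord.StoreBlock(block, pvtData); err != nil {
		return err
	}
	// Report the ledger height reached after the commit.
	height, err := coord.LedgerHeight()
	if err != nil {
		return err
	}
	logger.Debugf("ledger height after commit: %d", height)
	return nil
}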
    72  
    73  type dig2sources map[privdatacommon.DigKey][]*peer.Endorsement
    74  
    75  func (d2s dig2sources) keys() []privdatacommon.DigKey {
    76  	res := make([]privdatacommon.DigKey, 0, len(d2s))
    77  	for dig := range d2s {
    78  		res = append(res, dig)
    79  	}
    80  	return res
    81  }
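
// Illustrative sketch, not part of the original file: building a dig2sources map and
// extracting its digest keys, as a Fetcher implementation would when deciding which
// peers to pull from. The exampleDig2Sources name is hypothetical, and the DigKey
// field names assume the definition in gossip/privdata/common.
func exampleDig2Sources(endorsers []*peer.Endorsement) []privdatacommon.DigKey {
	d2s := dig2sources{}
	// Map a private data digest to the endorsers that are expected to hold it.
	d2s[privdatacommon.DigKey{
		TxId:       "tx1",
		Namespace:  "mycc",
		Collection: "collectionMarbles",
		BlockSeq:   5,
		SeqInBlock: 0,
	}] = endorsers
	return d2s.keys()
}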
    82  
    83  // Fetcher is the interface that defines the API to fetch missing
    84  // private data elements
    85  type Fetcher interface {
    86  	fetch(dig2src dig2sources) (*privdatacommon.FetchedPvtDataContainer, error)
    87  }
    88  
    89  //go:generate mockery -dir ./ -name CapabilityProvider -case underscore -output mocks/
    90  
    91  // CapabilityProvider contains functions to retrieve capability information for a channel
    92  type CapabilityProvider interface {
    93  	// Capabilities defines the capabilities for the application portion of this channel
    94  	Capabilities() channelconfig.ApplicationCapabilities
    95  }
    96  
    97  // Support encapsulates a set of interfaces to
    98  // aggregate the required functionality into a single struct
    99  type Support struct {
   100  	ChainID string
   101  	privdata.CollectionStore
   102  	txvalidator.Validator
   103  	committer.Committer
   104  	Fetcher
   105  	CapabilityProvider
   106  }
   107  
   108  // CoordinatorConfig encapsulates the config that is passed to a new coordinator
   109  type CoordinatorConfig struct {
   110  	// TransientBlockRetention indicates the number of blocks to retain in the transient store
   111  	// when purging below height on committing every TransientBlockRetention-th block
   112  	TransientBlockRetention uint64
   113  	// PullRetryThreshold indicates the maximum duration for which fetching private data from a remote
   114  	// peer will be retried before giving up and leaving the private data as missing
   115  	PullRetryThreshold time.Duration
   116  	// SkipPullingInvalidTransactions if true will skip the fetch from remote peer step for transactions
   117  	// marked as invalid
   118  	SkipPullingInvalidTransactions bool
   119  }
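
// Illustrative sketch, not part of the original file: a CoordinatorConfig populated
// with example values. In a real peer these fields are typically derived from the
// peer's gossip pvtData configuration rather than hard-coded; the values below are
// only placeholders.
func exampleCoordinatorConfig() CoordinatorConfig {
	return CoordinatorConfig{
		TransientBlockRetention:        1000,             // purge transient data older than 1000 blocks
		PullRetryThreshold:             60 * time.Second, // retry remote fetches for up to a minute
		SkipPullingInvalidTransactions: false,            // still attempt to pull pvtdata for invalid txs
	}
}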
   120  
   121  type coordinator struct {
   122  	mspID          string
   123  	selfSignedData protoutil.SignedData
   124  	Support
   125  	store                          *transientstore.Store
   126  	transientBlockRetention        uint64
   127  	metrics                        *metrics.PrivdataMetrics
   128  	pullRetryThreshold             time.Duration
   129  	skipPullingInvalidTransactions bool
   130  	idDeserializerFactory          IdentityDeserializerFactory
   131  }
   132  
   133  // NewCoordinator creates a new instance of coordinator
   134  func NewCoordinator(mspID string, support Support, store *transientstore.Store, selfSignedData protoutil.SignedData, metrics *metrics.PrivdataMetrics,
   135  	config CoordinatorConfig, idDeserializerFactory IdentityDeserializerFactory) Coordinator {
   136  	return &coordinator{Support: support,
   137  		mspID:                          mspID,
   138  		store:                          store,
   139  		selfSignedData:                 selfSignedData,
   140  		transientBlockRetention:        config.TransientBlockRetention,
   141  		metrics:                        metrics,
   142  		pullRetryThreshold:             config.PullRetryThreshold,
   143  		skipPullingInvalidTransactions: config.SkipPullingInvalidTransactions,
   144  		idDeserializerFactory:          idDeserializerFactory,
   145  	}
   146  }
   147  
   148  // StoreBlock stores a block with its private data into the ledger
   149  func (c *coordinator) StoreBlock(block *common.Block, privateDataSets util.PvtDataCollections) error {
   150  	if block.Data == nil {
   151  		return errors.New("Block data is empty")
   152  	}
   153  	if block.Header == nil {
   154  		return errors.New("Block header is nil")
   155  	}
   156  
   157  	logger.Infof("[%s] Received block [%d] from buffer", c.ChainID, block.Header.Number)
   158  
   159  	logger.Debugf("[%s] Validating block [%d]", c.ChainID, block.Header.Number)
   160  
   161  	validationStart := time.Now()
   162  	err := c.Validator.Validate(block)
   163  	c.reportValidationDuration(time.Since(validationStart))
   164  	if err != nil {
   165  		logger.Errorf("Validation failed: %+v", err)
   166  		return err
   167  	}
   168  
   169  	blockAndPvtData := &ledger.BlockAndPvtData{
   170  		Block:          block,
   171  		PvtData:        make(ledger.TxPvtDataMap),
   172  		MissingPvtData: make(ledger.TxMissingPvtDataMap),
   173  	}
   174  
   175  	exist, err := c.DoesPvtDataInfoExistInLedger(block.Header.Number)
   176  	if err != nil {
   177  		return err
   178  	}
   179  	if exist {
   180  		commitOpts := &ledger.CommitOptions{FetchPvtDataFromLedger: true}
   181  		return c.CommitLegacy(blockAndPvtData, commitOpts)
   182  	}
   183  
   184  	listMissingPrivateDataDurationHistogram := c.metrics.ListMissingPrivateDataDuration.With("channel", c.ChainID)
   185  	fetchDurationHistogram := c.metrics.FetchDuration.With("channel", c.ChainID)
   186  	purgeDurationHistogram := c.metrics.PurgeDuration.With("channel", c.ChainID)
   187  	pdp := &PvtdataProvider{
   188  		mspID:                                   c.mspID,
   189  		selfSignedData:                          c.selfSignedData,
   190  		logger:                                  logger.With("channel", c.ChainID),
   191  		listMissingPrivateDataDurationHistogram: listMissingPrivateDataDurationHistogram,
   192  		fetchDurationHistogram:                  fetchDurationHistogram,
   193  		purgeDurationHistogram:                  purgeDurationHistogram,
   194  		transientStore:                          c.store,
   195  		pullRetryThreshold:                      c.pullRetryThreshold,
   196  		prefetchedPvtdata:                       privateDataSets,
   197  		transientBlockRetention:                 c.transientBlockRetention,
   198  		channelID:                               c.ChainID,
   199  		blockNum:                                block.Header.Number,
   200  		storePvtdataOfInvalidTx:                 c.Support.CapabilityProvider.Capabilities().StorePvtDataOfInvalidTx(),
   201  		skipPullingInvalidTransactions:          c.skipPullingInvalidTransactions,
   202  		fetcher:                                 c.Fetcher,
   203  		idDeserializerFactory:                   c.idDeserializerFactory,
   204  	}
   205  	pvtdataToRetrieve, err := c.getTxPvtdataInfoFromBlock(block)
   206  	if err != nil {
   207  		logger.Warningf("Failed to get private data info from block: %s", err)
   208  		return err
   209  	}
   210  
   211  	// Retrieve the private data.
   212  	// RetrievePvtdata checks this peer's eligibility and then retrieves from the cache, the transient store, or a remote peer.
   213  	retrievedPvtdata, err := pdp.RetrievePvtdata(pvtdataToRetrieve)
   214  	if err != nil {
   215  		logger.Warningf("Failed to retrieve pvtdata: %s", err)
   216  		return err
   217  	}
   218  
   219  	blockAndPvtData.PvtData = retrievedPvtdata.blockPvtdata.PvtData
   220  	blockAndPvtData.MissingPvtData = retrievedPvtdata.blockPvtdata.MissingPvtData
   221  
   222  	// commit block and private data
   223  	commitStart := time.Now()
   224  	err = c.CommitLegacy(blockAndPvtData, &ledger.CommitOptions{})
   225  	c.reportCommitDuration(time.Since(commitStart))
   226  	if err != nil {
   227  		return errors.Wrap(err, "commit failed")
   228  	}
   229  
   230  	// Purge transactions
   231  	retrievedPvtdata.Purge()
   232  
   233  	return nil
   234  }
   235  
   236  // StorePvtData is used to persist private data into the transient store
   237  func (c *coordinator) StorePvtData(txID string, privData *protostransientstore.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error {
   238  	return c.store.Persist(txID, blkHeight, privData)
   239  }
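
// Illustrative sketch, not part of the original file: persisting a private
// read-write set produced at endorsement time into the transient store through the
// coordinator, keyed by transaction ID and endorsement block height. The
// exampleStorePvtData name is hypothetical.
func exampleStorePvtData(coord Coordinator, txID string, pvtRWSet *rwset.TxPvtReadWriteSet, blkHeight uint64) error {
	withConfig := &protostransientstore.TxPvtReadWriteSetWithConfigInfo{
		PvtRwset: pvtRWSet,
		// CollectionConfigs would normally carry the collection config packages
		// for the namespaces written by this transaction.
	}
	return coord.StorePvtData(txID, withConfig, blkHeight)
}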
   240  
   241  // GetPvtDataAndBlockByNum gets the block by number and also returns all related private data
   242  // that the requesting peer is eligible for.
   243  // The order of private data in the PvtDataCollections slice doesn't imply the order of
   244  // transactions in the block related to these private data; to get the correct placement,
   245  // read the TxPvtData.SeqInBlock field
   246  func (c *coordinator) GetPvtDataAndBlockByNum(seqNum uint64, peerAuthInfo protoutil.SignedData) (*common.Block, util.PvtDataCollections, error) {
   247  	blockAndPvtData, err := c.Committer.GetPvtDataAndBlockByNum(seqNum)
   248  	if err != nil {
   249  		return nil, nil, err
   250  	}
   251  
   252  	seqs2Namespaces := aggregatedCollections{}
   253  	for seqInBlock := range blockAndPvtData.Block.Data.Data {
   254  		txPvtDataItem, exists := blockAndPvtData.PvtData[uint64(seqInBlock)]
   255  		if !exists {
   256  			continue
   257  		}
   258  
   259  		// Iterate through the private write sets and include them in response if requesting peer is eligible for it
   260  		for _, ns := range txPvtDataItem.WriteSet.NsPvtRwset {
   261  			for _, col := range ns.CollectionPvtRwset {
   262  				cc := privdata.CollectionCriteria{
   263  					Channel:    c.ChainID,
   264  					Namespace:  ns.Namespace,
   265  					Collection: col.CollectionName,
   266  				}
   267  				sp, err := c.CollectionStore.RetrieveCollectionAccessPolicy(cc)
   268  				if err != nil {
   269  					logger.Warningf("Failed obtaining policy for collection criteria [%#v]: %s", cc, err)
   270  					continue
   271  				}
   272  				isAuthorized := sp.AccessFilter()
   273  				if isAuthorized == nil {
   274  					logger.Warningf("Failed obtaining filter for collection criteria [%#v]", cc)
   275  					continue
   276  				}
   277  				if !isAuthorized(peerAuthInfo) {
   278  					logger.Debugf("Skipping collection criteria [%#v] because peer isn't authorized", cc)
   279  					continue
   280  				}
   281  				seqs2Namespaces.addCollection(uint64(seqInBlock), txPvtDataItem.WriteSet.DataModel, ns.Namespace, col)
   282  			}
   283  		}
   284  	}
   285  
   286  	return blockAndPvtData.Block, seqs2Namespaces.asPrivateData(), nil
   287  }
   288  
   289  // getTxPvtdataInfoFromBlock parses the block transactions and returns the list of private data items in the block.
   290  // Note that this peer's eligibility for the private data is not checked here.
   291  func (c *coordinator) getTxPvtdataInfoFromBlock(block *common.Block) ([]*ledger.TxPvtdataInfo, error) {
   292  	txPvtdataItemsFromBlock := []*ledger.TxPvtdataInfo{}
   293  
   294  	if block.Metadata == nil || len(block.Metadata.Metadata) <= int(common.BlockMetadataIndex_TRANSACTIONS_FILTER) {
   295  		return nil, errors.New("Block.Metadata is nil or Block.Metadata lacks a Tx filter bitmap")
   296  	}
   297  	txsFilter := txValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
   298  	data := block.Data.Data
   299  	if len(txsFilter) != len(block.Data.Data) {
   300  		return nil, errors.Errorf("block data size(%d) is different from Tx filter size(%d)", len(block.Data.Data), len(txsFilter))
   301  	}
   302  
   303  	for seqInBlock, txEnvBytes := range data {
   304  		invalid := txsFilter[seqInBlock] != uint8(peer.TxValidationCode_VALID)
   305  		txInfo, err := getTxInfoFromTransactionBytes(txEnvBytes)
   306  		if err != nil {
   307  			continue
   308  		}
   309  
   310  		colPvtdataInfo := []*ledger.CollectionPvtdataInfo{}
   311  		for _, ns := range txInfo.txRWSet.NsRwSets {
   312  			for _, hashedCollection := range ns.CollHashedRwSets {
   313  				// skip if no writes
   314  				if !containsWrites(txInfo.txID, ns.NameSpace, hashedCollection) {
   315  					continue
   316  				}
   317  				cc := privdata.CollectionCriteria{
   318  					Channel:    txInfo.channelID,
   319  					Namespace:  ns.NameSpace,
   320  					Collection: hashedCollection.CollectionName,
   321  				}
   322  
   323  				colConfig, err := c.CollectionStore.RetrieveCollectionConfig(cc)
   324  				if err != nil {
   325  					logger.Warningf("Failed to retrieve collection config for collection criteria [%#v]: %s", cc, err)
   326  					return nil, err
   327  				}
   328  				col := &ledger.CollectionPvtdataInfo{
   329  					Namespace:        ns.NameSpace,
   330  					Collection:       hashedCollection.CollectionName,
   331  					ExpectedHash:     hashedCollection.PvtRwSetHash,
   332  					CollectionConfig: colConfig,
   333  					Endorsers:        txInfo.endorsements,
   334  				}
   335  				colPvtdataInfo = append(colPvtdataInfo, col)
   336  			}
   337  		}
   338  		txPvtdataToRetrieve := &ledger.TxPvtdataInfo{
   339  			TxID:                  txInfo.txID,
   340  			Invalid:               invalid,
   341  			SeqInBlock:            uint64(seqInBlock),
   342  			CollectionPvtdataInfo: colPvtdataInfo,
   343  		}
   344  		txPvtdataItemsFromBlock = append(txPvtdataItemsFromBlock, txPvtdataToRetrieve)
   345  	}
   346  
   347  	return txPvtdataItemsFromBlock, nil
   348  }
   349  
   350  func (c *coordinator) reportValidationDuration(d time.Duration) {
   351  	c.metrics.ValidationDuration.With("channel", c.ChainID).Observe(d.Seconds())
   352  }
   353  
   354  func (c *coordinator) reportCommitDuration(d time.Duration) {
   355  	c.metrics.CommitPrivateDataDuration.With("channel", c.ChainID).Observe(d.Seconds())
   356  }
   357  
   358  type seqAndDataModel struct {
   359  	seq       uint64
   360  	dataModel rwset.TxReadWriteSet_DataModel
   361  }
   362  
   363  // map from seqAndDataModel to:
   364  //     map from namespace to []*rwset.CollectionPvtReadWriteSet
   365  type aggregatedCollections map[seqAndDataModel]map[string][]*rwset.CollectionPvtReadWriteSet
   366  
   367  func (ac aggregatedCollections) addCollection(seqInBlock uint64, dm rwset.TxReadWriteSet_DataModel, namespace string, col *rwset.CollectionPvtReadWriteSet) {
   368  	seq := seqAndDataModel{
   369  		dataModel: dm,
   370  		seq:       seqInBlock,
   371  	}
   372  	if _, exists := ac[seq]; !exists {
   373  		ac[seq] = make(map[string][]*rwset.CollectionPvtReadWriteSet)
   374  	}
   375  	ac[seq][namespace] = append(ac[seq][namespace], col)
   376  }
   377  
   378  func (ac aggregatedCollections) asPrivateData() []*ledger.TxPvtData {
   379  	var data []*ledger.TxPvtData
   380  	for seq, ns := range ac {
   381  		txPrivateData := &ledger.TxPvtData{
   382  			SeqInBlock: seq.seq,
   383  			WriteSet: &rwset.TxPvtReadWriteSet{
   384  				DataModel: seq.dataModel,
   385  			},
   386  		}
   387  		for namespaceName, cols := range ns {
   388  			txPrivateData.WriteSet.NsPvtRwset = append(txPrivateData.WriteSet.NsPvtRwset, &rwset.NsPvtReadWriteSet{
   389  				Namespace:          namespaceName,
   390  				CollectionPvtRwset: cols,
   391  			})
   392  		}
   393  		data = append(data, txPrivateData)
   394  	}
   395  	return data
   396  }
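
// Illustrative sketch, not part of the original file: aggregating a single
// collection read-write set and converting the aggregation into the
// []*ledger.TxPvtData form returned by GetPvtDataAndBlockByNum. The
// exampleAggregatedCollections name and the collection/namespace values are
// placeholders.
func exampleAggregatedCollections() []*ledger.TxPvtData {
	ac := aggregatedCollections{}
	col := &rwset.CollectionPvtReadWriteSet{CollectionName: "collectionMarbles"}
	// Attach the collection to transaction 3 of the block, using the KV data model.
	ac.addCollection(3, rwset.TxReadWriteSet_KV, "mycc", col)
	return ac.asPrivateData()
}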
   397  
   398  type txInfo struct {
   399  	channelID    string
   400  	txID         string
   401  	endorsements []*peer.Endorsement
   402  	txRWSet      *rwsetutil.TxRwSet
   403  }
   404  
   405  // getTxInfoFromTransactionBytes parses a transaction and returns info required for private data retrieval
   406  func getTxInfoFromTransactionBytes(envBytes []byte) (*txInfo, error) {
   407  	txInfo := &txInfo{}
   408  	env, err := protoutil.GetEnvelopeFromBlock(envBytes)
   409  	if err != nil {
   410  		logger.Warningf("Invalid envelope: %s", err)
   411  		return nil, err
   412  	}
   413  
   414  	payload, err := protoutil.UnmarshalPayload(env.Payload)
   415  	if err != nil {
   416  		logger.Warningf("Invalid payload: %s", err)
   417  		return nil, err
   418  	}
   419  	if payload.Header == nil {
   420  		err := errors.New("payload header is nil")
   421  		logger.Warningf("Invalid tx: %s", err)
   422  		return nil, err
   423  	}
   424  
   425  	chdr, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader)
   426  	if err != nil {
   427  		logger.Warningf("Invalid channel header: %s", err)
   428  		return nil, err
   429  	}
   430  	txInfo.channelID = chdr.ChannelId
   431  	txInfo.txID = chdr.TxId
   432  
   433  	if chdr.Type != int32(common.HeaderType_ENDORSER_TRANSACTION) {
   434  		err := errors.New("header type is not an endorser transaction")
   435  		logger.Warningf("Invalid transaction type: %s", err)
   436  		return nil, err
   437  	}
   438  
   439  	respPayload, err := protoutil.GetActionFromEnvelope(envBytes)
   440  	if err != nil {
   441  		logger.Warningf("Failed obtaining action from envelope: %s", err)
   442  		return nil, err
   443  	}
   444  
   445  	tx, err := protoutil.UnmarshalTransaction(payload.Data)
   446  	if err != nil {
   447  		logger.Warningf("Invalid transaction in payload data for tx [%s]: %s", chdr.TxId, err)
   448  		return nil, err
   449  	}
   450  
   451  	ccActionPayload, err := protoutil.UnmarshalChaincodeActionPayload(tx.Actions[0].Payload)
   452  	if err != nil {
   453  		logger.Warningf("Invalid chaincode action in payload for tx [%s]: %s", chdr.TxId, err)
   454  		return nil, err
   455  	}
   456  
   457  	if ccActionPayload.Action == nil {
   458  		logger.Warningf("Action in ChaincodeActionPayload for tx [%s] is nil", chdr.TxId)
   459  		return nil, errors.Errorf("action in ChaincodeActionPayload for tx [%s] is nil", chdr.TxId)
   460  	}
   461  	txInfo.endorsements = ccActionPayload.Action.Endorsements
   462  
   463  	txRWSet := &rwsetutil.TxRwSet{}
   464  	if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil {
   465  		logger.Warningf("Failed obtaining TxRwSet from ChaincodeAction's results: %s", err)
   466  		return nil, err
   467  	}
   468  	txInfo.txRWSet = txRWSet
   469  
   470  	return txInfo, nil
   471  }
   472  
   473  // containsWrites checks whether the given CollHashedRwSet contains writes
   474  func containsWrites(txID string, namespace string, colHashedRWSet *rwsetutil.CollHashedRwSet) bool {
   475  	if colHashedRWSet.HashedRwSet == nil {
   476  		logger.Warningf("HashedRWSet of tx [%s], namespace [%s], collection [%s] is nil", txID, namespace, colHashedRWSet.CollectionName)
   477  		return false
   478  	}
   479  	if len(colHashedRWSet.HashedRwSet.HashedWrites) == 0 && len(colHashedRWSet.HashedRwSet.MetadataWrites) == 0 {
   480  		logger.Debugf("HashedRWSet of tx [%s], namespace [%s], collection [%s] doesn't contain writes", txID, namespace, colHashedRWSet.CollectionName)
   481  		return false
   482  	}
   483  	return true
   484  }