github.com/osdi23p228/fabric@v0.0.0-20221218062954-77808885f5db/gossip/privdata/coordinator.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package privdata
     8  
     9  import (
    10  	"time"
    11  
    12  	"github.com/hyperledger/fabric-protos-go/common"
    13  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    14  	"github.com/hyperledger/fabric-protos-go/peer"
    15  	protostransientstore "github.com/hyperledger/fabric-protos-go/transientstore"
    16  	"github.com/osdi23p228/fabric/common/channelconfig"
    17  	"github.com/osdi23p228/fabric/core/committer"
    18  	"github.com/osdi23p228/fabric/core/committer/txvalidator"
    19  	"github.com/osdi23p228/fabric/core/common/privdata"
    20  	"github.com/osdi23p228/fabric/core/ledger"
    21  	"github.com/osdi23p228/fabric/core/ledger/kvledger/txmgmt/rwsetutil"
    22  	"github.com/osdi23p228/fabric/core/transientstore"
    23  	"github.com/osdi23p228/fabric/gossip/metrics"
    24  	privdatacommon "github.com/osdi23p228/fabric/gossip/privdata/common"
    25  	"github.com/osdi23p228/fabric/gossip/util"
    26  	"github.com/osdi23p228/fabric/protoutil"
    27  	"github.com/pkg/errors"
    28  )
    29  
    30  const pullRetrySleepInterval = time.Second
    31  
    32  var logger = util.GetLogger(util.PrivateDataLogger, "")
    33  
    34  //go:generate mockery -dir . -name CollectionStore -case underscore -output mocks/
    35  
    36  // CollectionStore is the local interface used to generate mocks for the foreign interface.
    37  type CollectionStore interface {
    38  	privdata.CollectionStore
    39  }
    40  
    41  //go:generate mockery -dir . -name Committer -case underscore -output mocks/
    42  
    43  // Committer is the local interface used to generate mocks for the foreign interface.
    44  type Committer interface {
    45  	committer.Committer
    46  }
    47  
    48  // Coordinator orchestrates the flow of newly arriving
    49  // blocks and in-flight transient data, and is responsible
    50  // for completing the missing parts of transient data for a given block.
    51  type Coordinator interface {
    52  	// StoreBlock delivers a new block together with its underlying private data
    53  	// and returns an error if the block cannot be committed
    54  	StoreBlock(block *common.Block, data util.PvtDataCollections) error
    55  
    56  	// StorePvtData persists private data into the transient store
    57  	StorePvtData(txid string, privData *protostransientstore.TxPvtReadWriteSetWithConfigInfo, blckHeight uint64) error
    58  
    59  	// GetPvtDataAndBlockByNum gets the block by number and also returns all related private data
    60  	// that the requesting peer is eligible for.
    61  	// The order of private data in the PvtDataCollections slice does not imply the order of the
    62  	// related transactions in the block; to determine the correct placement,
    63  	// read the TxPvtData.SeqInBlock field
    64  	GetPvtDataAndBlockByNum(seqNum uint64, peerAuth protoutil.SignedData) (*common.Block, util.PvtDataCollections, error)
    65  
    66  	// LedgerHeight returns the most recent block sequence number
    67  	LedgerHeight() (uint64, error)
    68  
    69  	// Close shuts down the coordinator service
    70  	Close()
    71  }
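
        // A minimal usage sketch of the Coordinator interface (illustrative only;
        // coord, block, prefetchedPvtData, and peerSignedData are hypothetical values
        // wired up elsewhere during peer startup):
        //
        //	// Commit path: deliver a validated block together with any private
        //	// data that gossip has already pushed to this peer.
        //	if err := coord.StoreBlock(block, prefetchedPvtData); err != nil {
        //		return err
        //	}
        //
        //	// Serving path: return the block plus the private data the requesting
        //	// peer (identified by peerSignedData) is eligible for.
        //	blk, pvtData, err := coord.GetPvtDataAndBlockByNum(block.Header.Number, peerSignedData)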
    72  
    73  type dig2sources map[privdatacommon.DigKey][]*peer.Endorsement
    74  
    75  func (d2s dig2sources) keys() []privdatacommon.DigKey {
    76  	res := make([]privdatacommon.DigKey, 0, len(d2s))
    77  	for dig := range d2s {
    78  		res = append(res, dig)
    79  	}
    80  	return res
    81  }
    82  
    83  // Fetcher defines the API to fetch missing
    84  // private data elements
    85  type Fetcher interface {
    86  	fetch(dig2src dig2sources) (*privdatacommon.FetchedPvtDataContainer, error)
    87  }
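
        // A sketch of how a fetch request is shaped (illustrative; the digest values and
        // the endorsements slice are hypothetical, and the DigKey describes the private
        // data digest of a single collection):
        //
        //	req := dig2sources{
        //		privdatacommon.DigKey{
        //			TxId:       "tx1",
        //			Namespace:  "mycc",
        //			Collection: "secretCollection",
        //			BlockSeq:   10,
        //			SeqInBlock: 0,
        //		}: endorsements, // endorsers that are expected to hold the data
        //	}
        //	fetched, err := fetcher.fetch(req)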
    88  
    89  //go:generate mockery -dir ./ -name CapabilityProvider -case underscore -output mocks/
    90  
    91  // CapabilityProvider contains functions to retrieve capability information for a channel
    92  type CapabilityProvider interface {
    93  	// Capabilities defines the capabilities for the application portion of this channel
    94  	Capabilities() channelconfig.ApplicationCapabilities
    95  }
    96  
    97  // Support encapsulates a set of interfaces to
    98  // aggregate the required functionality into a single struct
    99  type Support struct {
   100  	ChainID string
   101  	privdata.CollectionStore
   102  	txvalidator.Validator
   103  	committer.Committer
   104  	Fetcher
   105  	CapabilityProvider
   106  }
   107  
   108  // CoordinatorConfig encapsulates the config that is passed to a new coordinator
   109  type CoordinatorConfig struct {
   110  	// TransientBlockRetention indicates the number of blocks to retain in the transient store;
   111  	// data below that height is purged when every TransientBlockRetention-th block is committed
   112  	TransientBlockRetention uint64
   113  	// PullRetryThreshold indicates the maximum duration for which a fetch from a remote peer is retried
   114  	// before giving up and leaving the private data as missing
   115  	PullRetryThreshold time.Duration
   116  	// SkipPullingInvalidTransactions, if true, skips the fetch-from-remote-peer step for transactions
   117  	// marked as invalid
   118  	SkipPullingInvalidTransactions bool
   119  }
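
        // An illustrative configuration (the values below are examples, not recommendations;
        // in the peer they are typically derived from the gossip private data settings):
        //
        //	cfg := CoordinatorConfig{
        //		TransientBlockRetention:        1000,
        //		PullRetryThreshold:             60 * time.Second,
        //		SkipPullingInvalidTransactions: false,
        //	}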
   120  
   121  type coordinator struct {
   122  	mspID          string
   123  	selfSignedData protoutil.SignedData
   124  	Support
   125  	store                          *transientstore.Store
   126  	transientBlockRetention        uint64
   127  	logger                         util.Logger
   128  	metrics                        *metrics.PrivdataMetrics
   129  	pullRetryThreshold             time.Duration
   130  	skipPullingInvalidTransactions bool
   131  	idDeserializerFactory          IdentityDeserializerFactory
   132  }
   133  
   134  // NewCoordinator creates a new instance of coordinator
   135  func NewCoordinator(mspID string, support Support, store *transientstore.Store, selfSignedData protoutil.SignedData, metrics *metrics.PrivdataMetrics,
   136  	config CoordinatorConfig, idDeserializerFactory IdentityDeserializerFactory) Coordinator {
   137  	return &coordinator{Support: support,
   138  		mspID:                          mspID,
   139  		store:                          store,
   140  		selfSignedData:                 selfSignedData,
   141  		transientBlockRetention:        config.TransientBlockRetention,
   142  		logger:                         logger.With("channel", support.ChainID),
   143  		metrics:                        metrics,
   144  		pullRetryThreshold:             config.PullRetryThreshold,
   145  		skipPullingInvalidTransactions: config.SkipPullingInvalidTransactions,
   146  		idDeserializerFactory:          idDeserializerFactory,
   147  	}
   148  }
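
        // A hypothetical wiring of NewCoordinator; support, transientStore, selfSignedData,
        // privdataMetrics, cfg, and deserializerFactory are assumed to be built during peer startup:
        //
        //	coord := NewCoordinator(
        //		"Org1MSP",           // the peer's MSP ID
        //		support,             // Support aggregating collection store, validator, committer, fetcher, capabilities
        //		transientStore,      // *transientstore.Store for this channel
        //		selfSignedData,      // this peer's protoutil.SignedData
        //		privdataMetrics,     // *metrics.PrivdataMetrics
        //		cfg,                 // CoordinatorConfig as sketched above
        //		deserializerFactory, // IdentityDeserializerFactory
        //	)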
   149  
   150  // StoreBlock stores the block together with its private data into the ledger
   151  func (c *coordinator) StoreBlock(block *common.Block, privateDataSets util.PvtDataCollections) error {
   152  	if block.Data == nil {
   153  		return errors.New("Block data is empty")
   154  	}
   155  	if block.Header == nil {
   156  		return errors.New("Block header is nil")
   157  	}
   158  
   159  	c.logger.Infof("Received block [%d] from buffer", block.Header.Number)
   160  
   161  	c.logger.Debugf("Validating block [%d]", block.Header.Number)
   162  
   163  	validationStart := time.Now()
   164  	err := c.Validator.Validate(block)
   165  	c.reportValidationDuration(time.Since(validationStart))
   166  	if err != nil {
   167  		c.logger.Errorf("Validation failed: %+v", err)
   168  		return err
   169  	}
   170  
   171  	blockAndPvtData := &ledger.BlockAndPvtData{
   172  		Block:          block,
   173  		PvtData:        make(ledger.TxPvtDataMap),
   174  		MissingPvtData: make(ledger.TxMissingPvtDataMap),
   175  	}
   176  
   177  	exist, err := c.DoesPvtDataInfoExistInLedger(block.Header.Number)
   178  	if err != nil {
   179  		return err
   180  	}
   181  	if exist {
   182  		commitOpts := &ledger.CommitOptions{FetchPvtDataFromLedger: true}
   183  		return c.CommitLegacy(blockAndPvtData, commitOpts)
   184  	}
   185  
   186  	listMissingPrivateDataDurationHistogram := c.metrics.ListMissingPrivateDataDuration.With("channel", c.ChainID)
   187  	fetchDurationHistogram := c.metrics.FetchDuration.With("channel", c.ChainID)
   188  	purgeDurationHistogram := c.metrics.PurgeDuration.With("channel", c.ChainID)
   189  	pdp := &PvtdataProvider{
   190  		mspID:                                   c.mspID,
   191  		selfSignedData:                          c.selfSignedData,
   192  		logger:                                  logger.With("channel", c.ChainID),
   193  		listMissingPrivateDataDurationHistogram: listMissingPrivateDataDurationHistogram,
   194  		fetchDurationHistogram:                  fetchDurationHistogram,
   195  		purgeDurationHistogram:                  purgeDurationHistogram,
   196  		transientStore:                          c.store,
   197  		pullRetryThreshold:                      c.pullRetryThreshold,
   198  		prefetchedPvtdata:                       privateDataSets,
   199  		transientBlockRetention:                 c.transientBlockRetention,
   200  		channelID:                               c.ChainID,
   201  		blockNum:                                block.Header.Number,
   202  		storePvtdataOfInvalidTx:                 c.Support.CapabilityProvider.Capabilities().StorePvtDataOfInvalidTx(),
   203  		skipPullingInvalidTransactions:          c.skipPullingInvalidTransactions,
   204  		fetcher:                                 c.Fetcher,
   205  		idDeserializerFactory:                   c.idDeserializerFactory,
   206  	}
   207  	pvtdataToRetrieve, err := c.getTxPvtdataInfoFromBlock(block)
   208  	if err != nil {
   209  		c.logger.Warningf("Failed to get private data info from block: %s", err)
   210  		return err
   211  	}
   212  
   213  	// Retrieve the private data.
   214  	// RetrievePvtdata checks this peer's eligibility and then retrieves the data from the cache, the transient store, or a remote peer.
   215  	retrievedPvtdata, err := pdp.RetrievePvtdata(pvtdataToRetrieve)
   216  	if err != nil {
   217  		c.logger.Warningf("Failed to retrieve pvtdata: %s", err)
   218  		return err
   219  	}
   220  
   221  	blockAndPvtData.PvtData = retrievedPvtdata.blockPvtdata.PvtData
   222  	blockAndPvtData.MissingPvtData = retrievedPvtdata.blockPvtdata.MissingPvtData
   223  
   224  	// commit block and private data
   225  	commitStart := time.Now()
   226  	err = c.CommitLegacy(blockAndPvtData, &ledger.CommitOptions{})
   227  	c.reportCommitDuration(time.Since(commitStart))
   228  	if err != nil {
   229  		return errors.Wrap(err, "commit failed")
   230  	}
   231  
   232  	// Purge transactions
   233  	go retrievedPvtdata.Purge()
   234  
   235  	return nil
   236  }
   237  
   238  // StorePvtData persists private data into the transient store
   239  func (c *coordinator) StorePvtData(txID string, privData *protostransientstore.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error {
   240  	return c.store.Persist(txID, blkHeight, privData)
   241  }
   242  
   243  // GetPvtDataAndBlockByNum gets the block by number and also returns all related private data
   244  // that the requesting peer is eligible for.
   245  // The order of private data in the PvtDataCollections slice does not imply the order of the
   246  // related transactions in the block; to determine the correct placement,
   247  // read the TxPvtData.SeqInBlock field
   248  func (c *coordinator) GetPvtDataAndBlockByNum(seqNum uint64, peerAuthInfo protoutil.SignedData) (*common.Block, util.PvtDataCollections, error) {
   249  	blockAndPvtData, err := c.Committer.GetPvtDataAndBlockByNum(seqNum)
   250  	if err != nil {
   251  		return nil, nil, err
   252  	}
   253  
   254  	seqs2Namespaces := aggregatedCollections{}
   255  	for seqInBlock := range blockAndPvtData.Block.Data.Data {
   256  		txPvtDataItem, exists := blockAndPvtData.PvtData[uint64(seqInBlock)]
   257  		if !exists {
   258  			continue
   259  		}
   260  
   261  		// Iterate through the private write sets and include them in the response if the requesting peer is eligible for them
   262  		for _, ns := range txPvtDataItem.WriteSet.NsPvtRwset {
   263  			for _, col := range ns.CollectionPvtRwset {
   264  				cc := privdata.CollectionCriteria{
   265  					Channel:    c.ChainID,
   266  					Namespace:  ns.Namespace,
   267  					Collection: col.CollectionName,
   268  				}
   269  				sp, err := c.CollectionStore.RetrieveCollectionAccessPolicy(cc)
   270  				if err != nil {
   271  					c.logger.Warningf("Failed obtaining policy for collection criteria [%#v]: %s", cc, err)
   272  					continue
   273  				}
   274  				isAuthorized := sp.AccessFilter()
   275  				if isAuthorized == nil {
   276  					c.logger.Warningf("Failed obtaining filter for collection criteria [%#v]", cc)
   277  					continue
   278  				}
   279  				if !isAuthorized(peerAuthInfo) {
   280  					c.logger.Debugf("Skipping collection criteria [%#v] because peer isn't authorized", cc)
   281  					continue
   282  				}
   283  				seqs2Namespaces.addCollection(uint64(seqInBlock), txPvtDataItem.WriteSet.DataModel, ns.Namespace, col)
   284  			}
   285  		}
   286  	}
   287  
   288  	return blockAndPvtData.Block, seqs2Namespaces.asPrivateData(), nil
   289  }
   290  
   291  // getTxPvtdataInfoFromBlock parses the block transactions and returns the list of private data items in the block.
   292  // Note that this peer's eligibility for the private data is not checked here.
   293  func (c *coordinator) getTxPvtdataInfoFromBlock(block *common.Block) ([]*ledger.TxPvtdataInfo, error) {
   294  	txPvtdataItemsFromBlock := []*ledger.TxPvtdataInfo{}
   295  
   296  	if block.Metadata == nil || len(block.Metadata.Metadata) <= int(common.BlockMetadataIndex_TRANSACTIONS_FILTER) {
   297  		return nil, errors.New("Block.Metadata is nil or Block.Metadata lacks a Tx filter bitmap")
   298  	}
   299  	txsFilter := txValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
   300  	data := block.Data.Data
   301  	if len(txsFilter) != len(block.Data.Data) {
   302  		return nil, errors.Errorf("block data size(%d) is different from Tx filter size(%d)", len(block.Data.Data), len(txsFilter))
   303  	}
   304  
   305  	for seqInBlock, txEnvBytes := range data {
   306  		invalid := txsFilter[seqInBlock] != uint8(peer.TxValidationCode_VALID)
   307  		txInfo, err := getTxInfoFromTransactionBytes(txEnvBytes)
   308  		if err != nil {
   309  			continue
   310  		}
   311  
   312  		colPvtdataInfo := []*ledger.CollectionPvtdataInfo{}
   313  		for _, ns := range txInfo.txRWSet.NsRwSets {
   314  			for _, hashedCollection := range ns.CollHashedRwSets {
   315  				// skip if no writes
   316  				if !containsWrites(txInfo.txID, ns.NameSpace, hashedCollection) {
   317  					continue
   318  				}
   319  				cc := privdata.CollectionCriteria{
   320  					Channel:    txInfo.channelID,
   321  					Namespace:  ns.NameSpace,
   322  					Collection: hashedCollection.CollectionName,
   323  				}
   324  
   325  				colConfig, err := c.CollectionStore.RetrieveCollectionConfig(cc)
   326  				if err != nil {
   327  					c.logger.Warningf("Failed to retrieve collection config for collection criteria [%#v]: %s", cc, err)
   328  					return nil, err
   329  				}
   330  				col := &ledger.CollectionPvtdataInfo{
   331  					Namespace:        ns.NameSpace,
   332  					Collection:       hashedCollection.CollectionName,
   333  					ExpectedHash:     hashedCollection.PvtRwSetHash,
   334  					CollectionConfig: colConfig,
   335  					Endorsers:        txInfo.endorsements,
   336  				}
   337  				colPvtdataInfo = append(colPvtdataInfo, col)
   338  			}
   339  		}
   340  		txPvtdataToRetrieve := &ledger.TxPvtdataInfo{
   341  			TxID:                  txInfo.txID,
   342  			Invalid:               invalid,
   343  			SeqInBlock:            uint64(seqInBlock),
   344  			CollectionPvtdataInfo: colPvtdataInfo,
   345  		}
   346  		txPvtdataItemsFromBlock = append(txPvtdataItemsFromBlock, txPvtdataToRetrieve)
   347  	}
   348  
   349  	return txPvtdataItemsFromBlock, nil
   350  }
   351  
   352  func (c *coordinator) reportValidationDuration(duration time.Duration) {
   353  	c.metrics.ValidationDuration.With("channel", c.ChainID).Observe(duration.Seconds())
   354  }
   355  
   356  func (c *coordinator) reportCommitDuration(duration time.Duration) {
   357  	c.metrics.CommitPrivateDataDuration.With("channel", c.ChainID).Observe(duration.Seconds())
   358  }
   359  
   360  type seqAndDataModel struct {
   361  	seq       uint64
   362  	dataModel rwset.TxReadWriteSet_DataModel
   363  }
   364  
   365  // map from seqAndDataModel to:
   366  //     map from namespace to []*rwset.CollectionPvtReadWriteSet
   367  type aggregatedCollections map[seqAndDataModel]map[string][]*rwset.CollectionPvtReadWriteSet
   368  
   369  func (ac aggregatedCollections) addCollection(seqInBlock uint64, dm rwset.TxReadWriteSet_DataModel, namespace string, col *rwset.CollectionPvtReadWriteSet) {
   370  	seq := seqAndDataModel{
   371  		dataModel: dm,
   372  		seq:       seqInBlock,
   373  	}
   374  	if _, exists := ac[seq]; !exists {
   375  		ac[seq] = make(map[string][]*rwset.CollectionPvtReadWriteSet)
   376  	}
   377  	ac[seq][namespace] = append(ac[seq][namespace], col)
   378  }
   379  
   380  func (ac aggregatedCollections) asPrivateData() []*ledger.TxPvtData {
   381  	var data []*ledger.TxPvtData
   382  	for seq, ns := range ac {
   383  		txPrivateData := &ledger.TxPvtData{
   384  			SeqInBlock: seq.seq,
   385  			WriteSet: &rwset.TxPvtReadWriteSet{
   386  				DataModel: seq.dataModel,
   387  			},
   388  		}
   389  		for namespaceName, cols := range ns {
   390  			txPrivateData.WriteSet.NsPvtRwset = append(txPrivateData.WriteSet.NsPvtRwset, &rwset.NsPvtReadWriteSet{
   391  				Namespace:          namespaceName,
   392  				CollectionPvtRwset: cols,
   393  			})
   394  		}
   395  		data = append(data, txPrivateData)
   396  	}
   397  	return data
   398  }
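
        // For example (hypothetical collection read-write sets colRWSet1 and colRWSet2),
        // adding two collections of the same transaction groups them under a single
        // TxPvtData entry for that sequence number:
        //
        //	ac := aggregatedCollections{}
        //	ac.addCollection(3, rwset.TxReadWriteSet_KV, "mycc", colRWSet1)
        //	ac.addCollection(3, rwset.TxReadWriteSet_KV, "mycc", colRWSet2)
        //	pvt := ac.asPrivateData() // one *ledger.TxPvtData with SeqInBlock == 3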
   399  
   400  type txInfo struct {
   401  	channelID    string
   402  	txID         string
   403  	endorsements []*peer.Endorsement
   404  	txRWSet      *rwsetutil.TxRwSet
   405  }
   406  
   407  // getTxInfoFromTransactionBytes parses a transaction and returns info required for private data retrieval
   408  func getTxInfoFromTransactionBytes(envBytes []byte) (*txInfo, error) {
   409  	txInfo := &txInfo{}
   410  	env, err := protoutil.GetEnvelopeFromBlock(envBytes)
   411  	if err != nil {
   412  		logger.Warningf("Invalid envelope: %s", err)
   413  		return nil, err
   414  	}
   415  
   416  	payload, err := protoutil.UnmarshalPayload(env.Payload)
   417  	if err != nil {
   418  		logger.Warningf("Invalid payload: %s", err)
   419  		return nil, err
   420  	}
   421  	if payload.Header == nil {
   422  		err := errors.New("payload header is nil")
   423  		logger.Warningf("Invalid tx: %s", err)
   424  		return nil, err
   425  	}
   426  
   427  	chdr, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader)
   428  	if err != nil {
   429  		logger.Warningf("Invalid channel header: %s", err)
   430  		return nil, err
   431  	}
   432  	txInfo.channelID = chdr.ChannelId
   433  	txInfo.txID = chdr.TxId
   434  
   435  	if chdr.Type != int32(common.HeaderType_ENDORSER_TRANSACTION) {
   436  		err := errors.New("header type is not an endorser transaction")
   437  		logger.Debugf("Invalid transaction type: %s", err)
   438  		return nil, err
   439  	}
   440  
   441  	respPayload, err := protoutil.GetActionFromEnvelope(envBytes)
   442  	if err != nil {
   443  		logger.Warningf("Failed obtaining action from envelope: %s", err)
   444  		return nil, err
   445  	}
   446  
   447  	tx, err := protoutil.UnmarshalTransaction(payload.Data)
   448  	if err != nil {
   449  		logger.Warningf("Invalid transaction in payload data for tx [%s]: %s", chdr.TxId, err)
   450  		return nil, err
   451  	}
   452  
   453  	ccActionPayload, err := protoutil.UnmarshalChaincodeActionPayload(tx.Actions[0].Payload)
   454  	if err != nil {
   455  		logger.Warningf("Invalid chaincode action in payload for tx [%s]: %s", chdr.TxId, err)
   456  		return nil, err
   457  	}
   458  
   459  	if ccActionPayload.Action == nil {
   460  		logger.Warningf("Action in ChaincodeActionPayload for tx [%s] is nil", chdr.TxId)
   461  		return nil, errors.Errorf("action in ChaincodeActionPayload for tx [%s] is nil", chdr.TxId)
   462  	}
   463  	txInfo.endorsements = ccActionPayload.Action.Endorsements
   464  
   465  	txRWSet := &rwsetutil.TxRwSet{}
   466  	if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil {
   467  		logger.Warningf("Failed obtaining TxRwSet from ChaincodeAction's results: %s", err)
   468  		return nil, err
   469  	}
   470  	txInfo.txRWSet = txRWSet
   471  
   472  	return txInfo, nil
   473  }
   474  
   475  // containsWrites checks whether the given CollHashedRwSet contains writes
   476  func containsWrites(txID string, namespace string, colHashedRWSet *rwsetutil.CollHashedRwSet) bool {
   477  	if colHashedRWSet.HashedRwSet == nil {
   478  		logger.Warningf("HashedRWSet of tx [%s], namespace [%s], collection [%s] is nil", txID, namespace, colHashedRWSet.CollectionName)
   479  		return false
   480  	}
   481  	if len(colHashedRWSet.HashedRwSet.HashedWrites) == 0 && len(colHashedRWSet.HashedRwSet.MetadataWrites) == 0 {
   482  		logger.Debugf("HashedRWSet of tx [%s], namespace [%s], collection [%s] doesn't contain writes", txID, namespace, colHashedRWSet.CollectionName)
   483  		return false
   484  	}
   485  	return true
   486  }