github.com/yacovm/fabric@v2.0.0-alpha.0.20191128145320-c5d4087dc723+incompatible/gossip/privdata/coordinator.go (about) 1 /* 2 Copyright IBM Corp. All Rights Reserved. 3 4 SPDX-License-Identifier: Apache-2.0 5 */ 6 7 package privdata 8 9 import ( 10 "time" 11 12 "github.com/hyperledger/fabric-protos-go/common" 13 "github.com/hyperledger/fabric-protos-go/ledger/rwset" 14 "github.com/hyperledger/fabric-protos-go/peer" 15 protostransientstore "github.com/hyperledger/fabric-protos-go/transientstore" 16 "github.com/hyperledger/fabric/common/channelconfig" 17 "github.com/hyperledger/fabric/core/committer" 18 "github.com/hyperledger/fabric/core/committer/txvalidator" 19 "github.com/hyperledger/fabric/core/common/privdata" 20 "github.com/hyperledger/fabric/core/ledger" 21 "github.com/hyperledger/fabric/core/ledger/kvledger/txmgmt/rwsetutil" 22 "github.com/hyperledger/fabric/core/transientstore" 23 "github.com/hyperledger/fabric/gossip/metrics" 24 privdatacommon "github.com/hyperledger/fabric/gossip/privdata/common" 25 "github.com/hyperledger/fabric/gossip/util" 26 "github.com/hyperledger/fabric/protoutil" 27 "github.com/pkg/errors" 28 ) 29 30 const pullRetrySleepInterval = time.Second 31 32 var logger = util.GetLogger(util.PrivateDataLogger, "") 33 34 //go:generate mockery -dir . -name CollectionStore -case underscore -output mocks/ 35 36 // CollectionStore is the local interface used to generate mocks for foreign interface. 37 type CollectionStore interface { 38 privdata.CollectionStore 39 } 40 41 //go:generate mockery -dir . -name Committer -case underscore -output mocks/ 42 43 // Committer is the local interface used to generate mocks for foreign interface. 44 type Committer interface { 45 committer.Committer 46 } 47 48 // Coordinator orchestrates the flow of the new 49 // blocks arrival and in flight transient data, responsible 50 // to complete missing parts of transient data for given block. 51 type Coordinator interface { 52 // StoreBlock deliver new block with underlined private data 53 // returns missing transaction ids 54 StoreBlock(block *common.Block, data util.PvtDataCollections) error 55 56 // StorePvtData used to persist private data into transient store 57 StorePvtData(txid string, privData *protostransientstore.TxPvtReadWriteSetWithConfigInfo, blckHeight uint64) error 58 59 // GetPvtDataAndBlockByNum gets block by number and also returns all related private data 60 // that requesting peer is eligible for. 61 // The order of private data in slice of PvtDataCollections doesn't imply the order of 62 // transactions in the block related to these private data, to get the correct placement 63 // need to read TxPvtData.SeqInBlock field 64 GetPvtDataAndBlockByNum(seqNum uint64, peerAuth protoutil.SignedData) (*common.Block, util.PvtDataCollections, error) 65 66 // Get recent block sequence number 67 LedgerHeight() (uint64, error) 68 69 // Close coordinator, shuts down coordinator service 70 Close() 71 } 72 73 type dig2sources map[privdatacommon.DigKey][]*peer.Endorsement 74 75 func (d2s dig2sources) keys() []privdatacommon.DigKey { 76 res := make([]privdatacommon.DigKey, 0, len(d2s)) 77 for dig := range d2s { 78 res = append(res, dig) 79 } 80 return res 81 } 82 83 // Fetcher interface which defines API to fetch missing 84 // private data elements 85 type Fetcher interface { 86 fetch(dig2src dig2sources) (*privdatacommon.FetchedPvtDataContainer, error) 87 } 88 89 //go:generate mockery -dir ./ -name CapabilityProvider -case underscore -output mocks/ 90 91 // CapabilityProvider contains functions to retrieve capability information for a channel 92 type CapabilityProvider interface { 93 // Capabilities defines the capabilities for the application portion of this channel 94 Capabilities() channelconfig.ApplicationCapabilities 95 } 96 97 // Support encapsulates set of interfaces to 98 // aggregate required functionality by single struct 99 type Support struct { 100 ChainID string 101 privdata.CollectionStore 102 txvalidator.Validator 103 committer.Committer 104 Fetcher 105 CapabilityProvider 106 } 107 108 // CoordinatorConfig encapsulates the config that is passed to a new coordinator 109 type CoordinatorConfig struct { 110 // TransientBlockRetention indicates the number of blocks to retain in the transient store 111 // when purging below height on commiting every TransientBlockRetention-th block 112 TransientBlockRetention uint64 113 // PullRetryThreshold indicates the max duration an attempted fetch from a remote peer will retry 114 // for before giving up and leaving the private data as missing 115 PullRetryThreshold time.Duration 116 // SkipPullingInvalidTransactions if true will skip the fetch from remote peer step for transactions 117 // marked as invalid 118 SkipPullingInvalidTransactions bool 119 } 120 121 type coordinator struct { 122 selfSignedData protoutil.SignedData 123 Support 124 store *transientstore.Store 125 transientBlockRetention uint64 126 metrics *metrics.PrivdataMetrics 127 pullRetryThreshold time.Duration 128 skipPullingInvalidTransactions bool 129 idDeserializerFactory IdentityDeserializerFactory 130 } 131 132 // NewCoordinator creates a new instance of coordinator 133 func NewCoordinator(support Support, store *transientstore.Store, selfSignedData protoutil.SignedData, metrics *metrics.PrivdataMetrics, 134 config CoordinatorConfig, idDeserializerFactory IdentityDeserializerFactory) Coordinator { 135 return &coordinator{Support: support, 136 store: store, 137 selfSignedData: selfSignedData, 138 transientBlockRetention: config.TransientBlockRetention, 139 metrics: metrics, 140 pullRetryThreshold: config.PullRetryThreshold, 141 skipPullingInvalidTransactions: config.SkipPullingInvalidTransactions, 142 idDeserializerFactory: idDeserializerFactory, 143 } 144 } 145 146 // StoreBlock stores block with private data into the ledger 147 func (c *coordinator) StoreBlock(block *common.Block, privateDataSets util.PvtDataCollections) error { 148 if block.Data == nil { 149 return errors.New("Block data is empty") 150 } 151 if block.Header == nil { 152 return errors.New("Block header is nil") 153 } 154 155 logger.Infof("[%s] Received block [%d] from buffer", c.ChainID, block.Header.Number) 156 157 logger.Debugf("[%s] Validating block [%d]", c.ChainID, block.Header.Number) 158 159 validationStart := time.Now() 160 err := c.Validator.Validate(block) 161 c.reportValidationDuration(time.Since(validationStart)) 162 if err != nil { 163 logger.Errorf("Validation failed: %+v", err) 164 return err 165 } 166 167 blockAndPvtData := &ledger.BlockAndPvtData{ 168 Block: block, 169 PvtData: make(ledger.TxPvtDataMap), 170 MissingPvtData: make(ledger.TxMissingPvtDataMap), 171 } 172 173 exist, err := c.DoesPvtDataInfoExistInLedger(block.Header.Number) 174 if err != nil { 175 return err 176 } 177 if exist { 178 commitOpts := &ledger.CommitOptions{FetchPvtDataFromLedger: true} 179 return c.CommitLegacy(blockAndPvtData, commitOpts) 180 } 181 182 listMissingPrivateDataDurationHistogram := c.metrics.ListMissingPrivateDataDuration.With("channel", c.ChainID) 183 fetchDurationHistogram := c.metrics.FetchDuration.With("channel", c.ChainID) 184 purgeDurationHistogram := c.metrics.PurgeDuration.With("channel", c.ChainID) 185 pdp := &PvtdataProvider{ 186 selfSignedData: c.selfSignedData, 187 logger: logger.With("channel", c.ChainID), 188 listMissingPrivateDataDurationHistogram: listMissingPrivateDataDurationHistogram, 189 fetchDurationHistogram: fetchDurationHistogram, 190 purgeDurationHistogram: purgeDurationHistogram, 191 transientStore: c.store, 192 pullRetryThreshold: c.pullRetryThreshold, 193 prefetchedPvtdata: privateDataSets, 194 transientBlockRetention: c.transientBlockRetention, 195 channelID: c.ChainID, 196 blockNum: block.Header.Number, 197 storePvtdataOfInvalidTx: c.Support.CapabilityProvider.Capabilities().StorePvtDataOfInvalidTx(), 198 skipPullingInvalidTransactions: c.skipPullingInvalidTransactions, 199 fetcher: c.Fetcher, 200 idDeserializerFactory: c.idDeserializerFactory, 201 } 202 pvtdataToRetrieve, err := c.getTxPvtdataInfoFromBlock(block) 203 if err != nil { 204 logger.Warningf("Failed to get private data info from block: %s", err) 205 return err 206 } 207 208 // Retrieve the private data. 209 // RetrievePvtdata checks this peer's eligibility and then retreives from cache, transient store, or from a remote peer. 210 retrievedPvtdata, err := pdp.RetrievePvtdata(pvtdataToRetrieve) 211 if err != nil { 212 logger.Warningf("Failed to retrieve pvtdata: %s", err) 213 return err 214 } 215 216 blockAndPvtData.PvtData = retrievedPvtdata.blockPvtdata.PvtData 217 blockAndPvtData.MissingPvtData = retrievedPvtdata.blockPvtdata.MissingPvtData 218 219 // commit block and private data 220 commitStart := time.Now() 221 err = c.CommitLegacy(blockAndPvtData, &ledger.CommitOptions{}) 222 c.reportCommitDuration(time.Since(commitStart)) 223 if err != nil { 224 return errors.Wrap(err, "commit failed") 225 } 226 227 // Purge transactions 228 retrievedPvtdata.Purge() 229 230 return nil 231 } 232 233 // StorePvtData used to persist private date into transient store 234 func (c *coordinator) StorePvtData(txID string, privData *protostransientstore.TxPvtReadWriteSetWithConfigInfo, blkHeight uint64) error { 235 return c.store.Persist(txID, blkHeight, privData) 236 } 237 238 // GetPvtDataAndBlockByNum gets block by number and also returns all related private data 239 // that requesting peer is eligible for. 240 // The order of private data in slice of PvtDataCollections doesn't imply the order of 241 // transactions in the block related to these private data, to get the correct placement 242 // need to read TxPvtData.SeqInBlock field 243 func (c *coordinator) GetPvtDataAndBlockByNum(seqNum uint64, peerAuthInfo protoutil.SignedData) (*common.Block, util.PvtDataCollections, error) { 244 blockAndPvtData, err := c.Committer.GetPvtDataAndBlockByNum(seqNum) 245 if err != nil { 246 return nil, nil, err 247 } 248 249 seqs2Namespaces := aggregatedCollections{} 250 for seqInBlock := range blockAndPvtData.Block.Data.Data { 251 txPvtDataItem, exists := blockAndPvtData.PvtData[uint64(seqInBlock)] 252 if !exists { 253 continue 254 } 255 256 // Iterate through the private write sets and include them in response if requesting peer is eligible for it 257 for _, ns := range txPvtDataItem.WriteSet.NsPvtRwset { 258 for _, col := range ns.CollectionPvtRwset { 259 cc := privdata.CollectionCriteria{ 260 Channel: c.ChainID, 261 Namespace: ns.Namespace, 262 Collection: col.CollectionName, 263 } 264 sp, err := c.CollectionStore.RetrieveCollectionAccessPolicy(cc) 265 if err != nil { 266 logger.Warningf("Failed obtaining policy for collection criteria [%#v]: %s", cc, err) 267 continue 268 } 269 isAuthorized := sp.AccessFilter() 270 if isAuthorized == nil { 271 logger.Warningf("Failed obtaining filter for collection criteria [%#v]", cc) 272 continue 273 } 274 if !isAuthorized(peerAuthInfo) { 275 logger.Debugf("Skipping collection criteria [%#v] because peer isn't authorized", cc) 276 continue 277 } 278 seqs2Namespaces.addCollection(uint64(seqInBlock), txPvtDataItem.WriteSet.DataModel, ns.Namespace, col) 279 } 280 } 281 } 282 283 return blockAndPvtData.Block, seqs2Namespaces.asPrivateData(), nil 284 } 285 286 // getTxPvtdataInfoFromBlock parses the block transactions and returns the list of private data items in the block. 287 // Note that this peer's eligibility for the private data is not checked here. 288 func (c *coordinator) getTxPvtdataInfoFromBlock(block *common.Block) ([]*ledger.TxPvtdataInfo, error) { 289 txPvtdataItemsFromBlock := []*ledger.TxPvtdataInfo{} 290 291 if block.Metadata == nil || len(block.Metadata.Metadata) <= int(common.BlockMetadataIndex_TRANSACTIONS_FILTER) { 292 return nil, errors.New("Block.Metadata is nil or Block.Metadata lacks a Tx filter bitmap") 293 } 294 txsFilter := txValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER]) 295 data := block.Data.Data 296 if len(txsFilter) != len(block.Data.Data) { 297 return nil, errors.Errorf("block data size(%d) is different from Tx filter size(%d)", len(block.Data.Data), len(txsFilter)) 298 } 299 300 for seqInBlock, txEnvBytes := range data { 301 invalid := txsFilter[seqInBlock] != uint8(peer.TxValidationCode_VALID) 302 txInfo, err := getTxInfoFromTransactionBytes(txEnvBytes) 303 if err != nil { 304 continue 305 } 306 307 colPvtdataInfo := []*ledger.CollectionPvtdataInfo{} 308 for _, ns := range txInfo.txRWSet.NsRwSets { 309 for _, hashedCollection := range ns.CollHashedRwSets { 310 // skip if no writes 311 if !containsWrites(txInfo.txID, ns.NameSpace, hashedCollection) { 312 continue 313 } 314 cc := privdata.CollectionCriteria{ 315 Channel: txInfo.channelID, 316 Namespace: ns.NameSpace, 317 Collection: hashedCollection.CollectionName, 318 } 319 320 colConfig, err := c.CollectionStore.RetrieveCollectionConfig(cc) 321 if err != nil { 322 logger.Warningf("Failed to retrieve collection config for collection criteria [%#v]: %s", cc, err) 323 return nil, err 324 } 325 col := &ledger.CollectionPvtdataInfo{ 326 Namespace: ns.NameSpace, 327 Collection: hashedCollection.CollectionName, 328 ExpectedHash: hashedCollection.PvtRwSetHash, 329 CollectionConfig: colConfig, 330 Endorsers: txInfo.endorsements, 331 } 332 colPvtdataInfo = append(colPvtdataInfo, col) 333 } 334 } 335 txPvtdataToRetrieve := &ledger.TxPvtdataInfo{ 336 TxID: txInfo.txID, 337 Invalid: invalid, 338 SeqInBlock: uint64(seqInBlock), 339 CollectionPvtdataInfo: colPvtdataInfo, 340 } 341 txPvtdataItemsFromBlock = append(txPvtdataItemsFromBlock, txPvtdataToRetrieve) 342 } 343 344 return txPvtdataItemsFromBlock, nil 345 } 346 347 func (c *coordinator) reportValidationDuration(time time.Duration) { 348 c.metrics.ValidationDuration.With("channel", c.ChainID).Observe(time.Seconds()) 349 } 350 351 func (c *coordinator) reportCommitDuration(time time.Duration) { 352 c.metrics.CommitPrivateDataDuration.With("channel", c.ChainID).Observe(time.Seconds()) 353 } 354 355 type seqAndDataModel struct { 356 seq uint64 357 dataModel rwset.TxReadWriteSet_DataModel 358 } 359 360 // map from seqAndDataModel to: 361 // map from namespace to []*rwset.CollectionPvtReadWriteSet 362 type aggregatedCollections map[seqAndDataModel]map[string][]*rwset.CollectionPvtReadWriteSet 363 364 func (ac aggregatedCollections) addCollection(seqInBlock uint64, dm rwset.TxReadWriteSet_DataModel, namespace string, col *rwset.CollectionPvtReadWriteSet) { 365 seq := seqAndDataModel{ 366 dataModel: dm, 367 seq: seqInBlock, 368 } 369 if _, exists := ac[seq]; !exists { 370 ac[seq] = make(map[string][]*rwset.CollectionPvtReadWriteSet) 371 } 372 ac[seq][namespace] = append(ac[seq][namespace], col) 373 } 374 375 func (ac aggregatedCollections) asPrivateData() []*ledger.TxPvtData { 376 var data []*ledger.TxPvtData 377 for seq, ns := range ac { 378 txPrivateData := &ledger.TxPvtData{ 379 SeqInBlock: seq.seq, 380 WriteSet: &rwset.TxPvtReadWriteSet{ 381 DataModel: seq.dataModel, 382 }, 383 } 384 for namespaceName, cols := range ns { 385 txPrivateData.WriteSet.NsPvtRwset = append(txPrivateData.WriteSet.NsPvtRwset, &rwset.NsPvtReadWriteSet{ 386 Namespace: namespaceName, 387 CollectionPvtRwset: cols, 388 }) 389 } 390 data = append(data, txPrivateData) 391 } 392 return data 393 } 394 395 type txInfo struct { 396 channelID string 397 txID string 398 endorsements []*peer.Endorsement 399 txRWSet *rwsetutil.TxRwSet 400 } 401 402 // getTxInfoFromTransactionBytes parses a transaction and returns info required for private data retrieval 403 func getTxInfoFromTransactionBytes(envBytes []byte) (*txInfo, error) { 404 txInfo := &txInfo{} 405 env, err := protoutil.GetEnvelopeFromBlock(envBytes) 406 if err != nil { 407 logger.Warningf("Invalid envelope: %s", err) 408 return nil, err 409 } 410 411 payload, err := protoutil.UnmarshalPayload(env.Payload) 412 if err != nil { 413 logger.Warningf("Invalid payload: %s", err) 414 return nil, err 415 } 416 if payload.Header == nil { 417 err := errors.New("payload header is nil") 418 logger.Warningf("Invalid tx: %s", err) 419 return nil, err 420 } 421 422 chdr, err := protoutil.UnmarshalChannelHeader(payload.Header.ChannelHeader) 423 if err != nil { 424 logger.Warningf("Invalid channel header: %s", err) 425 return nil, err 426 } 427 txInfo.channelID = chdr.ChannelId 428 txInfo.txID = chdr.TxId 429 430 if chdr.Type != int32(common.HeaderType_ENDORSER_TRANSACTION) { 431 err := errors.New("header type is not an endorser transaction") 432 logger.Warningf("Invalid transaction type: %s", err) 433 return nil, err 434 } 435 436 respPayload, err := protoutil.GetActionFromEnvelope(envBytes) 437 if err != nil { 438 logger.Warningf("Failed obtaining action from envelope: %s", err) 439 return nil, err 440 } 441 442 tx, err := protoutil.UnmarshalTransaction(payload.Data) 443 if err != nil { 444 logger.Warningf("Invalid transaction in payload data for tx [%s]: %s", chdr.TxId, err) 445 return nil, err 446 } 447 448 ccActionPayload, err := protoutil.UnmarshalChaincodeActionPayload(tx.Actions[0].Payload) 449 if err != nil { 450 logger.Warningf("Invalid chaincode action in payload for tx [%s]: %s", chdr.TxId, err) 451 return nil, err 452 } 453 454 if ccActionPayload.Action == nil { 455 logger.Warningf("Action in ChaincodeActionPayload for tx [%s] is nil", chdr.TxId) 456 return nil, err 457 } 458 txInfo.endorsements = ccActionPayload.Action.Endorsements 459 460 txRWSet := &rwsetutil.TxRwSet{} 461 if err = txRWSet.FromProtoBytes(respPayload.Results); err != nil { 462 logger.Warningf("Failed obtaining TxRwSet from ChaincodeAction's results: %s", err) 463 return nil, err 464 } 465 txInfo.txRWSet = txRWSet 466 467 return txInfo, nil 468 } 469 470 // containsWrites checks whether the given CollHashedRwSet contains writes 471 func containsWrites(txID string, namespace string, colHashedRWSet *rwsetutil.CollHashedRwSet) bool { 472 if colHashedRWSet.HashedRwSet == nil { 473 logger.Warningf("HashedRWSet of tx [%s], namespace [%s], collection [%s] is nil", txID, namespace, colHashedRWSet.CollectionName) 474 return false 475 } 476 if len(colHashedRWSet.HashedRwSet.HashedWrites) == 0 && len(colHashedRWSet.HashedRwSet.MetadataWrites) == 0 { 477 logger.Debugf("HashedRWSet of tx [%s], namespace [%s], collection [%s] doesn't contain writes", txID, namespace, colHashedRWSet.CollectionName) 478 return false 479 } 480 return true 481 }