/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package privdata

import (
	"fmt"
	"io/ioutil"
	"os"
	"sort"
	"testing"
	"time"

	"github.com/hyperledger/fabric-protos-go/common"
	proto "github.com/hyperledger/fabric-protos-go/gossip"
	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
	mspproto "github.com/hyperledger/fabric-protos-go/msp"
	"github.com/hyperledger/fabric-protos-go/peer"
	tspb "github.com/hyperledger/fabric-protos-go/transientstore"
	"github.com/hyperledger/fabric/bccsp/factory"
	"github.com/hyperledger/fabric/common/metrics/disabled"
	util2 "github.com/hyperledger/fabric/common/util"
	"github.com/hyperledger/fabric/core/ledger"
	"github.com/hyperledger/fabric/core/transientstore"
	"github.com/hyperledger/fabric/gossip/metrics"
	privdatacommon "github.com/hyperledger/fabric/gossip/privdata/common"
	"github.com/hyperledger/fabric/gossip/privdata/mocks"
	"github.com/hyperledger/fabric/gossip/util"
	"github.com/hyperledger/fabric/msp"
	mspmgmt "github.com/hyperledger/fabric/msp/mgmt"
	msptesttools "github.com/hyperledger/fabric/msp/mgmt/testtools"
	"github.com/hyperledger/fabric/protoutil"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

// testSupport bundles the fixed inputs shared by every test in this file:
// the rwset pre-image and its SHA-256 hash, the channel ID, the block
// number, the endorser MSP IDs, and the peer's own signed data used for
// eligibility checks.
type testSupport struct {
	preHash, hash      []byte
	channelID          string
	blockNum           uint64
	endorsers          []string
	peerSelfSignedData protoutil.SignedData
}

// rwSet describes one transaction's private read-write set as used by the
// fixtures: which tx wrote which collections of which namespace, with the
// rwset pre-image/hash and the tx's sequence number inside the block.
type rwSet struct {
	txID          string
	namespace     string
	collections   []string
	preHash, hash []byte
	seqInBlock    uint64
}

func init() {
	// Keep gossip logging quiet during the tests.
	util.SetupTestLoggingWithLevel("INFO")
}

// TestRetrievePvtdata runs table-driven scenarios against
// PvtdataProvider.RetrievePvtdata, seeding private write sets into the
// three possible sources (prefetched cache, local transient store, remote
// peers via the mocked fetcher) and asserting the assembled BlockPvtdata
// plus the digests that were expected to be fetched from peers.
func TestRetrievePvtdata(t *testing.T) {
	err := msptesttools.LoadMSPSetupForTesting()
	require.NoError(t, err, fmt.Sprintf("Failed to setup local msp for testing, got err %s", err))

	// Build the peer's self-signed data and a serialized endorser identity
	// from the test MSP so collection access policies evaluate eligibility
	// against a real identity.
	identity := mspmgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
	serializedID, err := identity.Serialize()
	require.NoError(t, err, fmt.Sprintf("Serialize should have succeeded, got err %s", err))
	data := []byte{1, 2, 3}
	signature, err := identity.Sign(data)
	require.NoError(t, err, fmt.Sprintf("Could not sign identity, got err %s", err))
	peerSelfSignedData := protoutil.SignedData{
		Identity:  serializedID,
		Signature: signature,
		Data:      data,
	}
	endorser := protoutil.MarshalOrPanic(&mspproto.SerializedIdentity{
		Mspid:   identity.GetMSPIdentifier(),
		IdBytes: []byte(fmt.Sprintf("p0%s", identity.GetMSPIdentifier())),
	})

	ts := testSupport{
		preHash:            []byte("rws-pre-image"),
		hash:               util2.ComputeSHA256([]byte("rws-pre-image")),
		channelID:          "testchannelid",
		blockNum:           uint64(1),
		endorsers:          []string{identity.GetMSPIdentifier()},
		peerSelfSignedData: peerSelfSignedData,
	}

	// Collection infos the peer IS eligible for (policy matches our MSP ID)
	// and one it is NOT (policy requires a different org).
	ns1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
	ns1c2 := collectionPvtdataInfoFromTemplate("ns1", "c2", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
	ineligiblens1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", "different-org", ts.hash, endorser, signature)

	tests := []struct {
		scenario                                               string
		storePvtdataOfInvalidTx, skipPullingInvalidTransactions bool
		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer    []rwSet
		expectedDigKeys                                        []privdatacommon.DigKey
		pvtdataToRetrieve                                      []*ledger.TxPvtdataInfo
		expectedBlockPvtdata                                   *ledger.BlockPvtdata
	}{
		{
			// Scenario I
			scenario:                       "Scenario I: Only eligible private data in cache, no missing private data",
			storePvtdataOfInvalidTx:        true,
			skipPullingInvalidTransactions: false,
			rwSetsInCache: []rwSet{
				{
					txID:        "tx1",
					namespace:   "ns1",
					collections: []string{"c1", "c2"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  1,
				},
			},
			rwSetsInTransientStore: []rwSet{},
			rwSetsInPeer:           []rwSet{},
			expectedDigKeys:        []privdatacommon.DigKey{},
			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
				{
					TxID:       "tx1",
					Invalid:    false,
					SeqInBlock: 1,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
						ns1c2,
					},
				},
			},
			expectedBlockPvtdata: &ledger.BlockPvtdata{
				PvtData: ledger.TxPvtDataMap{
					1: &ledger.TxPvtData{
						SeqInBlock: 1,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1", "c2"},
									}),
								},
							},
						},
					},
				},
				MissingPvtData: ledger.TxMissingPvtDataMap{},
			},
		},
		{
			// Scenario II
			scenario:                       "Scenario II: No eligible private data, skip ineligible private data from all sources even if found in cache",
			storePvtdataOfInvalidTx:        true,
			skipPullingInvalidTransactions: false,
			rwSetsInCache: []rwSet{
				{
					txID:        "tx1",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  1,
				},
			},
			rwSetsInTransientStore: []rwSet{
				{
					txID:        "tx2",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  2,
				},
			},
			rwSetsInPeer: []rwSet{
				{
					txID:        "tx3",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  3,
				},
			},
			expectedDigKeys: []privdatacommon.DigKey{},
			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
				{
					TxID:       "tx1",
					Invalid:    false,
					SeqInBlock: 1,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ineligiblens1c1,
					},
				},
				{
					TxID:       "tx2",
					Invalid:    false,
					SeqInBlock: 2,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ineligiblens1c1,
					},
				},
				{
					TxID:       "tx3",
					Invalid:    false,
					SeqInBlock: 3,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ineligiblens1c1,
					},
				},
			},
			// Everything is reported missing-but-ineligible; nothing is fetched.
			expectedBlockPvtdata: &ledger.BlockPvtdata{
				PvtData: ledger.TxPvtDataMap{},
				MissingPvtData: ledger.TxMissingPvtDataMap{
					1: []*ledger.MissingPvtData{
						{
							Namespace:  "ns1",
							Collection: "c1",
							IsEligible: false,
						},
					},
					2: []*ledger.MissingPvtData{
						{
							Namespace:  "ns1",
							Collection: "c1",
							IsEligible: false,
						},
					},
					3: []*ledger.MissingPvtData{
						{
							Namespace:  "ns1",
							Collection: "c1",
							IsEligible: false,
						},
					},
				},
			},
		},
		{
			// Scenario III
			scenario:                       "Scenario III: Missing private data in cache, found in transient store",
			storePvtdataOfInvalidTx:        true,
			skipPullingInvalidTransactions: false,
			rwSetsInCache: []rwSet{
				{
					txID:        "tx1",
					namespace:   "ns1",
					collections: []string{"c1", "c2"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  1,
				},
			},
			rwSetsInTransientStore: []rwSet{
				{
					txID:        "tx2",
					namespace:   "ns1",
					collections: []string{"c2"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  2,
				},
			},
			rwSetsInPeer:    []rwSet{},
			expectedDigKeys: []privdatacommon.DigKey{},
			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
				{
					TxID:       "tx1",
					Invalid:    false,
					SeqInBlock: 1,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
						ns1c2,
					},
				},
				{
					TxID:       "tx2",
					Invalid:    false,
					SeqInBlock: 2,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c2,
					},
				},
			},
			expectedBlockPvtdata: &ledger.BlockPvtdata{
				PvtData: ledger.TxPvtDataMap{
					1: &ledger.TxPvtData{
						SeqInBlock: 1,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1", "c2"},
									}),
								},
							},
						},
					},
					2: &ledger.TxPvtData{
						SeqInBlock: 2,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c2"},
									}),
								},
							},
						},
					},
				},
				MissingPvtData: ledger.TxMissingPvtDataMap{},
			},
		},
		{
			// Scenario IV
			scenario:                       "Scenario IV: Missing private data in cache, found some in transient store and some in peer",
			storePvtdataOfInvalidTx:        true,
			skipPullingInvalidTransactions: false,
			rwSetsInCache: []rwSet{
				{
					txID:        "tx1",
					namespace:   "ns1",
					collections: []string{"c1", "c2"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  1,
				},
			},
			rwSetsInTransientStore: []rwSet{
				{
					txID:        "tx2",
					namespace:   "ns1",
					collections: []string{"c1", "c2"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  2,
				},
			},
			rwSetsInPeer: []rwSet{
				{
					txID:        "tx3",
					namespace:   "ns1",
					collections: []string{"c1", "c2"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  3,
				},
			},
			// Only tx3's collections should be pulled from peers.
			expectedDigKeys: []privdatacommon.DigKey{
				{
					TxId:       "tx3",
					Namespace:  "ns1",
					Collection: "c1",
					BlockSeq:   ts.blockNum,
					SeqInBlock: 3,
				},
				{
					TxId:       "tx3",
					Namespace:  "ns1",
					Collection: "c2",
					BlockSeq:   ts.blockNum,
					SeqInBlock: 3,
				},
			},
			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
				{
					TxID:       "tx1",
					Invalid:    false,
					SeqInBlock: 1,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
						ns1c2,
					},
				},
				{
					TxID:       "tx2",
					Invalid:    false,
					SeqInBlock: 2,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
						ns1c2,
					},
				},
				{
					TxID:       "tx3",
					Invalid:    false,
					SeqInBlock: 3,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
						ns1c2,
					},
				},
			},
			expectedBlockPvtdata: &ledger.BlockPvtdata{
				PvtData: ledger.TxPvtDataMap{
					1: &ledger.TxPvtData{
						SeqInBlock: 1,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1", "c2"},
									}),
								},
							},
						},
					},
					2: &ledger.TxPvtData{
						SeqInBlock: 2,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1", "c2"},
									}),
								},
							},
						},
					},
					3: &ledger.TxPvtData{
						SeqInBlock: 3,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1", "c2"},
									}),
								},
							},
						},
					},
				},
				MissingPvtData: ledger.TxMissingPvtDataMap{},
			},
		},
		{
			// Scenario V
			scenario:                       "Scenario V: Skip invalid txs when storePvtdataOfInvalidTx is false",
			storePvtdataOfInvalidTx:        false,
			skipPullingInvalidTransactions: false,
			rwSetsInCache: []rwSet{
				{
					txID:        "tx1",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  1,
				},
				{
					txID:        "tx2",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  2,
				},
			},
			rwSetsInTransientStore: []rwSet{},
			rwSetsInPeer:           []rwSet{},
			expectedDigKeys:        []privdatacommon.DigKey{},
			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
				{
					TxID:       "tx1",
					Invalid:    true,
					SeqInBlock: 1,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
					},
				},
				{
					TxID:       "tx2",
					Invalid:    false,
					SeqInBlock: 2,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
					},
				},
			},
			// Only the valid tx2 shows up in the result.
			expectedBlockPvtdata: &ledger.BlockPvtdata{
				PvtData: ledger.TxPvtDataMap{
					2: &ledger.TxPvtData{
						SeqInBlock: 2,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1"},
									}),
								},
							},
						},
					},
				},
				MissingPvtData: ledger.TxMissingPvtDataMap{},
			},
		},
		{
			// Scenario VI
			scenario:                       "Scenario VI: Don't skip invalid txs when storePvtdataOfInvalidTx is true",
			storePvtdataOfInvalidTx:        true,
			skipPullingInvalidTransactions: false,
			rwSetsInCache: []rwSet{
				{
					txID:        "tx1",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  1,
				},
				{
					txID:        "tx2",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  2,
				},
			},
			rwSetsInTransientStore: []rwSet{},
			rwSetsInPeer:           []rwSet{},
			expectedDigKeys:        []privdatacommon.DigKey{},
			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
				{
					TxID:       "tx1",
					Invalid:    true,
					SeqInBlock: 1,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
					},
				},
				{
					TxID:       "tx2",
					Invalid:    false,
					SeqInBlock: 2,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
					},
				},
			},
			expectedBlockPvtdata: &ledger.BlockPvtdata{
				PvtData: ledger.TxPvtDataMap{
					1: &ledger.TxPvtData{
						SeqInBlock: 1,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1"},
									}),
								},
							},
						},
					},
					2: &ledger.TxPvtData{
						SeqInBlock: 2,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1"},
									}),
								},
							},
						},
					},
				},
				MissingPvtData: ledger.TxMissingPvtDataMap{},
			},
		},
		{
			// Scenario VII
			scenario:                "Scenario VII: Can't find eligible tx from any source",
			storePvtdataOfInvalidTx: true,
			rwSetsInCache:           []rwSet{},
			rwSetsInTransientStore:  []rwSet{},
			rwSetsInPeer:            []rwSet{},
			expectedDigKeys: []privdatacommon.DigKey{
				{
					TxId:       "tx1",
					Namespace:  "ns1",
					Collection: "c1",
					BlockSeq:   ts.blockNum,
					SeqInBlock: 1,
				},
				{
					TxId:       "tx1",
					Namespace:  "ns1",
					Collection: "c2",
					BlockSeq:   ts.blockNum,
					SeqInBlock: 1,
				},
			},
			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
				{
					TxID:       "tx1",
					Invalid:    false,
					SeqInBlock: 1,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
						ns1c2,
					},
				},
			},
			// Data never materializes, so both collections are eligible-but-missing.
			expectedBlockPvtdata: &ledger.BlockPvtdata{
				PvtData: ledger.TxPvtDataMap{},
				MissingPvtData: ledger.TxMissingPvtDataMap{
					1: []*ledger.MissingPvtData{
						{
							Namespace:  "ns1",
							Collection: "c1",
							IsEligible: true,
						},
						{
							Namespace:  "ns1",
							Collection: "c2",
							IsEligible: true,
						},
					},
				},
			},
		},
		{
			// Scenario VIII
			scenario:                       "Scenario VIII: Extra data not requested",
			storePvtdataOfInvalidTx:        true,
			skipPullingInvalidTransactions: false,
			rwSetsInCache: []rwSet{
				{
					txID:        "tx1",
					namespace:   "ns1",
					collections: []string{"c1", "c2"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  1,
				},
			},
			rwSetsInTransientStore: []rwSet{
				{
					txID:        "tx2",
					namespace:   "ns1",
					collections: []string{"c1", "c2"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  2,
				},
			},
			rwSetsInPeer: []rwSet{
				{
					txID:        "tx3",
					namespace:   "ns1",
					collections: []string{"c1", "c2"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  3,
				},
			},
			expectedDigKeys: []privdatacommon.DigKey{
				{
					TxId:       "tx3",
					Namespace:  "ns1",
					Collection: "c1",
					BlockSeq:   ts.blockNum,
					SeqInBlock: 3,
				},
			},
			// Only requesting tx3, ns1, c1, should skip all extra data found in all sources
			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
				{
					TxID:       "tx3",
					Invalid:    false,
					SeqInBlock: 3,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
					},
				},
			},
			expectedBlockPvtdata: &ledger.BlockPvtdata{
				PvtData: ledger.TxPvtDataMap{
					3: &ledger.TxPvtData{
						SeqInBlock: 3,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1"},
									}),
								},
							},
						},
					},
				},
				MissingPvtData: ledger.TxMissingPvtDataMap{},
			},
		},
		{
			// Scenario IX
			scenario:                       "Scenario IX: Skip pulling invalid txs when skipPullingInvalidTransactions is true",
			storePvtdataOfInvalidTx:        true,
			skipPullingInvalidTransactions: true,
			rwSetsInCache: []rwSet{
				{
					txID:        "tx1",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  1,
				},
			},
			rwSetsInTransientStore: []rwSet{
				{
					txID:        "tx2",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					seqInBlock:  2,
				},
			},
			rwSetsInPeer: []rwSet{
				{
					txID:        "tx3",
					namespace:   "ns1",
					collections: []string{"c1"},
					preHash:     ts.preHash,
					hash:        ts.hash,
					// NOTE(review): seqInBlock is 2 here while tx3 is requested with
					// SeqInBlock 3 below — looks like a fixture typo. It is harmless
					// for this scenario because pulling from peers is skipped
					// entirely for invalid txs, but worth confirming.
					seqInBlock: 2,
				},
			},
			expectedDigKeys: []privdatacommon.DigKey{},
			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
				{
					TxID:       "tx1",
					Invalid:    true,
					SeqInBlock: 1,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
					},
				},
				{
					TxID:       "tx2",
					Invalid:    true,
					SeqInBlock: 2,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
					},
				},
				{
					TxID:       "tx3",
					Invalid:    true,
					SeqInBlock: 3,
					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
						ns1c1,
					},
				},
			},
			// tx1 and tx2 are still fetched despite being invalid
			expectedBlockPvtdata: &ledger.BlockPvtdata{
				PvtData: ledger.TxPvtDataMap{
					1: &ledger.TxPvtData{
						SeqInBlock: 1,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1"},
									}),
								},
							},
						},
					},
					2: &ledger.TxPvtData{
						SeqInBlock: 2,
						WriteSet: &rwset.TxPvtReadWriteSet{
							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
								{
									Namespace: "ns1",
									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
										preHash:     ts.preHash,
										collections: []string{"c1"},
									}),
								},
							},
						},
					},
				},
				// Only tx3 is missing since we skip pulling invalid tx from peers
				MissingPvtData: ledger.TxMissingPvtDataMap{
					3: []*ledger.MissingPvtData{
						{
							Namespace:  "ns1",
							Collection: "c1",
							IsEligible: true,
						},
					},
				},
			},
		},
	}

	// Each scenario runs as its own subtest against a fresh provider/store.
	for _, test := range tests {
		t.Run(test.scenario, func(t *testing.T) {
			testRetrievePvtdataSuccess(t, test.scenario, ts, test.storePvtdataOfInvalidTx, test.skipPullingInvalidTransactions,
				test.rwSetsInCache, test.rwSetsInTransientStore, test.rwSetsInPeer, test.expectedDigKeys, test.pvtdataToRetrieve, test.expectedBlockPvtdata)
		})
	}
}
// TestRetrievePvtdataFailure verifies that RetrievePvtdata surfaces an
// error when a requested collection carries an invalid (nil) member-orgs
// policy in its collection config.
func TestRetrievePvtdataFailure(t *testing.T) {
	err := msptesttools.LoadMSPSetupForTesting()
	require.NoError(t, err, fmt.Sprintf("Failed to setup local msp for testing, got err %s", err))

	identity := mspmgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
	serializedID, err := identity.Serialize()
	require.NoError(t, err, fmt.Sprintf("Serialize should have succeeded, got err %s", err))
	data := []byte{1, 2, 3}
	signature, err := identity.Sign(data)
	require.NoError(t, err, fmt.Sprintf("Could not sign identity, got err %s", err))
	peerSelfSignedData := protoutil.SignedData{
		Identity:  serializedID,
		Signature: signature,
		Data:      data,
	}
	endorser := protoutil.MarshalOrPanic(&mspproto.SerializedIdentity{
		Mspid:   identity.GetMSPIdentifier(),
		IdBytes: []byte(fmt.Sprintf("p0%s", identity.GetMSPIdentifier())),
	})

	ts := testSupport{
		preHash:            []byte("rws-pre-image"),
		hash:               util2.ComputeSHA256([]byte("rws-pre-image")),
		channelID:          "testchannelid",
		blockNum:           uint64(1),
		endorsers:          []string{identity.GetMSPIdentifier()},
		peerSelfSignedData: peerSelfSignedData,
	}

	// Start from a valid collection info and then break it by nil-ing out
	// the member-orgs policy, which RetrievePvtdata must reject.
	invalidns1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
	invalidns1c1.CollectionConfig.MemberOrgsPolicy = nil

	scenario := "Scenario I: Invalid collection config policy"
	storePvtdataOfInvalidTx := true
	skipPullingInvalidTransactions := false
	rwSetsInCache := []rwSet{}
	rwSetsInTransientStore := []rwSet{}
	rwSetsInPeer := []rwSet{}
	expectedDigKeys := []privdatacommon.DigKey{}
	pvtdataToRetrieve := []*ledger.TxPvtdataInfo{
		{
			TxID:       "tx1",
			Invalid:    false,
			SeqInBlock: 1,
			CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
				invalidns1c1,
			},
		},
	}

	expectedErr := "Collection config policy is nil"

	testRetrievePvtdataFailure(t, scenario, ts,
		peerSelfSignedData, storePvtdataOfInvalidTx, skipPullingInvalidTransactions,
		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer,
		expectedDigKeys, pvtdataToRetrieve,
		expectedErr)
}

// TestRetryFetchFromPeer checks that when private data cannot be found in
// any source, the provider retries fetching from peers using the sleeper,
// and that the number of sleeps stays within the configured retry budget.
func TestRetryFetchFromPeer(t *testing.T) {
	err := msptesttools.LoadMSPSetupForTesting()
	require.NoError(t, err, fmt.Sprintf("Failed to setup local msp for testing, got err %s", err))

	identity := mspmgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
	serializedID, err := identity.Serialize()
	require.NoError(t, err, fmt.Sprintf("Serialize should have succeeded, got err %s", err))
	data := []byte{1, 2, 3}
	signature, err := identity.Sign(data)
	require.NoError(t, err, fmt.Sprintf("Could not sign identity, got err %s", err))
	peerSelfSignedData := protoutil.SignedData{
		Identity:  serializedID,
		Signature: signature,
		Data:      data,
	}
	endorser := protoutil.MarshalOrPanic(&mspproto.SerializedIdentity{
		Mspid:   identity.GetMSPIdentifier(),
		IdBytes: []byte(fmt.Sprintf("p0%s", identity.GetMSPIdentifier())),
	})

	ts := testSupport{
		preHash:            []byte("rws-pre-image"),
		hash:               util2.ComputeSHA256([]byte("rws-pre-image")),
		channelID:          "testchannelid",
		blockNum:           uint64(1),
		endorsers:          []string{identity.GetMSPIdentifier()},
		peerSelfSignedData: peerSelfSignedData,
	}

	ns1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
	ns1c2 := collectionPvtdataInfoFromTemplate("ns1", "c2", identity.GetMSPIdentifier(), ts.hash, endorser, signature)

	// A real on-disk transient store backs the provider for this test.
	tempdir, err := ioutil.TempDir("", "ts")
	require.NoError(t, err, fmt.Sprintf("Failed to create test directory, got err %s", err))
	storeProvider, err := transientstore.NewStoreProvider(tempdir)
	require.NoError(t, err, fmt.Sprintf("Failed to create store provider, got err %s", err))
	store, err := storeProvider.OpenStore(ts.channelID)
	require.NoError(t, err, fmt.Sprintf("Failed to open store, got err %s", err))

	defer storeProvider.Close()
	defer os.RemoveAll(tempdir)

	storePvtdataOfInvalidTx := true
	skipPullingInvalidTransactions := false
	rwSetsInCache := []rwSet{}
	rwSetsInTransientStore := []rwSet{}
	rwSetsInPeer := []rwSet{}
	// Both requested collections must be asked for from peers.
	expectedDigKeys := []privdatacommon.DigKey{
		{
			TxId:       "tx1",
			Namespace:  "ns1",
			Collection: "c1",
			BlockSeq:   ts.blockNum,
			SeqInBlock: 1,
		},
		{
			TxId:       "tx1",
			Namespace:  "ns1",
			Collection: "c2",
			BlockSeq:   ts.blockNum,
			SeqInBlock: 1,
		},
	}
	pvtdataToRetrieve := []*ledger.TxPvtdataInfo{
		{
			TxID:       "tx1",
			Invalid:    false,
			SeqInBlock: 1,
			CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
				ns1c1,
				ns1c2,
			},
		},
	}
	pdp := setupPrivateDataProvider(t, ts, testConfig,
		storePvtdataOfInvalidTx, skipPullingInvalidTransactions, store,
		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer,
		expectedDigKeys)
	require.NotNil(t, pdp)

	// Swap in a counting sleeper so the retry loop's sleeps are observable.
	// NOTE(review): the stub still performs a real time.Sleep of the full
	// requested duration, so the retry loop runs in real time — a no-op stub
	// would make this test faster; confirm the real sleep is intentional.
	fakeSleeper := &mocks.Sleeper{}
	SetSleeper(pdp, fakeSleeper)
	fakeSleeper.SleepStub = func(sleepDur time.Duration) {
		time.Sleep(sleepDur)
	}

	_, err = pdp.RetrievePvtdata(pvtdataToRetrieve)
	assert.NoError(t, err)
	var maxRetries int

	// The retry budget is threshold / per-retry sleep interval.
	maxRetries = int(testConfig.PullRetryThreshold / pullRetrySleepInterval)
	assert.Equal(t, fakeSleeper.SleepCallCount() <= maxRetries, true)
	assert.Equal(t, fakeSleeper.SleepArgsForCall(0), pullRetrySleepInterval)
}
// TestRetrievedPvtdataPurgeBelowHeight verifies Purge() semantics against a
// real transient store: purging after retrieving at block 9 removes only the
// fetched write set, while purging at block 10 (a multiple of the configured
// TransientBlockRetention of 5) additionally removes all entries persisted
// below the retention height.
func TestRetrievedPvtdataPurgeBelowHeight(t *testing.T) {
	conf := testConfig
	conf.TransientBlockRetention = 5

	err := msptesttools.LoadMSPSetupForTesting()
	require.NoError(t, err, fmt.Sprintf("Failed to setup local msp for testing, got err %s", err))

	identity := mspmgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
	serializedID, err := identity.Serialize()
	require.NoError(t, err, fmt.Sprintf("Serialize should have succeeded, got err %s", err))
	data := []byte{1, 2, 3}
	signature, err := identity.Sign(data)
	require.NoError(t, err, fmt.Sprintf("Could not sign identity, got err %s", err))
	peerSelfSignedData := protoutil.SignedData{
		Identity:  serializedID,
		Signature: signature,
		Data:      data,
	}
	endorser := protoutil.MarshalOrPanic(&mspproto.SerializedIdentity{
		Mspid:   identity.GetMSPIdentifier(),
		IdBytes: []byte(fmt.Sprintf("p0%s", identity.GetMSPIdentifier())),
	})

	ts := testSupport{
		preHash:   []byte("rws-pre-image"),
		hash:      util2.ComputeSHA256([]byte("rws-pre-image")),
		channelID: "testchannelid",
		// blockNum 9 is deliberately NOT a multiple of the retention (5).
		blockNum:           uint64(9),
		endorsers:          []string{identity.GetMSPIdentifier()},
		peerSelfSignedData: peerSelfSignedData,
	}

	ns1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", identity.GetMSPIdentifier(), ts.hash, endorser, signature)

	tempdir, err := ioutil.TempDir("", "ts")
	require.NoError(t, err, fmt.Sprintf("Failed to create test directory, got err %s", err))
	storeProvider, err := transientstore.NewStoreProvider(tempdir)
	require.NoError(t, err, fmt.Sprintf("Failed to create store provider, got err %s", err))
	store, err := storeProvider.OpenStore(ts.channelID)
	require.NoError(t, err, fmt.Sprintf("Failed to open store, got err %s", err))

	defer storeProvider.Close()
	defer os.RemoveAll(tempdir)

	// set up store with 9 existing private data write sets
	// NOTE(review): the Persist error is ignored here; a failed write would
	// only surface later as a confusing assertion failure.
	for i := 0; i < 9; i++ {
		txID := fmt.Sprintf("tx%d", i+1)
		store.Persist(txID, uint64(i), &tspb.TxPvtReadWriteSetWithConfigInfo{
			PvtRwset: &rwset.TxPvtReadWriteSet{
				NsPvtRwset: []*rwset.NsPvtReadWriteSet{
					{
						Namespace: "ns1",
						CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
							{
								CollectionName: "c1",
								Rwset:          []byte("rws-pre-image"),
							},
						},
					},
				},
			},
			CollectionConfigs: make(map[string]*peer.CollectionConfigPackage),
		})
	}

	// test that the initial data shows up in the store
	// NOTE(review): this loop uses `i < 9` so tx9 is never verified here, and
	// iterator.Close() is deferred before the error check — if
	// GetTxPvtRWSetByTxid failed, iterator would be nil and Close would panic
	// before require.NoError reports the error. Both look worth tightening.
	for i := 1; i < 9; i++ {
		func() {
			txID := fmt.Sprintf("tx%d", i)
			iterator, err := store.GetTxPvtRWSetByTxid(txID, nil)
			defer iterator.Close()
			require.NoError(t, err, fmt.Sprintf("Failed obtaining iterator from transient store, got err %s", err))
			res, err := iterator.Next()
			require.NoError(t, err, fmt.Sprintf("Failed iterating, got err %s", err))
			assert.NotNil(t, res)
		}()
	}

	storePvtdataOfInvalidTx := true
	skipPullingInvalidTransactions := false
	rwSetsInCache := []rwSet{
		{
			txID:        "tx9",
			namespace:   "ns1",
			collections: []string{"c1"},
			preHash:     ts.preHash,
			hash:        ts.hash,
			seqInBlock:  1,
		},
	}
	rwSetsInTransientStore := []rwSet{}
	rwSetsInPeer := []rwSet{}
	expectedDigKeys := []privdatacommon.DigKey{}
	// request tx9 which is found in both the cache and transient store
	pvtdataToRetrieve := []*ledger.TxPvtdataInfo{
		{
			TxID:       "tx9",
			Invalid:    false,
			SeqInBlock: 1,
			CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
				ns1c1,
			},
		},
	}
	pdp := setupPrivateDataProvider(t, ts, conf,
		storePvtdataOfInvalidTx, skipPullingInvalidTransactions, store,
		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer, expectedDigKeys)
	require.NotNil(t, pdp)

	retrievedPvtdata, err := pdp.RetrievePvtdata(pvtdataToRetrieve)
	require.NoError(t, err)

	retrievedPvtdata.Purge()

	for i := 1; i <= 9; i++ {
		func() {
			txID := fmt.Sprintf("tx%d", i)
			iterator, err := store.GetTxPvtRWSetByTxid(txID, nil)
			defer iterator.Close()
			require.NoError(t, err, fmt.Sprintf("Failed obtaining iterator from transient store, got err %s", err))
			res, err := iterator.Next()
			require.NoError(t, err, fmt.Sprintf("Failed iterating, got err %s", err))
			// Check that only the fetched private write set was purged because we haven't reached a blockNum that's a multiple of 5 yet
			if i == 9 {
				assert.Nil(t, res)
			} else {
				assert.NotNil(t, res)
			}
		}()
	}

	// increment blockNum to a multiple of transientBlockRetention
	pdp.blockNum = 10
	retrievedPvtdata, err = pdp.RetrievePvtdata(pvtdataToRetrieve)
	require.NoError(t, err)

	retrievedPvtdata.Purge()

	for i := 1; i <= 9; i++ {
		func() {
			txID := fmt.Sprintf("tx%d", i)
			iterator, err := store.GetTxPvtRWSetByTxid(txID, nil)
			defer iterator.Close()
			require.NoError(t, err, fmt.Sprintf("Failed obtaining iterator from transient store, got err %s", err))
			res, err := iterator.Next()
			require.NoError(t, err, fmt.Sprintf("Failed iterating, got err %s", err))
			// Check that the first 5 sets have been purged alongside the 9th set purged earlier
			if i < 6 || i == 9 {
				assert.Nil(t, res)
			} else {
				assert.NotNil(t, res)
			}
		}()
	}
}
// testRetrievePvtdataSuccess runs one success scenario end to end: it seeds
// the three data sources, builds a provider over a fresh on-disk transient
// store, retrieves the requested private data, compares the (sorted) result
// against the expected BlockPvtdata, and finally verifies Purge() removed
// the fetched write sets from the store.
func testRetrievePvtdataSuccess(t *testing.T,
	scenario string,
	ts testSupport,
	storePvtdataOfInvalidTx, skipPullingInvalidTransactions bool,
	rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer []rwSet,
	expectedDigKeys []privdatacommon.DigKey,
	pvtdataToRetrieve []*ledger.TxPvtdataInfo,
	expectedBlockPvtdata *ledger.BlockPvtdata) {

	fmt.Println("\n" + scenario)

	// Fresh temp-backed transient store per scenario so state cannot leak.
	tempdir, err := ioutil.TempDir("", "ts")
	require.NoError(t, err, fmt.Sprintf("Failed to create test directory, got err %s", err))
	storeProvider, err := transientstore.NewStoreProvider(tempdir)
	require.NoError(t, err, fmt.Sprintf("Failed to create store provider, got err %s", err))
	store, err := storeProvider.OpenStore(ts.channelID)
	require.NoError(t, err, fmt.Sprintf("Failed to open store, got err %s", err))
	defer storeProvider.Close()
	defer os.RemoveAll(tempdir)

	pdp := setupPrivateDataProvider(t, ts, testConfig,
		storePvtdataOfInvalidTx, skipPullingInvalidTransactions, store,
		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer,
		expectedDigKeys)
	require.NotNil(t, pdp, scenario)

	retrievedPvtdata, err := pdp.RetrievePvtdata(pvtdataToRetrieve)
	assert.NoError(t, err, scenario)

	// sometimes the collection private write sets are added out of order
	// so we need to sort it to check equality with expected
	blockPvtdata := sortBlockPvtdata(retrievedPvtdata.GetBlockPvtdata())
	assert.Equal(t, expectedBlockPvtdata, blockPvtdata, scenario)

	// Test pvtdata is purged from store on Done() call
	testPurged(t, scenario, retrievedPvtdata, store, pvtdataToRetrieve)
}

// testRetrievePvtdataFailure runs one failure scenario: the same setup as
// the success helper, but RetrievePvtdata is expected to return exactly
// expectedErr.
func testRetrievePvtdataFailure(t *testing.T,
	scenario string,
	ts testSupport,
	peerSelfSignedData protoutil.SignedData,
	storePvtdataOfInvalidTx, skipPullingInvalidTransactions bool,
	rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer []rwSet,
	expectedDigKeys []privdatacommon.DigKey,
	pvtdataToRetrieve []*ledger.TxPvtdataInfo,
	expectedErr string) {

	fmt.Println("\n" + scenario)

	tempdir, err := ioutil.TempDir("", "ts")
	require.NoError(t, err, fmt.Sprintf("Failed to create test directory, got err %s", err))
	storeProvider, err := transientstore.NewStoreProvider(tempdir)
	require.NoError(t, err, fmt.Sprintf("Failed to create store provider, got err %s", err))
	store, err := storeProvider.OpenStore(ts.channelID)
	require.NoError(t, err, fmt.Sprintf("Failed to open store, got err %s", err))
	defer storeProvider.Close()
	defer os.RemoveAll(tempdir)

	pdp := setupPrivateDataProvider(t, ts, testConfig,
		storePvtdataOfInvalidTx, skipPullingInvalidTransactions, store,
		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer,
		expectedDigKeys)
	require.NotNil(t, pdp, scenario)

	_, err = pdp.RetrievePvtdata(pvtdataToRetrieve)
	assert.EqualError(t, err, expectedErr, scenario)
}

// setupPrivateDataProvider builds a PvtdataProvider wired to test doubles:
// the given rwSets are loaded into the prefetched cache, persisted to the
// transient store, and registered on a mock fetcher (which also asserts the
// expected digests/endorsers). Metrics are backed by the disabled provider.
func setupPrivateDataProvider(t *testing.T,
	ts testSupport,
	config CoordinatorConfig,
	storePvtdataOfInvalidTx, skipPullingInvalidTransactions bool, store *transientstore.Store,
	rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer []rwSet,
	expectedDigKeys []privdatacommon.DigKey) *PvtdataProvider {

	metrics := metrics.NewGossipMetrics(&disabled.Provider{}).PrivdataMetrics

	// Eligibility checks deserialize identities through the channel's MSP manager.
	idDeserializerFactory := IdentityDeserializerFactoryFunc(func(chainID string) msp.IdentityDeserializer {
		return mspmgmt.GetManagerForChain(ts.channelID)
	})

	// set up data in cache
	prefetchedPvtdata := storePvtdataInCache(rwSetsInCache)
	// set up data in transient store
	err := storePvtdataInTransientStore(rwSetsInTransientStore, store)
	require.NoError(t, err, fmt.Sprintf("Failed to store private data in transient store: got err %s", err))

	// set up data in peer
	fetcher := &fetcherMock{t: t}
	storePvtdataInPeer(rwSetsInPeer, expectedDigKeys, fetcher, ts, skipPullingInvalidTransactions)

	pdp := &PvtdataProvider{
		selfSignedData:                          ts.peerSelfSignedData,
		logger:                                  logger,
		listMissingPrivateDataDurationHistogram: metrics.ListMissingPrivateDataDuration.With("channel", ts.channelID),
		fetchDurationHistogram:                  metrics.FetchDuration.With("channel", ts.channelID),
		purgeDurationHistogram:                  metrics.PurgeDuration.With("channel", ts.channelID),
		transientStore:                          store,
		pullRetryThreshold:                      config.PullRetryThreshold,
		prefetchedPvtdata:                       prefetchedPvtdata,
		transientBlockRetention:                 config.TransientBlockRetention,
		channelID:                               ts.channelID,
		blockNum:                                ts.blockNum,
		storePvtdataOfInvalidTx:                 storePvtdataOfInvalidTx,
		skipPullingInvalidTransactions:          skipPullingInvalidTransactions,
		fetcher:                                 fetcher,
		idDeserializerFactory:                   idDeserializerFactory,
	}

	return pdp
}
func testPurged(t *testing.T, 1257 scenario string, 1258 retrievedPvtdata ledger.RetrievedPvtdata, 1259 store *transientstore.Store, 1260 txPvtdataInfo []*ledger.TxPvtdataInfo) { 1261 1262 retrievedPvtdata.Purge() 1263 for _, pvtdata := range retrievedPvtdata.GetBlockPvtdata().PvtData { 1264 func() { 1265 txID := getTxIDBySeqInBlock(pvtdata.SeqInBlock, txPvtdataInfo) 1266 require.NotEqual(t, txID, "", fmt.Sprintf("Could not find txID for SeqInBlock %d", pvtdata.SeqInBlock), scenario) 1267 1268 iterator, err := store.GetTxPvtRWSetByTxid(txID, nil) 1269 defer iterator.Close() 1270 require.NoError(t, err, fmt.Sprintf("Failed obtaining iterator from transient store, got err %s", err)) 1271 1272 res, err := iterator.Next() 1273 require.NoError(t, err, fmt.Sprintf("Failed iterating, got err %s", err)) 1274 1275 assert.Nil(t, res, scenario) 1276 }() 1277 } 1278 } 1279 1280 func storePvtdataInCache(rwsets []rwSet) util.PvtDataCollections { 1281 res := []*ledger.TxPvtData{} 1282 for _, rws := range rwsets { 1283 set := &rwset.TxPvtReadWriteSet{ 1284 NsPvtRwset: []*rwset.NsPvtReadWriteSet{ 1285 { 1286 Namespace: rws.namespace, 1287 CollectionPvtRwset: getCollectionPvtReadWriteSet(rws), 1288 }, 1289 }, 1290 } 1291 1292 res = append(res, &ledger.TxPvtData{ 1293 SeqInBlock: rws.seqInBlock, 1294 WriteSet: set, 1295 }) 1296 } 1297 1298 return res 1299 } 1300 1301 func storePvtdataInTransientStore(rwsets []rwSet, store *transientstore.Store) error { 1302 for _, rws := range rwsets { 1303 set := &tspb.TxPvtReadWriteSetWithConfigInfo{ 1304 PvtRwset: &rwset.TxPvtReadWriteSet{ 1305 NsPvtRwset: []*rwset.NsPvtReadWriteSet{ 1306 { 1307 Namespace: rws.namespace, 1308 CollectionPvtRwset: getCollectionPvtReadWriteSet(rws), 1309 }, 1310 }, 1311 }, 1312 CollectionConfigs: make(map[string]*peer.CollectionConfigPackage), 1313 } 1314 1315 err := store.Persist(rws.txID, 1, set) 1316 if err != nil { 1317 return err 1318 } 1319 } 1320 return nil 1321 } 1322 1323 func storePvtdataInPeer(rwSets 
[]rwSet, expectedDigKeys []privdatacommon.DigKey, fetcher *fetcherMock, ts testSupport, skipPullingInvalidTransactions bool) { 1324 availableElements := []*proto.PvtDataElement{} 1325 for _, rws := range rwSets { 1326 for _, c := range rws.collections { 1327 availableElements = append(availableElements, &proto.PvtDataElement{ 1328 Digest: &proto.PvtDataDigest{ 1329 TxId: rws.txID, 1330 Namespace: rws.namespace, 1331 Collection: c, 1332 BlockSeq: ts.blockNum, 1333 SeqInBlock: rws.seqInBlock, 1334 }, 1335 Payload: [][]byte{ts.preHash}, 1336 }) 1337 } 1338 } 1339 1340 endorsers := []string{} 1341 if len(expectedDigKeys) > 0 { 1342 endorsers = ts.endorsers 1343 } 1344 fetcher.On("fetch", mock.Anything).expectingDigests(expectedDigKeys).expectingEndorsers(endorsers...).Return(&privdatacommon.FetchedPvtDataContainer{ 1345 AvailableElements: availableElements, 1346 }, nil) 1347 } 1348 1349 func getCollectionPvtReadWriteSet(rws rwSet) []*rwset.CollectionPvtReadWriteSet { 1350 colPvtRwSet := []*rwset.CollectionPvtReadWriteSet{} 1351 for _, c := range rws.collections { 1352 colPvtRwSet = append(colPvtRwSet, &rwset.CollectionPvtReadWriteSet{ 1353 CollectionName: c, 1354 Rwset: rws.preHash, 1355 }) 1356 } 1357 1358 sort.Slice(colPvtRwSet, func(i, j int) bool { 1359 return colPvtRwSet[i].CollectionName < colPvtRwSet[j].CollectionName 1360 }) 1361 1362 return colPvtRwSet 1363 } 1364 1365 func sortBlockPvtdata(blockPvtdata *ledger.BlockPvtdata) *ledger.BlockPvtdata { 1366 for _, pvtdata := range blockPvtdata.PvtData { 1367 for _, ws := range pvtdata.WriteSet.NsPvtRwset { 1368 sort.Slice(ws.CollectionPvtRwset, func(i, j int) bool { 1369 return ws.CollectionPvtRwset[i].CollectionName < ws.CollectionPvtRwset[j].CollectionName 1370 }) 1371 } 1372 } 1373 for _, missingPvtdata := range blockPvtdata.MissingPvtData { 1374 sort.Slice(missingPvtdata, func(i, j int) bool { 1375 return missingPvtdata[i].Collection < missingPvtdata[j].Collection 1376 }) 1377 } 1378 return blockPvtdata 1379 } 
1380 1381 func collectionPvtdataInfoFromTemplate(namespace, collection, mspIdentifier string, hash, endorser, signature []byte) *ledger.CollectionPvtdataInfo { 1382 return &ledger.CollectionPvtdataInfo{ 1383 Collection: collection, 1384 Namespace: namespace, 1385 ExpectedHash: hash, 1386 Endorsers: []*peer.Endorsement{ 1387 { 1388 Endorser: endorser, 1389 Signature: signature, 1390 }, 1391 }, 1392 CollectionConfig: &peer.StaticCollectionConfig{ 1393 Name: collection, 1394 MemberOnlyRead: true, 1395 MemberOrgsPolicy: &peer.CollectionPolicyConfig{ 1396 Payload: &peer.CollectionPolicyConfig_SignaturePolicy{ 1397 SignaturePolicy: &common.SignaturePolicyEnvelope{ 1398 Rule: &common.SignaturePolicy{ 1399 Type: &common.SignaturePolicy_SignedBy{ 1400 SignedBy: 0, 1401 }, 1402 }, 1403 Identities: []*mspproto.MSPPrincipal{ 1404 { 1405 PrincipalClassification: mspproto.MSPPrincipal_ROLE, 1406 Principal: protoutil.MarshalOrPanic(&mspproto.MSPRole{ 1407 MspIdentifier: mspIdentifier, 1408 Role: mspproto.MSPRole_MEMBER, 1409 }), 1410 }, 1411 }, 1412 }, 1413 }, 1414 }, 1415 }, 1416 } 1417 }