github.com/yimialmonte/fabric@v2.1.1+incompatible/gossip/privdata/pvtdataprovider_test.go

     1  /*
     2  Copyright IBM Corp. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package privdata
     8  
     9  import (
    10  	"fmt"
    11  	"io/ioutil"
    12  	"os"
    13  	"sort"
    14  	"testing"
    15  	"time"
    16  
    17  	"github.com/hyperledger/fabric-protos-go/common"
    18  	proto "github.com/hyperledger/fabric-protos-go/gossip"
    19  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    20  	mspproto "github.com/hyperledger/fabric-protos-go/msp"
    21  	"github.com/hyperledger/fabric-protos-go/peer"
    22  	tspb "github.com/hyperledger/fabric-protos-go/transientstore"
    23  	"github.com/hyperledger/fabric/bccsp/factory"
    24  	"github.com/hyperledger/fabric/common/metrics/disabled"
    25  	util2 "github.com/hyperledger/fabric/common/util"
    26  	"github.com/hyperledger/fabric/core/ledger"
    27  	"github.com/hyperledger/fabric/core/transientstore"
    28  	"github.com/hyperledger/fabric/gossip/metrics"
    29  	privdatacommon "github.com/hyperledger/fabric/gossip/privdata/common"
    30  	"github.com/hyperledger/fabric/gossip/privdata/mocks"
    31  	"github.com/hyperledger/fabric/gossip/util"
    32  	"github.com/hyperledger/fabric/msp"
    33  	mspmgmt "github.com/hyperledger/fabric/msp/mgmt"
    34  	msptesttools "github.com/hyperledger/fabric/msp/mgmt/testtools"
    35  	"github.com/hyperledger/fabric/protoutil"
    36  	"github.com/stretchr/testify/assert"
    37  	"github.com/stretchr/testify/mock"
    38  	"github.com/stretchr/testify/require"
    39  )
    40  
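        // testSupport holds the fixed inputs shared by the tests in this file: the
        // write-set pre-image and its SHA-256 hash, the channel and block under test,
        // the expected endorser MSP IDs, and the peer's self-signed data.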
    41  type testSupport struct {
    42  	preHash, hash      []byte
    43  	channelID          string
    44  	blockNum           uint64
    45  	endorsers          []string
    46  	peerSelfSignedData protoutil.SignedData
    47  }
    48  
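        // rwSet describes one transaction's private write set as it is seeded into a
        // data source (cache, transient store, or remote peer) for a scenario.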
    49  type rwSet struct {
    50  	txID          string
    51  	namespace     string
    52  	collections   []string
    53  	preHash, hash []byte
    54  	seqInBlock    uint64
    55  }
    56  
    57  func init() {
    58  	util.SetupTestLoggingWithLevel("INFO")
    59  }
    60  
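        // TestRetrievePvtdata runs table-driven scenarios against
        // PvtdataProvider.RetrievePvtdata, seeding private write sets into the cache,
        // the transient store, and mocked peers, then comparing the assembled block
        // private data and missing-data records with the expected results.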
    61  func TestRetrievePvtdata(t *testing.T) {
    62  	err := msptesttools.LoadMSPSetupForTesting()
    63  	require.NoError(t, err, fmt.Sprintf("Failed to set up local msp for testing, got err %s", err))
    64  
    65  	identity := mspmgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
    66  	serializedID, err := identity.Serialize()
    67  	require.NoError(t, err, fmt.Sprintf("Serialize should have succeeded, got err %s", err))
    68  	data := []byte{1, 2, 3}
    69  	signature, err := identity.Sign(data)
    70  	require.NoError(t, err, fmt.Sprintf("Could not sign identity, got err %s", err))
    71  	peerSelfSignedData := protoutil.SignedData{
    72  		Identity:  serializedID,
    73  		Signature: signature,
    74  		Data:      data,
    75  	}
    76  	endorser := protoutil.MarshalOrPanic(&mspproto.SerializedIdentity{
    77  		Mspid:   identity.GetMSPIdentifier(),
    78  		IdBytes: []byte(fmt.Sprintf("p0%s", identity.GetMSPIdentifier())),
    79  	})
    80  
    81  	ts := testSupport{
    82  		preHash:            []byte("rws-pre-image"),
    83  		hash:               util2.ComputeSHA256([]byte("rws-pre-image")),
    84  		channelID:          "testchannelid",
    85  		blockNum:           uint64(1),
    86  		endorsers:          []string{identity.GetMSPIdentifier()},
    87  		peerSelfSignedData: peerSelfSignedData,
    88  	}
    89  
    90  	ns1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
    91  	ns1c2 := collectionPvtdataInfoFromTemplate("ns1", "c2", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
    92  	ineligiblens1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", "different-org", ts.hash, endorser, signature)
    93  
    94  	tests := []struct {
    95  		scenario                                                string
    96  		storePvtdataOfInvalidTx, skipPullingInvalidTransactions bool
    97  		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer     []rwSet
    98  		expectedDigKeys                                         []privdatacommon.DigKey
    99  		pvtdataToRetrieve                                       []*ledger.TxPvtdataInfo
   100  		expectedBlockPvtdata                                    *ledger.BlockPvtdata
   101  	}{
   102  		{
   103  			// Scenario I
   104  			scenario:                       "Scenario I: Only eligible private data in cache, no missing private data",
   105  			storePvtdataOfInvalidTx:        true,
   106  			skipPullingInvalidTransactions: false,
   107  			rwSetsInCache: []rwSet{
   108  				{
   109  					txID:        "tx1",
   110  					namespace:   "ns1",
   111  					collections: []string{"c1", "c2"},
   112  					preHash:     ts.preHash,
   113  					hash:        ts.hash,
   114  					seqInBlock:  1,
   115  				},
   116  			},
   117  			rwSetsInTransientStore: []rwSet{},
   118  			rwSetsInPeer:           []rwSet{},
   119  			expectedDigKeys:        []privdatacommon.DigKey{},
   120  			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
   121  				{
   122  					TxID:       "tx1",
   123  					Invalid:    false,
   124  					SeqInBlock: 1,
   125  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   126  						ns1c1,
   127  						ns1c2,
   128  					},
   129  				},
   130  			},
   131  			expectedBlockPvtdata: &ledger.BlockPvtdata{
   132  				PvtData: ledger.TxPvtDataMap{
   133  					1: &ledger.TxPvtData{
   134  						SeqInBlock: 1,
   135  						WriteSet: &rwset.TxPvtReadWriteSet{
   136  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   137  								{
   138  									Namespace: "ns1",
   139  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   140  										preHash:     ts.preHash,
   141  										collections: []string{"c1", "c2"},
   142  									}),
   143  								},
   144  							},
   145  						},
   146  					},
   147  				},
   148  				MissingPvtData: ledger.TxMissingPvtDataMap{},
   149  			},
   150  		},
   151  		{
   152  			// Scenario II
   153  			scenario:                       "Scenario II: No eligible private data, skip ineligible private data from all sources even if found in cache",
   154  			storePvtdataOfInvalidTx:        true,
   155  			skipPullingInvalidTransactions: false,
   156  			rwSetsInCache: []rwSet{
   157  				{
   158  					txID:        "tx1",
   159  					namespace:   "ns1",
   160  					collections: []string{"c1"},
   161  					preHash:     ts.preHash,
   162  					hash:        ts.hash,
   163  					seqInBlock:  1,
   164  				},
   165  			},
   166  			rwSetsInTransientStore: []rwSet{
   167  				{
   168  					txID:        "tx2",
   169  					namespace:   "ns1",
   170  					collections: []string{"c1"},
   171  					preHash:     ts.preHash,
   172  					hash:        ts.hash,
   173  					seqInBlock:  2,
   174  				},
   175  			},
   176  			rwSetsInPeer: []rwSet{
   177  				{
   178  					txID:        "tx3",
   179  					namespace:   "ns1",
   180  					collections: []string{"c1"},
   181  					preHash:     ts.preHash,
   182  					hash:        ts.hash,
   183  					seqInBlock:  3,
   184  				},
   185  			},
   186  			expectedDigKeys: []privdatacommon.DigKey{},
   187  			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
   188  				{
   189  					TxID:       "tx1",
   190  					Invalid:    false,
   191  					SeqInBlock: 1,
   192  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   193  						ineligiblens1c1,
   194  					},
   195  				},
   196  				{
   197  					TxID:       "tx2",
   198  					Invalid:    false,
   199  					SeqInBlock: 2,
   200  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   201  						ineligiblens1c1,
   202  					},
   203  				},
   204  				{
   205  					TxID:       "tx3",
   206  					Invalid:    false,
   207  					SeqInBlock: 3,
   208  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   209  						ineligiblens1c1,
   210  					},
   211  				},
   212  			},
   213  			expectedBlockPvtdata: &ledger.BlockPvtdata{
   214  				PvtData: ledger.TxPvtDataMap{},
   215  				MissingPvtData: ledger.TxMissingPvtDataMap{
   216  					1: []*ledger.MissingPvtData{
   217  						{
   218  							Namespace:  "ns1",
   219  							Collection: "c1",
   220  							IsEligible: false,
   221  						},
   222  					},
   223  					2: []*ledger.MissingPvtData{
   224  						{
   225  							Namespace:  "ns1",
   226  							Collection: "c1",
   227  							IsEligible: false,
   228  						},
   229  					},
   230  					3: []*ledger.MissingPvtData{
   231  						{
   232  							Namespace:  "ns1",
   233  							Collection: "c1",
   234  							IsEligible: false,
   235  						},
   236  					},
   237  				},
   238  			},
   239  		},
   240  		{
   241  			// Scenario III
   242  			scenario:                       "Scenario III: Missing private data in cache, found in transient store",
   243  			storePvtdataOfInvalidTx:        true,
   244  			skipPullingInvalidTransactions: false,
   245  			rwSetsInCache: []rwSet{
   246  				{
   247  					txID:        "tx1",
   248  					namespace:   "ns1",
   249  					collections: []string{"c1", "c2"},
   250  					preHash:     ts.preHash,
   251  					hash:        ts.hash,
   252  					seqInBlock:  1,
   253  				},
   254  			},
   255  			rwSetsInTransientStore: []rwSet{
   256  				{
   257  					txID:        "tx2",
   258  					namespace:   "ns1",
   259  					collections: []string{"c2"},
   260  					preHash:     ts.preHash,
   261  					hash:        ts.hash,
   262  					seqInBlock:  2,
   263  				},
   264  			},
   265  			rwSetsInPeer:    []rwSet{},
   266  			expectedDigKeys: []privdatacommon.DigKey{},
   267  			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
   268  				{
   269  					TxID:       "tx1",
   270  					Invalid:    false,
   271  					SeqInBlock: 1,
   272  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   273  						ns1c1,
   274  						ns1c2,
   275  					},
   276  				},
   277  				{
   278  					TxID:       "tx2",
   279  					Invalid:    false,
   280  					SeqInBlock: 2,
   281  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   282  						ns1c2,
   283  					},
   284  				},
   285  			},
   286  			expectedBlockPvtdata: &ledger.BlockPvtdata{
   287  				PvtData: ledger.TxPvtDataMap{
   288  					1: &ledger.TxPvtData{
   289  						SeqInBlock: 1,
   290  						WriteSet: &rwset.TxPvtReadWriteSet{
   291  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   292  								{
   293  									Namespace: "ns1",
   294  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   295  										preHash:     ts.preHash,
   296  										collections: []string{"c1", "c2"},
   297  									}),
   298  								},
   299  							},
   300  						},
   301  					},
   302  					2: &ledger.TxPvtData{
   303  						SeqInBlock: 2,
   304  						WriteSet: &rwset.TxPvtReadWriteSet{
   305  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   306  								{
   307  									Namespace: "ns1",
   308  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   309  										preHash:     ts.preHash,
   310  										collections: []string{"c2"},
   311  									}),
   312  								},
   313  							},
   314  						},
   315  					},
   316  				},
   317  				MissingPvtData: ledger.TxMissingPvtDataMap{},
   318  			},
   319  		},
   320  		{
   321  			// Scenario IV
   322  			scenario:                       "Scenario IV: Missing private data in cache, found some in transient store and some in peer",
   323  			storePvtdataOfInvalidTx:        true,
   324  			skipPullingInvalidTransactions: false,
   325  			rwSetsInCache: []rwSet{
   326  				{
   327  					txID:        "tx1",
   328  					namespace:   "ns1",
   329  					collections: []string{"c1", "c2"},
   330  					preHash:     ts.preHash,
   331  					hash:        ts.hash,
   332  					seqInBlock:  1,
   333  				},
   334  			},
   335  			rwSetsInTransientStore: []rwSet{
   336  				{
   337  					txID:        "tx2",
   338  					namespace:   "ns1",
   339  					collections: []string{"c1", "c2"},
   340  					preHash:     ts.preHash,
   341  					hash:        ts.hash,
   342  					seqInBlock:  2,
   343  				},
   344  			},
   345  			rwSetsInPeer: []rwSet{
   346  				{
   347  					txID:        "tx3",
   348  					namespace:   "ns1",
   349  					collections: []string{"c1", "c2"},
   350  					preHash:     ts.preHash,
   351  					hash:        ts.hash,
   352  					seqInBlock:  3,
   353  				},
   354  			},
   355  			expectedDigKeys: []privdatacommon.DigKey{
   356  				{
   357  					TxId:       "tx3",
   358  					Namespace:  "ns1",
   359  					Collection: "c1",
   360  					BlockSeq:   ts.blockNum,
   361  					SeqInBlock: 3,
   362  				},
   363  				{
   364  					TxId:       "tx3",
   365  					Namespace:  "ns1",
   366  					Collection: "c2",
   367  					BlockSeq:   ts.blockNum,
   368  					SeqInBlock: 3,
   369  				},
   370  			},
   371  			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
   372  				{
   373  					TxID:       "tx1",
   374  					Invalid:    false,
   375  					SeqInBlock: 1,
   376  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   377  						ns1c1,
   378  						ns1c2,
   379  					},
   380  				},
   381  				{
   382  					TxID:       "tx2",
   383  					Invalid:    false,
   384  					SeqInBlock: 2,
   385  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   386  						ns1c1,
   387  						ns1c2,
   388  					},
   389  				},
   390  				{
   391  					TxID:       "tx3",
   392  					Invalid:    false,
   393  					SeqInBlock: 3,
   394  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   395  						ns1c1,
   396  						ns1c2,
   397  					},
   398  				},
   399  			},
   400  			expectedBlockPvtdata: &ledger.BlockPvtdata{
   401  				PvtData: ledger.TxPvtDataMap{
   402  					1: &ledger.TxPvtData{
   403  						SeqInBlock: 1,
   404  						WriteSet: &rwset.TxPvtReadWriteSet{
   405  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   406  								{
   407  									Namespace: "ns1",
   408  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   409  										preHash:     ts.preHash,
   410  										collections: []string{"c1", "c2"},
   411  									}),
   412  								},
   413  							},
   414  						},
   415  					},
   416  					2: &ledger.TxPvtData{
   417  						SeqInBlock: 2,
   418  						WriteSet: &rwset.TxPvtReadWriteSet{
   419  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   420  								{
   421  									Namespace: "ns1",
   422  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   423  										preHash:     ts.preHash,
   424  										collections: []string{"c1", "c2"},
   425  									}),
   426  								},
   427  							},
   428  						},
   429  					},
   430  					3: &ledger.TxPvtData{
   431  						SeqInBlock: 3,
   432  						WriteSet: &rwset.TxPvtReadWriteSet{
   433  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   434  								{
   435  									Namespace: "ns1",
   436  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   437  										preHash:     ts.preHash,
   438  										collections: []string{"c1", "c2"},
   439  									}),
   440  								},
   441  							},
   442  						},
   443  					},
   444  				},
   445  				MissingPvtData: ledger.TxMissingPvtDataMap{},
   446  			},
   447  		},
   448  		{
   449  			// Scenario V
   450  			scenario:                       "Scenario V: Skip invalid txs when storePvtdataOfInvalidTx is false",
   451  			storePvtdataOfInvalidTx:        false,
   452  			skipPullingInvalidTransactions: false,
   453  			rwSetsInCache: []rwSet{
   454  				{
   455  					txID:        "tx1",
   456  					namespace:   "ns1",
   457  					collections: []string{"c1"},
   458  					preHash:     ts.preHash,
   459  					hash:        ts.hash,
   460  					seqInBlock:  1,
   461  				},
   462  				{
   463  					txID:        "tx2",
   464  					namespace:   "ns1",
   465  					collections: []string{"c1"},
   466  					preHash:     ts.preHash,
   467  					hash:        ts.hash,
   468  					seqInBlock:  2,
   469  				},
   470  			},
   471  			rwSetsInTransientStore: []rwSet{},
   472  			rwSetsInPeer:           []rwSet{},
   473  			expectedDigKeys:        []privdatacommon.DigKey{},
   474  			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
   475  				{
   476  					TxID:       "tx1",
   477  					Invalid:    true,
   478  					SeqInBlock: 1,
   479  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   480  						ns1c1,
   481  					},
   482  				},
   483  				{
   484  					TxID:       "tx2",
   485  					Invalid:    false,
   486  					SeqInBlock: 2,
   487  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   488  						ns1c1,
   489  					},
   490  				},
   491  			},
   492  			expectedBlockPvtdata: &ledger.BlockPvtdata{
   493  				PvtData: ledger.TxPvtDataMap{
   494  					2: &ledger.TxPvtData{
   495  						SeqInBlock: 2,
   496  						WriteSet: &rwset.TxPvtReadWriteSet{
   497  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   498  								{
   499  									Namespace: "ns1",
   500  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   501  										preHash:     ts.preHash,
   502  										collections: []string{"c1"},
   503  									}),
   504  								},
   505  							},
   506  						},
   507  					},
   508  				},
   509  				MissingPvtData: ledger.TxMissingPvtDataMap{},
   510  			},
   511  		},
   512  		{
   513  			// Scenario VI
   514  			scenario:                       "Scenario VI: Don't skip invalid txs when storePvtdataOfInvalidTx is true",
   515  			storePvtdataOfInvalidTx:        true,
   516  			skipPullingInvalidTransactions: false,
   517  			rwSetsInCache: []rwSet{
   518  				{
   519  					txID:        "tx1",
   520  					namespace:   "ns1",
   521  					collections: []string{"c1"},
   522  					preHash:     ts.preHash,
   523  					hash:        ts.hash,
   524  					seqInBlock:  1,
   525  				},
   526  				{
   527  					txID:        "tx2",
   528  					namespace:   "ns1",
   529  					collections: []string{"c1"},
   530  					preHash:     ts.preHash,
   531  					hash:        ts.hash,
   532  					seqInBlock:  2,
   533  				},
   534  			},
   535  			rwSetsInTransientStore: []rwSet{},
   536  			rwSetsInPeer:           []rwSet{},
   537  			expectedDigKeys:        []privdatacommon.DigKey{},
   538  			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
   539  				{
   540  					TxID:       "tx1",
   541  					Invalid:    true,
   542  					SeqInBlock: 1,
   543  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   544  						ns1c1,
   545  					},
   546  				},
   547  				{
   548  					TxID:       "tx2",
   549  					Invalid:    false,
   550  					SeqInBlock: 2,
   551  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   552  						ns1c1,
   553  					},
   554  				},
   555  			},
   556  			expectedBlockPvtdata: &ledger.BlockPvtdata{
   557  				PvtData: ledger.TxPvtDataMap{
   558  					1: &ledger.TxPvtData{
   559  						SeqInBlock: 1,
   560  						WriteSet: &rwset.TxPvtReadWriteSet{
   561  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   562  								{
   563  									Namespace: "ns1",
   564  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   565  										preHash:     ts.preHash,
   566  										collections: []string{"c1"},
   567  									}),
   568  								},
   569  							},
   570  						},
   571  					},
   572  					2: &ledger.TxPvtData{
   573  						SeqInBlock: 2,
   574  						WriteSet: &rwset.TxPvtReadWriteSet{
   575  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   576  								{
   577  									Namespace: "ns1",
   578  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   579  										preHash:     ts.preHash,
   580  										collections: []string{"c1"},
   581  									}),
   582  								},
   583  							},
   584  						},
   585  					},
   586  				},
   587  				MissingPvtData: ledger.TxMissingPvtDataMap{},
   588  			},
   589  		},
   590  		{
   591  			// Scenario VII
   592  			scenario:                "Scenario VII: Can't find eligible tx from any source",
   593  			storePvtdataOfInvalidTx: true,
   594  			rwSetsInCache:           []rwSet{},
   595  			rwSetsInTransientStore:  []rwSet{},
   596  			rwSetsInPeer:            []rwSet{},
   597  			expectedDigKeys: []privdatacommon.DigKey{
   598  				{
   599  					TxId:       "tx1",
   600  					Namespace:  "ns1",
   601  					Collection: "c1",
   602  					BlockSeq:   ts.blockNum,
   603  					SeqInBlock: 1,
   604  				},
   605  				{
   606  					TxId:       "tx1",
   607  					Namespace:  "ns1",
   608  					Collection: "c2",
   609  					BlockSeq:   ts.blockNum,
   610  					SeqInBlock: 1,
   611  				},
   612  			},
   613  			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
   614  				{
   615  					TxID:       "tx1",
   616  					Invalid:    false,
   617  					SeqInBlock: 1,
   618  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   619  						ns1c1,
   620  						ns1c2,
   621  					},
   622  				},
   623  			},
   624  			expectedBlockPvtdata: &ledger.BlockPvtdata{
   625  				PvtData: ledger.TxPvtDataMap{},
   626  				MissingPvtData: ledger.TxMissingPvtDataMap{
   627  					1: []*ledger.MissingPvtData{
   628  						{
   629  							Namespace:  "ns1",
   630  							Collection: "c1",
   631  							IsEligible: true,
   632  						},
   633  						{
   634  							Namespace:  "ns1",
   635  							Collection: "c2",
   636  							IsEligible: true,
   637  						},
   638  					},
   639  				},
   640  			},
   641  		},
   642  		{
   643  			// Scenario VIII
   644  			scenario:                       "Scenario VIII: Extra data not requested",
   645  			storePvtdataOfInvalidTx:        true,
   646  			skipPullingInvalidTransactions: false,
   647  			rwSetsInCache: []rwSet{
   648  				{
   649  					txID:        "tx1",
   650  					namespace:   "ns1",
   651  					collections: []string{"c1", "c2"},
   652  					preHash:     ts.preHash,
   653  					hash:        ts.hash,
   654  					seqInBlock:  1,
   655  				},
   656  			},
   657  			rwSetsInTransientStore: []rwSet{
   658  				{
   659  					txID:        "tx2",
   660  					namespace:   "ns1",
   661  					collections: []string{"c1", "c2"},
   662  					preHash:     ts.preHash,
   663  					hash:        ts.hash,
   664  					seqInBlock:  2,
   665  				},
   666  			},
   667  			rwSetsInPeer: []rwSet{
   668  				{
   669  					txID:        "tx3",
   670  					namespace:   "ns1",
   671  					collections: []string{"c1", "c2"},
   672  					preHash:     ts.preHash,
   673  					hash:        ts.hash,
   674  					seqInBlock:  3,
   675  				},
   676  			},
   677  			expectedDigKeys: []privdatacommon.DigKey{
   678  				{
   679  					TxId:       "tx3",
   680  					Namespace:  "ns1",
   681  					Collection: "c1",
   682  					BlockSeq:   ts.blockNum,
   683  					SeqInBlock: 3,
   684  				},
   685  			},
   686  			// Only tx3/ns1/c1 is requested, so the extra data found in every source should be skipped
   687  			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
   688  				{
   689  					TxID:       "tx3",
   690  					Invalid:    false,
   691  					SeqInBlock: 3,
   692  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   693  						ns1c1,
   694  					},
   695  				},
   696  			},
   697  			expectedBlockPvtdata: &ledger.BlockPvtdata{
   698  				PvtData: ledger.TxPvtDataMap{
   699  					3: &ledger.TxPvtData{
   700  						SeqInBlock: 3,
   701  						WriteSet: &rwset.TxPvtReadWriteSet{
   702  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   703  								{
   704  									Namespace: "ns1",
   705  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   706  										preHash:     ts.preHash,
   707  										collections: []string{"c1"},
   708  									}),
   709  								},
   710  							},
   711  						},
   712  					},
   713  				},
   714  				MissingPvtData: ledger.TxMissingPvtDataMap{},
   715  			},
   716  		},
   717  		{
   718  			// Scenario IX
   719  			scenario:                       "Scenario IX: Skip pulling invalid txs when skipPullingInvalidTransactions is true",
   720  			storePvtdataOfInvalidTx:        true,
   721  			skipPullingInvalidTransactions: true,
   722  			rwSetsInCache: []rwSet{
   723  				{
   724  					txID:        "tx1",
   725  					namespace:   "ns1",
   726  					collections: []string{"c1"},
   727  					preHash:     ts.preHash,
   728  					hash:        ts.hash,
   729  					seqInBlock:  1,
   730  				},
   731  			},
   732  			rwSetsInTransientStore: []rwSet{
   733  				{
   734  					txID:        "tx2",
   735  					namespace:   "ns1",
   736  					collections: []string{"c1"},
   737  					preHash:     ts.preHash,
   738  					hash:        ts.hash,
   739  					seqInBlock:  2,
   740  				},
   741  			},
   742  			rwSetsInPeer: []rwSet{
   743  				{
   744  					txID:        "tx3",
   745  					namespace:   "ns1",
   746  					collections: []string{"c1"},
   747  					preHash:     ts.preHash,
   748  					hash:        ts.hash,
   749  					seqInBlock:  3,
   750  				},
   751  			},
   752  			expectedDigKeys: []privdatacommon.DigKey{},
   753  			pvtdataToRetrieve: []*ledger.TxPvtdataInfo{
   754  				{
   755  					TxID:       "tx1",
   756  					Invalid:    true,
   757  					SeqInBlock: 1,
   758  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   759  						ns1c1,
   760  					},
   761  				},
   762  				{
   763  					TxID:       "tx2",
   764  					Invalid:    true,
   765  					SeqInBlock: 2,
   766  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   767  						ns1c1,
   768  					},
   769  				},
   770  				{
   771  					TxID:       "tx3",
   772  					Invalid:    true,
   773  					SeqInBlock: 3,
   774  					CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   775  						ns1c1,
   776  					},
   777  				},
   778  			},
   779  			// tx1 and tx2 are still retrieved from local sources despite being invalid
   780  			expectedBlockPvtdata: &ledger.BlockPvtdata{
   781  				PvtData: ledger.TxPvtDataMap{
   782  					1: &ledger.TxPvtData{
   783  						SeqInBlock: 1,
   784  						WriteSet: &rwset.TxPvtReadWriteSet{
   785  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   786  								{
   787  									Namespace: "ns1",
   788  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   789  										preHash:     ts.preHash,
   790  										collections: []string{"c1"},
   791  									}),
   792  								},
   793  							},
   794  						},
   795  					},
   796  					2: &ledger.TxPvtData{
   797  						SeqInBlock: 2,
   798  						WriteSet: &rwset.TxPvtReadWriteSet{
   799  							NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   800  								{
   801  									Namespace: "ns1",
   802  									CollectionPvtRwset: getCollectionPvtReadWriteSet(rwSet{
   803  										preHash:     ts.preHash,
   804  										collections: []string{"c1"},
   805  									}),
   806  								},
   807  							},
   808  						},
   809  					},
   810  				},
   811  				// Only tx3 is missing since we skip pulling invalid txs from peers
   812  				MissingPvtData: ledger.TxMissingPvtDataMap{
   813  					3: []*ledger.MissingPvtData{
   814  						{
   815  							Namespace:  "ns1",
   816  							Collection: "c1",
   817  							IsEligible: true,
   818  						},
   819  					},
   820  				},
   821  			},
   822  		},
   823  	}
   824  
   825  	for _, test := range tests {
   826  		t.Run(test.scenario, func(t *testing.T) {
   827  			testRetrievePvtdataSuccess(t, test.scenario, ts, test.storePvtdataOfInvalidTx, test.skipPullingInvalidTransactions,
   828  				test.rwSetsInCache, test.rwSetsInTransientStore, test.rwSetsInPeer, test.expectedDigKeys, test.pvtdataToRetrieve, test.expectedBlockPvtdata)
   829  		})
   830  	}
   831  }
   832  
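        // TestRetrievePvtdataFailure verifies that RetrievePvtdata returns an error
        // when a collection config carries a nil member orgs policy.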
   833  func TestRetrievePvtdataFailure(t *testing.T) {
   834  	err := msptesttools.LoadMSPSetupForTesting()
   835  	require.NoError(t, err, fmt.Sprintf("Failed to set up local msp for testing, got err %s", err))
   836  
   837  	identity := mspmgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
   838  	serializedID, err := identity.Serialize()
   839  	require.NoError(t, err, fmt.Sprintf("Serialize should have succeeded, got err %s", err))
   840  	data := []byte{1, 2, 3}
   841  	signature, err := identity.Sign(data)
   842  	require.NoError(t, err, fmt.Sprintf("Could not sign identity, got err %s", err))
   843  	peerSelfSignedData := protoutil.SignedData{
   844  		Identity:  serializedID,
   845  		Signature: signature,
   846  		Data:      data,
   847  	}
   848  	endorser := protoutil.MarshalOrPanic(&mspproto.SerializedIdentity{
   849  		Mspid:   identity.GetMSPIdentifier(),
   850  		IdBytes: []byte(fmt.Sprintf("p0%s", identity.GetMSPIdentifier())),
   851  	})
   852  
   853  	ts := testSupport{
   854  		preHash:            []byte("rws-pre-image"),
   855  		hash:               util2.ComputeSHA256([]byte("rws-pre-image")),
   856  		channelID:          "testchannelid",
   857  		blockNum:           uint64(1),
   858  		endorsers:          []string{identity.GetMSPIdentifier()},
   859  		peerSelfSignedData: peerSelfSignedData,
   860  	}
   861  
   862  	invalidns1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
   863  	invalidns1c1.CollectionConfig.MemberOrgsPolicy = nil
   864  
   865  	scenario := "Scenario I: Invalid collection config policy"
   866  	storePvtdataOfInvalidTx := true
   867  	skipPullingInvalidTransactions := false
   868  	rwSetsInCache := []rwSet{}
   869  	rwSetsInTransientStore := []rwSet{}
   870  	rwSetsInPeer := []rwSet{}
   871  	expectedDigKeys := []privdatacommon.DigKey{}
   872  	pvtdataToRetrieve := []*ledger.TxPvtdataInfo{
   873  		{
   874  			TxID:       "tx1",
   875  			Invalid:    false,
   876  			SeqInBlock: 1,
   877  			CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   878  				invalidns1c1,
   879  			},
   880  		},
   881  	}
   882  
   883  	expectedErr := "Collection config policy is nil"
   884  
   885  	testRetrievePvtdataFailure(t, scenario, ts,
   886  		peerSelfSignedData, storePvtdataOfInvalidTx, skipPullingInvalidTransactions,
   887  		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer,
   888  		expectedDigKeys, pvtdataToRetrieve,
   889  		expectedErr)
   890  }
   891  
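        // TestRetryFetchFromPeer verifies that private data missing from all local
        // sources is retried against remote peers: the number of retry sleeps stays
        // within PullRetryThreshold and each sleep uses pullRetrySleepInterval.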
   892  func TestRetryFetchFromPeer(t *testing.T) {
   893  	err := msptesttools.LoadMSPSetupForTesting()
   894  	require.NoError(t, err, fmt.Sprintf("Failed to set up local msp for testing, got err %s", err))
   895  
   896  	identity := mspmgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
   897  	serializedID, err := identity.Serialize()
   898  	require.NoError(t, err, fmt.Sprintf("Serialize should have succeeded, got err %s", err))
   899  	data := []byte{1, 2, 3}
   900  	signature, err := identity.Sign(data)
   901  	require.NoError(t, err, fmt.Sprintf("Could not sign identity, got err %s", err))
   902  	peerSelfSignedData := protoutil.SignedData{
   903  		Identity:  serializedID,
   904  		Signature: signature,
   905  		Data:      data,
   906  	}
   907  	endorser := protoutil.MarshalOrPanic(&mspproto.SerializedIdentity{
   908  		Mspid:   identity.GetMSPIdentifier(),
   909  		IdBytes: []byte(fmt.Sprintf("p0%s", identity.GetMSPIdentifier())),
   910  	})
   911  
   912  	ts := testSupport{
   913  		preHash:            []byte("rws-pre-image"),
   914  		hash:               util2.ComputeSHA256([]byte("rws-pre-image")),
   915  		channelID:          "testchannelid",
   916  		blockNum:           uint64(1),
   917  		endorsers:          []string{identity.GetMSPIdentifier()},
   918  		peerSelfSignedData: peerSelfSignedData,
   919  	}
   920  
   921  	ns1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
   922  	ns1c2 := collectionPvtdataInfoFromTemplate("ns1", "c2", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
   923  
   924  	tempdir, err := ioutil.TempDir("", "ts")
   925  	require.NoError(t, err, fmt.Sprintf("Failed to create test directory, got err %s", err))
   926  	storeProvider, err := transientstore.NewStoreProvider(tempdir)
   927  	require.NoError(t, err, fmt.Sprintf("Failed to create store provider, got err %s", err))
   928  	store, err := storeProvider.OpenStore(ts.channelID)
   929  	require.NoError(t, err, fmt.Sprintf("Failed to open store, got err %s", err))
   930  
   931  	defer storeProvider.Close()
   932  	defer os.RemoveAll(tempdir)
   933  
   934  	storePvtdataOfInvalidTx := true
   935  	skipPullingInvalidTransactions := false
   936  	rwSetsInCache := []rwSet{}
   937  	rwSetsInTransientStore := []rwSet{}
   938  	rwSetsInPeer := []rwSet{}
   939  	expectedDigKeys := []privdatacommon.DigKey{
   940  		{
   941  			TxId:       "tx1",
   942  			Namespace:  "ns1",
   943  			Collection: "c1",
   944  			BlockSeq:   ts.blockNum,
   945  			SeqInBlock: 1,
   946  		},
   947  		{
   948  			TxId:       "tx1",
   949  			Namespace:  "ns1",
   950  			Collection: "c2",
   951  			BlockSeq:   ts.blockNum,
   952  			SeqInBlock: 1,
   953  		},
   954  	}
   955  	pvtdataToRetrieve := []*ledger.TxPvtdataInfo{
   956  		{
   957  			TxID:       "tx1",
   958  			Invalid:    false,
   959  			SeqInBlock: 1,
   960  			CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
   961  				ns1c1,
   962  				ns1c2,
   963  			},
   964  		},
   965  	}
   966  	pdp := setupPrivateDataProvider(t, ts, testConfig,
   967  		storePvtdataOfInvalidTx, skipPullingInvalidTransactions, store,
   968  		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer,
   969  		expectedDigKeys)
   970  	require.NotNil(t, pdp)
   971  
   972  	fakeSleeper := &mocks.Sleeper{}
   973  	SetSleeper(pdp, fakeSleeper)
   974  	fakeSleeper.SleepStub = func(sleepDur time.Duration) {
   975  		time.Sleep(sleepDur)
   976  	}
   977  
   978  	_, err = pdp.RetrievePvtdata(pvtdataToRetrieve)
   979  	assert.NoError(t, err)
   980  	// the retry loop should not sleep more times than the pull retry threshold allows,
   981  	// and each sleep should use the configured retry interval
   982  	maxRetries := int(testConfig.PullRetryThreshold / pullRetrySleepInterval)
   983  	assert.True(t, fakeSleeper.SleepCallCount() <= maxRetries)
   984  	assert.Equal(t, pullRetrySleepInterval, fakeSleeper.SleepArgsForCall(0))
   985  }
   986  
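        // TestSkipPullingAllInvalidTransactions verifies that when
        // skipPullingInvalidTransactions is enabled and every requested transaction is
        // invalid, the provider neither sleeps nor fetches from peers and reports the
        // collections as eligible missing private data.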
   987  func TestSkipPullingAllInvalidTransactions(t *testing.T) {
   988  	err := msptesttools.LoadMSPSetupForTesting()
   989  	require.NoError(t, err, fmt.Sprintf("Failed to set up local msp for testing, got err %s", err))
   990  
   991  	identity := mspmgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
   992  	serializedID, err := identity.Serialize()
   993  	require.NoError(t, err, fmt.Sprintf("Serialize should have succeeded, got err %s", err))
   994  	data := []byte{1, 2, 3}
   995  	signature, err := identity.Sign(data)
   996  	require.NoError(t, err, fmt.Sprintf("Could not sign identity, got err %s", err))
   997  	peerSelfSignedData := protoutil.SignedData{
   998  		Identity:  serializedID,
   999  		Signature: signature,
  1000  		Data:      data,
  1001  	}
  1002  	endorser := protoutil.MarshalOrPanic(&mspproto.SerializedIdentity{
  1003  		Mspid:   identity.GetMSPIdentifier(),
  1004  		IdBytes: []byte(fmt.Sprintf("p0%s", identity.GetMSPIdentifier())),
  1005  	})
  1006  
  1007  	ts := testSupport{
  1008  		preHash:            []byte("rws-pre-image"),
  1009  		hash:               util2.ComputeSHA256([]byte("rws-pre-image")),
  1010  		channelID:          "testchannelid",
  1011  		blockNum:           uint64(1),
  1012  		endorsers:          []string{identity.GetMSPIdentifier()},
  1013  		peerSelfSignedData: peerSelfSignedData,
  1014  	}
  1015  
  1016  	ns1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
  1017  	ns1c2 := collectionPvtdataInfoFromTemplate("ns1", "c2", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
  1018  
  1019  	tempdir, err := ioutil.TempDir("", "ts")
  1020  	require.NoError(t, err, fmt.Sprintf("Failed to create test directory, got err %s", err))
  1021  	storeProvider, err := transientstore.NewStoreProvider(tempdir)
  1022  	require.NoError(t, err, fmt.Sprintf("Failed to create store provider, got err %s", err))
  1023  	store, err := storeProvider.OpenStore(ts.channelID)
  1024  	require.NoError(t, err, fmt.Sprintf("Failed to open store, got err %s", err))
  1025  
  1026  	defer storeProvider.Close()
  1027  	defer os.RemoveAll(tempdir)
  1028  
  1029  	storePvtdataOfInvalidTx := true
  1030  	skipPullingInvalidTransactions := true
  1031  	rwSetsInCache := []rwSet{}
  1032  	rwSetsInTransientStore := []rwSet{}
  1033  	rwSetsInPeer := []rwSet{}
  1034  	expectedDigKeys := []privdatacommon.DigKey{}
  1035  	expectedBlockPvtdata := &ledger.BlockPvtdata{
  1036  		PvtData: ledger.TxPvtDataMap{},
  1037  		MissingPvtData: ledger.TxMissingPvtDataMap{
  1038  			1: []*ledger.MissingPvtData{
  1039  				{
  1040  					Namespace:  "ns1",
  1041  					Collection: "c1",
  1042  					IsEligible: true,
  1043  				},
  1044  				{
  1045  					Namespace:  "ns1",
  1046  					Collection: "c2",
  1047  					IsEligible: true,
  1048  				},
  1049  			},
  1050  		},
  1051  	}
  1052  	pvtdataToRetrieve := []*ledger.TxPvtdataInfo{
  1053  		{
  1054  			TxID:       "tx1",
  1055  			Invalid:    true,
  1056  			SeqInBlock: 1,
  1057  			CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
  1058  				ns1c1,
  1059  				ns1c2,
  1060  			},
  1061  		},
  1062  	}
  1063  	pdp := setupPrivateDataProvider(t, ts, testConfig,
  1064  		storePvtdataOfInvalidTx, skipPullingInvalidTransactions, store,
  1065  		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer,
  1066  		expectedDigKeys)
  1067  	require.NotNil(t, pdp)
  1068  
  1069  	fakeSleeper := &mocks.Sleeper{}
  1070  	SetSleeper(pdp, fakeSleeper)
  1071  	newFetcher := &fetcherMock{t: t}
  1072  	pdp.fetcher = newFetcher
  1073  
  1074  	retrievedPvtdata, err := pdp.RetrievePvtdata(pvtdataToRetrieve)
  1075  	assert.NoError(t, err)
  1076  
  1077  	blockPvtdata := sortBlockPvtdata(retrievedPvtdata.GetBlockPvtdata())
  1078  	assert.Equal(t, expectedBlockPvtdata, blockPvtdata)
  1079  
  1080  	// Check sleep and fetch were never called
  1081  	assert.Equal(t, fakeSleeper.SleepCallCount(), 0)
  1082  	assert.Len(t, newFetcher.Calls, 0)
  1083  }
  1084  
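        // TestRetrievedPvtdataPurgeBelowHeight verifies that Purge removes the
        // retrieved transaction's write set from the transient store right away, and
        // purges older entries once the block number reaches a multiple of
        // TransientBlockRetention.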
  1085  func TestRetrievedPvtdataPurgeBelowHeight(t *testing.T) {
  1086  	conf := testConfig
  1087  	conf.TransientBlockRetention = 5
  1088  
  1089  	err := msptesttools.LoadMSPSetupForTesting()
  1090  	require.NoError(t, err, fmt.Sprintf("Failed to set up local msp for testing, got err %s", err))
  1091  
  1092  	identity := mspmgmt.GetLocalSigningIdentityOrPanic(factory.GetDefault())
  1093  	serializedID, err := identity.Serialize()
  1094  	require.NoError(t, err, fmt.Sprintf("Serialize should have succeeded, got err %s", err))
  1095  	data := []byte{1, 2, 3}
  1096  	signature, err := identity.Sign(data)
  1097  	require.NoError(t, err, fmt.Sprintf("Could not sign identity, got err %s", err))
  1098  	peerSelfSignedData := protoutil.SignedData{
  1099  		Identity:  serializedID,
  1100  		Signature: signature,
  1101  		Data:      data,
  1102  	}
  1103  	endorser := protoutil.MarshalOrPanic(&mspproto.SerializedIdentity{
  1104  		Mspid:   identity.GetMSPIdentifier(),
  1105  		IdBytes: []byte(fmt.Sprintf("p0%s", identity.GetMSPIdentifier())),
  1106  	})
  1107  
  1108  	ts := testSupport{
  1109  		preHash:            []byte("rws-pre-image"),
  1110  		hash:               util2.ComputeSHA256([]byte("rws-pre-image")),
  1111  		channelID:          "testchannelid",
  1112  		blockNum:           uint64(9),
  1113  		endorsers:          []string{identity.GetMSPIdentifier()},
  1114  		peerSelfSignedData: peerSelfSignedData,
  1115  	}
  1116  
  1117  	ns1c1 := collectionPvtdataInfoFromTemplate("ns1", "c1", identity.GetMSPIdentifier(), ts.hash, endorser, signature)
  1118  
  1119  	tempdir, err := ioutil.TempDir("", "ts")
  1120  	require.NoError(t, err, fmt.Sprintf("Failed to create test directory, got err %s", err))
  1121  	storeProvider, err := transientstore.NewStoreProvider(tempdir)
  1122  	require.NoError(t, err, fmt.Sprintf("Failed to create store provider, got err %s", err))
  1123  	store, err := storeProvider.OpenStore(ts.channelID)
  1124  	require.NoError(t, err, fmt.Sprintf("Failed to open store, got err %s", err))
  1125  
  1126  	defer storeProvider.Close()
  1127  	defer os.RemoveAll(tempdir)
  1128  
  1129  	// set up store with 9 existing private data write sets
  1130  	for i := 0; i < 9; i++ {
  1131  		txID := fmt.Sprintf("tx%d", i+1)
  1132  		store.Persist(txID, uint64(i), &tspb.TxPvtReadWriteSetWithConfigInfo{
  1133  			PvtRwset: &rwset.TxPvtReadWriteSet{
  1134  				NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1135  					{
  1136  						Namespace: "ns1",
  1137  						CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1138  							{
  1139  								CollectionName: "c1",
  1140  								Rwset:          []byte("rws-pre-image"),
  1141  							},
  1142  						},
  1143  					},
  1144  				},
  1145  			},
  1146  			CollectionConfigs: make(map[string]*peer.CollectionConfigPackage),
  1147  		})
  1148  	}
  1149  
  1150  	// test that the initial data shows up in the store
  1151  	for i := 1; i <= 9; i++ {
  1152  		func() {
  1153  			txID := fmt.Sprintf("tx%d", i)
  1154  			iterator, err := store.GetTxPvtRWSetByTxid(txID, nil)
  1155  			require.NoError(t, err, fmt.Sprintf("Failed obtaining iterator from transient store, got err %s", err))
  1156  			defer iterator.Close()
  1157  			res, err := iterator.Next()
  1158  			require.NoError(t, err, fmt.Sprintf("Failed iterating, got err %s", err))
  1159  			assert.NotNil(t, res)
  1160  		}()
  1161  	}
  1162  
  1163  	storePvtdataOfInvalidTx := true
  1164  	skipPullingInvalidTransactions := false
  1165  	rwSetsInCache := []rwSet{
  1166  		{
  1167  			txID:        "tx9",
  1168  			namespace:   "ns1",
  1169  			collections: []string{"c1"},
  1170  			preHash:     ts.preHash,
  1171  			hash:        ts.hash,
  1172  			seqInBlock:  1,
  1173  		},
  1174  	}
  1175  	rwSetsInTransientStore := []rwSet{}
  1176  	rwSetsInPeer := []rwSet{}
  1177  	expectedDigKeys := []privdatacommon.DigKey{}
  1178  	// request tx9 which is found in both the cache and transient store
  1179  	pvtdataToRetrieve := []*ledger.TxPvtdataInfo{
  1180  		{
  1181  			TxID:       "tx9",
  1182  			Invalid:    false,
  1183  			SeqInBlock: 1,
  1184  			CollectionPvtdataInfo: []*ledger.CollectionPvtdataInfo{
  1185  				ns1c1,
  1186  			},
  1187  		},
  1188  	}
  1189  	pdp := setupPrivateDataProvider(t, ts, conf,
  1190  		storePvtdataOfInvalidTx, skipPullingInvalidTransactions, store,
  1191  		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer, expectedDigKeys)
  1192  	require.NotNil(t, pdp)
  1193  
  1194  	retrievedPvtdata, err := pdp.RetrievePvtdata(pvtdataToRetrieve)
  1195  	require.NoError(t, err)
  1196  
  1197  	retrievedPvtdata.Purge()
  1198  
  1199  	for i := 1; i <= 9; i++ {
  1200  		func() {
  1201  			txID := fmt.Sprintf("tx%d", i)
  1202  			iterator, err := store.GetTxPvtRWSetByTxid(txID, nil)
  1203  			require.NoError(t, err, fmt.Sprintf("Failed obtaining iterator from transient store, got err %s", err))
  1204  			defer iterator.Close()
  1205  			res, err := iterator.Next()
  1206  			require.NoError(t, err, fmt.Sprintf("Failed iterating, got err %s", err))
  1207  			// Check that only the retrieved write set (tx9) was purged, since blockNum 9 is not yet a multiple of transientBlockRetention
  1208  			if i == 9 {
  1209  				assert.Nil(t, res)
  1210  			} else {
  1211  				assert.NotNil(t, res)
  1212  			}
  1213  		}()
  1214  	}
  1215  
  1216  	// increment blockNum to a multiple of transientBlockRetention
  1217  	pdp.blockNum = 10
  1218  	retrievedPvtdata, err = pdp.RetrievePvtdata(pvtdataToRetrieve)
  1219  	require.NoError(t, err)
  1220  
  1221  	retrievedPvtdata.Purge()
  1222  
  1223  	for i := 1; i <= 9; i++ {
  1224  		func() {
  1225  			txID := fmt.Sprintf("tx%d", i)
  1226  			iterator, err := store.GetTxPvtRWSetByTxid(txID, nil)
  1227  			require.NoError(t, err, fmt.Sprintf("Failed obtaining iterator from transient store, got err %s", err))
  1228  			defer iterator.Close()
  1229  			res, err := iterator.Next()
  1230  			require.NoError(t, err, fmt.Sprintf("Failed iterating, got err %s", err))
  1231  			// Check that the first 5 sets have been purged alongside the 9th set purged earlier
  1232  			if i < 6 || i == 9 {
  1233  				assert.Nil(t, res)
  1234  			} else {
  1235  				assert.NotNil(t, res)
  1236  			}
  1237  		}()
  1238  	}
  1239  }
  1240  
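        // testRetrievePvtdataSuccess runs one success scenario: it seeds the three
        // data sources, calls RetrievePvtdata, compares the sorted result with the
        // expected block private data, and checks that Purge clears the transient store.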
  1241  func testRetrievePvtdataSuccess(t *testing.T,
  1242  	scenario string,
  1243  	ts testSupport,
  1244  	storePvtdataOfInvalidTx, skipPullingInvalidTransactions bool,
  1245  	rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer []rwSet,
  1246  	expectedDigKeys []privdatacommon.DigKey,
  1247  	pvtdataToRetrieve []*ledger.TxPvtdataInfo,
  1248  	expectedBlockPvtdata *ledger.BlockPvtdata) {
  1249  
  1250  	fmt.Println("\n" + scenario)
  1251  
  1252  	tempdir, err := ioutil.TempDir("", "ts")
  1253  	require.NoError(t, err, fmt.Sprintf("Failed to create test directory, got err %s", err))
  1254  	storeProvider, err := transientstore.NewStoreProvider(tempdir)
  1255  	require.NoError(t, err, fmt.Sprintf("Failed to create store provider, got err %s", err))
  1256  	store, err := storeProvider.OpenStore(ts.channelID)
  1257  	require.NoError(t, err, fmt.Sprintf("Failed to open store, got err %s", err))
  1258  	defer storeProvider.Close()
  1259  	defer os.RemoveAll(tempdir)
  1260  
  1261  	pdp := setupPrivateDataProvider(t, ts, testConfig,
  1262  		storePvtdataOfInvalidTx, skipPullingInvalidTransactions, store,
  1263  		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer,
  1264  		expectedDigKeys)
  1265  	require.NotNil(t, pdp, scenario)
  1266  
  1267  	retrievedPvtdata, err := pdp.RetrievePvtdata(pvtdataToRetrieve)
  1268  	assert.NoError(t, err, scenario)
  1269  
  1270  	// the collection private write sets can be assembled out of order,
  1271  	// so sort them before comparing against the expected block private data
  1272  	blockPvtdata := sortBlockPvtdata(retrievedPvtdata.GetBlockPvtdata())
  1273  	assert.Equal(t, expectedBlockPvtdata, blockPvtdata, scenario)
  1274  
  1275  	// Test that pvtdata is purged from the transient store when Purge is called
  1276  	testPurged(t, scenario, retrievedPvtdata, store, pvtdataToRetrieve)
  1277  }
  1278  
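        // testRetrievePvtdataFailure runs one failure scenario and asserts that
        // RetrievePvtdata returns the expected error.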
  1279  func testRetrievePvtdataFailure(t *testing.T,
  1280  	scenario string,
  1281  	ts testSupport,
  1282  	peerSelfSignedData protoutil.SignedData,
  1283  	storePvtdataOfInvalidTx, skipPullingInvalidTransactions bool,
  1284  	rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer []rwSet,
  1285  	expectedDigKeys []privdatacommon.DigKey,
  1286  	pvtdataToRetrieve []*ledger.TxPvtdataInfo,
  1287  	expectedErr string) {
  1288  
  1289  	fmt.Println("\n" + scenario)
  1290  
  1291  	tempdir, err := ioutil.TempDir("", "ts")
  1292  	require.NoError(t, err, fmt.Sprintf("Failed to create test directory, got err %s", err))
  1293  	storeProvider, err := transientstore.NewStoreProvider(tempdir)
  1294  	require.NoError(t, err, fmt.Sprintf("Failed to create store provider, got err %s", err))
  1295  	store, err := storeProvider.OpenStore(ts.channelID)
  1296  	require.NoError(t, err, fmt.Sprintf("Failed to open store, got err %s", err))
  1297  	defer storeProvider.Close()
  1298  	defer os.RemoveAll(tempdir)
  1299  
  1300  	pdp := setupPrivateDataProvider(t, ts, testConfig,
  1301  		storePvtdataOfInvalidTx, skipPullingInvalidTransactions, store,
  1302  		rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer,
  1303  		expectedDigKeys)
  1304  	require.NotNil(t, pdp, scenario)
  1305  
  1306  	_, err = pdp.RetrievePvtdata(pvtdataToRetrieve)
  1307  	assert.EqualError(t, err, expectedErr, scenario)
  1308  }
  1309  
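        // setupPrivateDataProvider seeds the cache, the transient store, and the mock
        // fetcher with the given write sets and returns a PvtdataProvider wired to
        // them, using disabled metrics and the channel's MSP manager for identity
        // deserialization.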
  1310  func setupPrivateDataProvider(t *testing.T,
  1311  	ts testSupport,
  1312  	config CoordinatorConfig,
  1313  	storePvtdataOfInvalidTx, skipPullingInvalidTransactions bool, store *transientstore.Store,
  1314  	rwSetsInCache, rwSetsInTransientStore, rwSetsInPeer []rwSet,
  1315  	expectedDigKeys []privdatacommon.DigKey) *PvtdataProvider {
  1316  
  1317  	metrics := metrics.NewGossipMetrics(&disabled.Provider{}).PrivdataMetrics
  1318  
  1319  	idDeserializerFactory := IdentityDeserializerFactoryFunc(func(chainID string) msp.IdentityDeserializer {
  1320  		return mspmgmt.GetManagerForChain(ts.channelID)
  1321  	})
  1322  
  1323  	// set up data in cache
  1324  	prefetchedPvtdata := storePvtdataInCache(rwSetsInCache)
  1325  	// set up data in transient store
  1326  	err := storePvtdataInTransientStore(rwSetsInTransientStore, store)
  1327  	require.NoError(t, err, fmt.Sprintf("Failed to store private data in transient store: got err %s", err))
  1328  
  1329  	// set up data in peer
  1330  	fetcher := &fetcherMock{t: t}
  1331  	storePvtdataInPeer(rwSetsInPeer, expectedDigKeys, fetcher, ts, skipPullingInvalidTransactions)
  1332  
  1333  	pdp := &PvtdataProvider{
  1334  		mspID:                                   "Org1MSP",
  1335  		selfSignedData:                          ts.peerSelfSignedData,
  1336  		logger:                                  logger,
  1337  		listMissingPrivateDataDurationHistogram: metrics.ListMissingPrivateDataDuration.With("channel", ts.channelID),
  1338  		fetchDurationHistogram:                  metrics.FetchDuration.With("channel", ts.channelID),
  1339  		purgeDurationHistogram:                  metrics.PurgeDuration.With("channel", ts.channelID),
  1340  		transientStore:                          store,
  1341  		pullRetryThreshold:                      config.PullRetryThreshold,
  1342  		prefetchedPvtdata:                       prefetchedPvtdata,
  1343  		transientBlockRetention:                 config.TransientBlockRetention,
  1344  		channelID:                               ts.channelID,
  1345  		blockNum:                                ts.blockNum,
  1346  		storePvtdataOfInvalidTx:                 storePvtdataOfInvalidTx,
  1347  		skipPullingInvalidTransactions:          skipPullingInvalidTransactions,
  1348  		fetcher:                                 fetcher,
  1349  		idDeserializerFactory:                   idDeserializerFactory,
  1350  	}
  1351  
  1352  	return pdp
  1353  }
  1354  
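        // testPurged calls Purge on the retrieved private data and asserts that the
        // corresponding write sets are no longer present in the transient store.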
  1355  func testPurged(t *testing.T,
  1356  	scenario string,
  1357  	retrievedPvtdata ledger.RetrievedPvtdata,
  1358  	store *transientstore.Store,
  1359  	txPvtdataInfo []*ledger.TxPvtdataInfo) {
  1360  
  1361  	retrievedPvtdata.Purge()
  1362  	for _, pvtdata := range retrievedPvtdata.GetBlockPvtdata().PvtData {
  1363  		func() {
  1364  			txID := getTxIDBySeqInBlock(pvtdata.SeqInBlock, txPvtdataInfo)
  1365  			require.NotEqual(t, txID, "", fmt.Sprintf("Could not find txID for SeqInBlock %d", pvtdata.SeqInBlock), scenario)
  1366  
  1367  			iterator, err := store.GetTxPvtRWSetByTxid(txID, nil)
  1368  			require.NoError(t, err, fmt.Sprintf("Failed obtaining iterator from transient store, got err %s", err))
  1369  			defer iterator.Close()
  1370  
  1371  			res, err := iterator.Next()
  1372  			require.NoError(t, err, fmt.Sprintf("Failed iterating, got err %s", err))
  1373  
  1374  			assert.Nil(t, res, scenario)
  1375  		}()
  1376  	}
  1377  }
  1378  
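        // storePvtdataInCache converts the given write sets into the prefetched
        // private data collections handed to the provider as its cache.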
  1379  func storePvtdataInCache(rwsets []rwSet) util.PvtDataCollections {
  1380  	res := []*ledger.TxPvtData{}
  1381  	for _, rws := range rwsets {
  1382  		set := &rwset.TxPvtReadWriteSet{
  1383  			NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1384  				{
  1385  					Namespace:          rws.namespace,
  1386  					CollectionPvtRwset: getCollectionPvtReadWriteSet(rws),
  1387  				},
  1388  			},
  1389  		}
  1390  
  1391  		res = append(res, &ledger.TxPvtData{
  1392  			SeqInBlock: rws.seqInBlock,
  1393  			WriteSet:   set,
  1394  		})
  1395  	}
  1396  
  1397  	return res
  1398  }
  1399  
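        // storePvtdataInTransientStore persists the given write sets into the
        // transient store keyed by their transaction IDs.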
  1400  func storePvtdataInTransientStore(rwsets []rwSet, store *transientstore.Store) error {
  1401  	for _, rws := range rwsets {
  1402  		set := &tspb.TxPvtReadWriteSetWithConfigInfo{
  1403  			PvtRwset: &rwset.TxPvtReadWriteSet{
  1404  				NsPvtRwset: []*rwset.NsPvtReadWriteSet{
  1405  					{
  1406  						Namespace:          rws.namespace,
  1407  						CollectionPvtRwset: getCollectionPvtReadWriteSet(rws),
  1408  					},
  1409  				},
  1410  			},
  1411  			CollectionConfigs: make(map[string]*peer.CollectionConfigPackage),
  1412  		}
  1413  
  1414  		err := store.Persist(rws.txID, 1, set)
  1415  		if err != nil {
  1416  			return err
  1417  		}
  1418  	}
  1419  	return nil
  1420  }
  1421  
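        // storePvtdataInPeer programs the mock fetcher to return the given write sets
        // as available elements when asked for the expected digests and endorsers.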
  1422  func storePvtdataInPeer(rwSets []rwSet, expectedDigKeys []privdatacommon.DigKey, fetcher *fetcherMock, ts testSupport, skipPullingInvalidTransactions bool) {
  1423  	availableElements := []*proto.PvtDataElement{}
  1424  	for _, rws := range rwSets {
  1425  		for _, c := range rws.collections {
  1426  			availableElements = append(availableElements, &proto.PvtDataElement{
  1427  				Digest: &proto.PvtDataDigest{
  1428  					TxId:       rws.txID,
  1429  					Namespace:  rws.namespace,
  1430  					Collection: c,
  1431  					BlockSeq:   ts.blockNum,
  1432  					SeqInBlock: rws.seqInBlock,
  1433  				},
  1434  				Payload: [][]byte{ts.preHash},
  1435  			})
  1436  		}
  1437  	}
  1438  
  1439  	endorsers := []string{}
  1440  	if len(expectedDigKeys) > 0 {
  1441  		endorsers = ts.endorsers
  1442  	}
  1443  	fetcher.On("fetch", mock.Anything).expectingDigests(expectedDigKeys).expectingEndorsers(endorsers...).Return(&privdatacommon.FetchedPvtDataContainer{
  1444  		AvailableElements: availableElements,
  1445  	}, nil)
  1446  }
  1447  
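        // getCollectionPvtReadWriteSet builds one collection write set per collection
        // in the rwSet, sorted by collection name for deterministic comparison.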
  1448  func getCollectionPvtReadWriteSet(rws rwSet) []*rwset.CollectionPvtReadWriteSet {
  1449  	colPvtRwSet := []*rwset.CollectionPvtReadWriteSet{}
  1450  	for _, c := range rws.collections {
  1451  		colPvtRwSet = append(colPvtRwSet, &rwset.CollectionPvtReadWriteSet{
  1452  			CollectionName: c,
  1453  			Rwset:          rws.preHash,
  1454  		})
  1455  	}
  1456  
  1457  	sort.Slice(colPvtRwSet, func(i, j int) bool {
  1458  		return colPvtRwSet[i].CollectionName < colPvtRwSet[j].CollectionName
  1459  	})
  1460  
  1461  	return colPvtRwSet
  1462  }
  1463  
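        // sortBlockPvtdata sorts collection write sets and missing-data records by
        // collection name so that results can be compared deterministically.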
  1464  func sortBlockPvtdata(blockPvtdata *ledger.BlockPvtdata) *ledger.BlockPvtdata {
  1465  	for _, pvtdata := range blockPvtdata.PvtData {
  1466  		for _, ws := range pvtdata.WriteSet.NsPvtRwset {
  1467  			sort.Slice(ws.CollectionPvtRwset, func(i, j int) bool {
  1468  				return ws.CollectionPvtRwset[i].CollectionName < ws.CollectionPvtRwset[j].CollectionName
  1469  			})
  1470  		}
  1471  	}
  1472  	for _, missingPvtdata := range blockPvtdata.MissingPvtData {
  1473  		sort.Slice(missingPvtdata, func(i, j int) bool {
  1474  			return missingPvtdata[i].Collection < missingPvtdata[j].Collection
  1475  		})
  1476  	}
  1477  	return blockPvtdata
  1478  }
  1479  
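        // collectionPvtdataInfoFromTemplate builds a CollectionPvtdataInfo for the
        // given namespace and collection, with a signature policy that admits members
        // of the given MSP and a single endorsement.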
  1480  func collectionPvtdataInfoFromTemplate(namespace, collection, mspIdentifier string, hash, endorser, signature []byte) *ledger.CollectionPvtdataInfo {
  1481  	return &ledger.CollectionPvtdataInfo{
  1482  		Collection:   collection,
  1483  		Namespace:    namespace,
  1484  		ExpectedHash: hash,
  1485  		Endorsers: []*peer.Endorsement{
  1486  			{
  1487  				Endorser:  endorser,
  1488  				Signature: signature,
  1489  			},
  1490  		},
  1491  		CollectionConfig: &peer.StaticCollectionConfig{
  1492  			Name:           collection,
  1493  			MemberOnlyRead: true,
  1494  			MemberOrgsPolicy: &peer.CollectionPolicyConfig{
  1495  				Payload: &peer.CollectionPolicyConfig_SignaturePolicy{
  1496  					SignaturePolicy: &common.SignaturePolicyEnvelope{
  1497  						Rule: &common.SignaturePolicy{
  1498  							Type: &common.SignaturePolicy_SignedBy{
  1499  								SignedBy: 0,
  1500  							},
  1501  						},
  1502  						Identities: []*mspproto.MSPPrincipal{
  1503  							{
  1504  								PrincipalClassification: mspproto.MSPPrincipal_ROLE,
  1505  								Principal: protoutil.MarshalOrPanic(&mspproto.MSPRole{
  1506  									MspIdentifier: mspIdentifier,
  1507  									Role:          mspproto.MSPRole_MEMBER,
  1508  								}),
  1509  							},
  1510  						},
  1511  					},
  1512  				},
  1513  			},
  1514  		},
  1515  	}
  1516  }