github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/core/ledger/kvledger/kv_ledger_test.go (about)

     1  /*
     2  Copyright hechain. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package kvledger
     8  
     9  import (
    10  	"os"
    11  	"testing"
    12  
    13  	"github.com/golang/protobuf/proto"
    14  	"github.com/hechain20/hechain/common/flogging"
    15  	"github.com/hechain20/hechain/common/ledger/testutil"
    16  	"github.com/hechain20/hechain/common/util"
    17  	"github.com/hechain20/hechain/core/ledger"
    18  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/validation"
    19  	"github.com/hechain20/hechain/core/ledger/mock"
    20  	"github.com/hechain20/hechain/core/ledger/pvtdatapolicy"
    21  	btltestutil "github.com/hechain20/hechain/core/ledger/pvtdatapolicy/testutil"
    22  	"github.com/hechain20/hechain/internal/pkg/txflags"
    23  	"github.com/hechain20/hechain/protoutil"
    24  	"github.com/hyperledger/fabric-protos-go/common"
    25  	"github.com/hyperledger/fabric-protos-go/ledger/queryresult"
    26  	"github.com/hyperledger/fabric-protos-go/ledger/rwset"
    27  	"github.com/hyperledger/fabric-protos-go/peer"
    28  	"github.com/pkg/errors"
    29  	"github.com/stretchr/testify/require"
    30  )
    31  
// couchDBAddress and stopCouchDBFunc track a shared CouchDB instance that
// tests in this package may start lazily. When couchDBAddress is non-empty,
// TestMain invokes stopCouchDBFunc during teardown.
var (
	couchDBAddress  string
	stopCouchDBFunc func()
)
    36  
    37  func TestMain(m *testing.M) {
    38  	flogging.ActivateSpec("lockbasedtxmgr,statevalidator,valimpl,confighistory,pvtstatepurgemgmt=debug")
    39  	exitCode := m.Run()
    40  	if couchDBAddress != "" {
    41  		couchDBAddress = ""
    42  		stopCouchDBFunc()
    43  	}
    44  	os.Exit(exitCode)
    45  }
    46  
    47  func TestKVLedgerNilHistoryDBProvider(t *testing.T) {
    48  	kvl := &kvLedger{}
    49  	qe, err := kvl.NewHistoryQueryExecutor()
    50  	require.Nil(
    51  		t,
    52  		qe,
    53  		"NewHistoryQueryExecutor should return nil when history db provider is nil",
    54  	)
    55  	require.NoError(
    56  		t,
    57  		err,
    58  		"NewHistoryQueryExecutor should return an error when history db provider is nil",
    59  	)
    60  }
    61  
// TestKVLedgerBlockStorage exercises block storage through the public ledger
// API: it commits two blocks on top of the genesis block and then retrieves
// blocks and transactions by hash, number, and transaction ID. The error path
// closes the underlying block store provider and verifies TxIDExists surfaces
// the storage failure.
func TestKVLedgerBlockStorage(t *testing.T) {
	t.Run("green-path", func(t *testing.T) {
		conf, cleanup := testConfig(t)
		defer cleanup()
		provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
		defer provider.Close()

		bg, gb := testutil.NewBlockGenerator(t, "testLedger", false)
		gbHash := protoutil.BlockHeaderHash(gb.Header)
		lgr, err := provider.CreateFromGenesisBlock(gb)
		require.NoError(t, err)
		defer lgr.Close()

		// Freshly created ledger contains only the genesis block.
		bcInfo, _ := lgr.GetBlockchainInfo()
		require.Equal(t, &common.BlockchainInfo{
			Height: 1, CurrentBlockHash: gbHash, PreviousBlockHash: nil,
		}, bcInfo)

		// Simulate a transaction writing key1-key3 and commit it as block 1.
		txid := util.GenerateUUID()
		simulator, _ := lgr.NewTxSimulator(txid)
		require.NoError(t, simulator.SetState("ns1", "key1", []byte("value1")))
		require.NoError(t, simulator.SetState("ns1", "key2", []byte("value2")))
		require.NoError(t, simulator.SetState("ns1", "key3", []byte("value3")))
		simulator.Done()
		simRes, _ := simulator.GetTxSimulationResults()
		pubSimBytes, _ := simRes.GetPubSimulationBytes()
		block1 := bg.NextBlock([][]byte{pubSimBytes})
		require.NoError(t, lgr.CommitLegacy(&ledger.BlockAndPvtData{Block: block1}, &ledger.CommitOptions{}))

		bcInfo, _ = lgr.GetBlockchainInfo()
		block1Hash := protoutil.BlockHeaderHash(block1.Header)
		require.Equal(t, &common.BlockchainInfo{
			Height: 2, CurrentBlockHash: block1Hash, PreviousBlockHash: gbHash,
		}, bcInfo)

		// Overwrite the same keys in a second transaction and commit block 2.
		txid = util.GenerateUUID()
		simulator, _ = lgr.NewTxSimulator(txid)
		require.NoError(t, simulator.SetState("ns1", "key1", []byte("value4")))
		require.NoError(t, simulator.SetState("ns1", "key2", []byte("value5")))
		require.NoError(t, simulator.SetState("ns1", "key3", []byte("value6")))
		simulator.Done()
		simRes, _ = simulator.GetTxSimulationResults()
		pubSimBytes, _ = simRes.GetPubSimulationBytes()
		block2 := bg.NextBlock([][]byte{pubSimBytes})
		require.NoError(t, lgr.CommitLegacy(&ledger.BlockAndPvtData{Block: block2}, &ledger.CommitOptions{}))

		bcInfo, _ = lgr.GetBlockchainInfo()
		block2Hash := protoutil.BlockHeaderHash(block2.Header)
		require.Equal(t, &common.BlockchainInfo{
			Height: 3, CurrentBlockHash: block2Hash, PreviousBlockHash: block1Hash,
		}, bcInfo)

		// Retrieval by block hash should round-trip the committed blocks.
		b0, _ := lgr.GetBlockByHash(gbHash)
		require.True(t, proto.Equal(b0, gb), "proto messages are not equal")

		b1, _ := lgr.GetBlockByHash(block1Hash)
		require.True(t, proto.Equal(b1, block1), "proto messages are not equal")

		// Retrieval by block number should agree with retrieval by hash.
		b0, _ = lgr.GetBlockByNumber(0)
		require.True(t, proto.Equal(b0, gb), "proto messages are not equal")

		b1, _ = lgr.GetBlockByNumber(1)
		require.Equal(t, block1, b1)

		// get the tran id from the 2nd block, then use it to test GetTransactionByID()
		txEnvBytes2 := block1.Data.Data[0]
		txEnv2, err := protoutil.GetEnvelopeFromBlock(txEnvBytes2)
		require.NoError(t, err, "Error upon GetEnvelopeFromBlock")
		payload2, err := protoutil.UnmarshalPayload(txEnv2.Payload)
		require.NoError(t, err, "Error upon GetPayload")
		chdr, err := protoutil.UnmarshalChannelHeader(payload2.Header.ChannelHeader)
		require.NoError(t, err, "Error upon GetChannelHeaderFromBytes")
		txID2 := chdr.TxId

		exists, err := lgr.TxIDExists(txID2)
		require.NoError(t, err)
		require.True(t, exists)

		processedTran2, err := lgr.GetTransactionByID(txID2)
		require.NoError(t, err, "Error upon GetTransactionByID")
		// get the tran envelope from the retrieved ProcessedTransaction
		retrievedTxEnv2 := processedTran2.TransactionEnvelope
		require.Equal(t, txEnv2, retrievedTxEnv2)

		//  get the tran id from the 2nd block, then use it to test GetBlockByTxID
		b1, _ = lgr.GetBlockByTxID(txID2)
		require.True(t, proto.Equal(b1, block1), "proto messages are not equal")

		// get the transaction validation code for this transaction id
		validCode, blkNum, err := lgr.GetTxValidationCodeByTxID(txID2)
		require.NoError(t, err)
		require.Equal(t, peer.TxValidationCode_VALID, validCode)
		require.Equal(t, uint64(1), blkNum)

		// A txid that was never committed should report as absent.
		exists, err = lgr.TxIDExists("random-txid")
		require.NoError(t, err)
		require.False(t, exists)
	})

	t.Run("error-path", func(t *testing.T) {
		conf, cleanup := testConfig(t)
		defer cleanup()
		provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
		defer provider.Close()

		_, gb := testutil.NewBlockGenerator(t, "testLedger", false)
		lgr, err := provider.CreateFromGenesisBlock(gb)
		require.NoError(t, err)
		defer lgr.Close()

		// Closing the block store provider underneath the ledger should make
		// TxIDExists fail with the underlying leveldb "closed" error.
		provider.blkStoreProvider.Close()
		exists, err := lgr.TxIDExists("random-txid")
		require.EqualError(t, err, "error while trying to check the presence of TXID [random-txid]: internal leveldb error while obtaining db iterator: leveldb: closed")
		require.False(t, exists)
	})
}
   178  
// TestAddCommitHash verifies the ledger's commit-hash bookkeeping: the hash is
// empty right after genesis, becomes a 32-byte value after a normal block
// commit, and — once kvLedger.commitHash is reset to nil for a block number
// greater than 1 — is no longer added, so the persisted value reads back empty.
func TestAddCommitHash(t *testing.T) {
	conf, cleanup := testConfig(t)
	defer cleanup()
	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
	defer provider.Close()

	bg, gb := testutil.NewBlockGenerator(t, "testLedger", false)
	gbHash := protoutil.BlockHeaderHash(gb.Header)
	lgr, err := provider.CreateFromGenesisBlock(gb)
	require.NoError(t, err)
	defer lgr.Close()

	// metadata associated with the above created genesis block is empty;
	// hence, the persisted commitHash is expected to be empty as well.
	commitHash, err := lgr.(*kvLedger).lastPersistedCommitHash()
	require.NoError(t, err)
	require.Equal(t, commitHash, lgr.(*kvLedger).commitHash)
	require.Equal(t, len(commitHash), 0)

	bcInfo, _ := lgr.GetBlockchainInfo()
	require.Equal(t, &common.BlockchainInfo{
		Height: 1, CurrentBlockHash: gbHash, PreviousBlockHash: nil,
	}, bcInfo)

	// Commit a normal block; the commit hash should now be persisted.
	txid := util.GenerateUUID()
	simulator, _ := lgr.NewTxSimulator(txid)
	require.NoError(t, simulator.SetState("ns1", "key1", []byte("value1")))
	require.NoError(t, simulator.SetState("ns1", "key2", []byte("value2")))
	require.NoError(t, simulator.SetState("ns1", "key3", []byte("value3")))
	simulator.Done()
	simRes, _ := simulator.GetTxSimulationResults()
	pubSimBytes, _ := simRes.GetPubSimulationBytes()
	block1 := bg.NextBlock([][]byte{pubSimBytes})
	require.NoError(t, lgr.CommitLegacy(&ledger.BlockAndPvtData{Block: block1}, &ledger.CommitOptions{}))

	// After committing block 1 the persisted commit hash is 32 bytes long.
	commitHash, err = lgr.(*kvLedger).lastPersistedCommitHash()
	require.NoError(t, err)
	require.Equal(t, commitHash, lgr.(*kvLedger).commitHash)
	require.Equal(t, len(commitHash), 32)

	// if the kvledger.commitHash is nil and the block number is > 1, the
	// commitHash should not be added to the block
	block2 := bg.NextBlock([][]byte{pubSimBytes})
	lgr.(*kvLedger).commitHash = nil
	require.NoError(t, lgr.CommitLegacy(&ledger.BlockAndPvtData{Block: block2}, &ledger.CommitOptions{}))

	commitHash, err = lgr.(*kvLedger).lastPersistedCommitHash()
	require.NoError(t, err)
	require.Equal(t, commitHash, lgr.(*kvLedger).commitHash)
	require.Equal(t, len(commitHash), 0)
}
   230  
// TestKVLedgerBlockStorageWithPvtdata commits one block carrying private data
// writes (ns1/coll1 and ns1/coll2) and one block with only public writes, then
// checks GetPvtDataAndBlockByNum returns the private data exactly for the
// block that has it. NOTE: currently skipped via t.Skip().
func TestKVLedgerBlockStorageWithPvtdata(t *testing.T) {
	t.Skip()
	conf, cleanup := testConfig(t)
	defer cleanup()
	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
	defer provider.Close()

	bg, gb := testutil.NewBlockGenerator(t, "testLedger", false)
	gbHash := protoutil.BlockHeaderHash(gb.Header)
	lgr, err := provider.CreateFromGenesisBlock(gb)
	require.NoError(t, err)
	defer lgr.Close()

	bcInfo, _ := lgr.GetBlockchainInfo()
	require.Equal(t, &common.BlockchainInfo{
		Height: 1, CurrentBlockHash: gbHash, PreviousBlockHash: nil,
	}, bcInfo)

	// Block 1: one public write plus private writes into two collections.
	txid := util.GenerateUUID()
	simulator, _ := lgr.NewTxSimulator(txid)
	require.NoError(t, simulator.SetState("ns1", "key1", []byte("value1")))
	require.NoError(t, simulator.SetPrivateData("ns1", "coll1", "key2", []byte("value2")))
	require.NoError(t, simulator.SetPrivateData("ns1", "coll2", "key2", []byte("value3")))
	simulator.Done()
	simRes, _ := simulator.GetTxSimulationResults()
	pubSimBytes, _ := simRes.GetPubSimulationBytes()
	block1 := bg.NextBlockWithTxid([][]byte{pubSimBytes}, []string{txid})
	require.NoError(t, lgr.CommitLegacy(&ledger.BlockAndPvtData{Block: block1}, &ledger.CommitOptions{}))

	bcInfo, _ = lgr.GetBlockchainInfo()
	block1Hash := protoutil.BlockHeaderHash(block1.Header)
	require.Equal(t, &common.BlockchainInfo{
		Height: 2, CurrentBlockHash: block1Hash, PreviousBlockHash: gbHash,
	}, bcInfo)

	// Block 2: public writes only, no private data.
	txid = util.GenerateUUID()
	simulator, _ = lgr.NewTxSimulator(txid)
	require.NoError(t, simulator.SetState("ns1", "key1", []byte("value4")))
	require.NoError(t, simulator.SetState("ns1", "key2", []byte("value5")))
	require.NoError(t, simulator.SetState("ns1", "key3", []byte("value6")))
	simulator.Done()
	simRes, _ = simulator.GetTxSimulationResults()
	pubSimBytes, _ = simRes.GetPubSimulationBytes()
	block2 := bg.NextBlock([][]byte{pubSimBytes})
	require.NoError(t, lgr.CommitLegacy(&ledger.BlockAndPvtData{Block: block2}, &ledger.CommitOptions{}))

	bcInfo, _ = lgr.GetBlockchainInfo()
	block2Hash := protoutil.BlockHeaderHash(block2.Header)
	require.Equal(t, &common.BlockchainInfo{
		Height: 3, CurrentBlockHash: block2Hash, PreviousBlockHash: block1Hash,
	}, bcInfo)

	// Genesis block carries no private data.
	pvtdataAndBlock, _ := lgr.GetPvtDataAndBlockByNum(0, nil)
	require.Equal(t, gb, pvtdataAndBlock.Block)
	require.Nil(t, pvtdataAndBlock.PvtData)

	// Block 1 should come back with private data for both collections.
	pvtdataAndBlock, _ = lgr.GetPvtDataAndBlockByNum(1, nil)
	require.Equal(t, block1, pvtdataAndBlock.Block)
	require.NotNil(t, pvtdataAndBlock.PvtData)
	require.True(t, pvtdataAndBlock.PvtData[0].Has("ns1", "coll1"))
	require.True(t, pvtdataAndBlock.PvtData[0].Has("ns1", "coll2"))

	// Block 2 had only public writes, so no private data is returned.
	pvtdataAndBlock, _ = lgr.GetPvtDataAndBlockByNum(2, nil)
	require.Equal(t, block2, pvtdataAndBlock.Block)
	require.Nil(t, pvtdataAndBlock.PvtData)
}
   297  
// TestKVLedgerDBRecovery simulates peer crashes at three different points in
// the commit pipeline (block store written but state/history DB not; state DB
// written but history DB not; history DB written but state DB not) and
// verifies that reopening the ledger via a fresh provider recovers the lagging
// databases to the block store's height.
func TestKVLedgerDBRecovery(t *testing.T) {
	conf, cleanup := testConfig(t)
	defer cleanup()
	nsCollBtlConfs := []*nsCollBtlConfig{
		{
			namespace: "ns",
			btlConfig: map[string]uint64{"coll": 0},
		},
	}
	provider1 := testutilNewProviderWithCollectionConfig(
		t,
		nsCollBtlConfs,
		conf,
	)
	defer provider1.Close()

	testLedgerid := "testLedger"
	bg, gb := testutil.NewBlockGenerator(t, testLedgerid, false)
	ledger1, err := provider1.CreateFromGenesisBlock(gb)
	require.NoError(t, err)
	defer ledger1.Close()

	gbHash := protoutil.BlockHeaderHash(gb.Header)
	checkBCSummaryForTest(t, ledger1,
		&bcSummary{
			bcInfo: &common.BlockchainInfo{Height: 1, CurrentBlockHash: gbHash, PreviousBlockHash: nil},
		},
	)

	// creating and committing the second data block
	blockAndPvtdata1 := prepareNextBlockForTest(t, ledger1, bg, "SimulateForBlk1",
		map[string]string{"key1": "value1.1", "key2": "value2.1", "key3": "value3.1"},
		map[string]string{"key1": "pvtValue1.1", "key2": "pvtValue2.1", "key3": "pvtValue3.1"})
	require.NoError(t, ledger1.CommitLegacy(blockAndPvtdata1, &ledger.CommitOptions{}))
	checkBCSummaryForTest(t, ledger1,
		&bcSummary{
			bcInfo: &common.BlockchainInfo{
				Height:            2,
				CurrentBlockHash:  protoutil.BlockHeaderHash(blockAndPvtdata1.Block.Header),
				PreviousBlockHash: gbHash,
			},
		},
	)

	//======================================================================================
	// SCENARIO 1: peer writes the second block to the block storage and fails
	// before committing the block to state DB and history DB
	//======================================================================================
	blockAndPvtdata2 := prepareNextBlockForTest(t, ledger1, bg, "SimulateForBlk2",
		map[string]string{"key1": "value1.2", "key2": "value2.2", "key3": "value3.2"},
		map[string]string{"key1": "pvtValue1.2", "key2": "pvtValue2.2", "key3": "pvtValue3.2"})

	// Perform only the block-store half of the commit: validate/prepare and
	// write to the pvt-and-block store, deliberately skipping txmgr.Commit()
	// and historyDB.Commit() to mimic a crash mid-commit.
	_, _, err = ledger1.(*kvLedger).txmgr.ValidateAndPrepare(blockAndPvtdata2, true)
	require.NoError(t, err)
	require.NoError(t, ledger1.(*kvLedger).commitToPvtAndBlockStore(blockAndPvtdata2))

	// block storage should be as of block-2 but the state and history db should be as of block-1
	checkBCSummaryForTest(t, ledger1,
		&bcSummary{
			bcInfo: &common.BlockchainInfo{
				Height:            3,
				CurrentBlockHash:  protoutil.BlockHeaderHash(blockAndPvtdata2.Block.Header),
				PreviousBlockHash: protoutil.BlockHeaderHash(blockAndPvtdata1.Block.Header),
			},

			stateDBSavePoint: uint64(1),
			stateDBKVs:       map[string]string{"key1": "value1.1", "key2": "value2.1", "key3": "value3.1"},
			stateDBPvtKVs:    map[string]string{"key1": "pvtValue1.1", "key2": "pvtValue2.1", "key3": "pvtValue3.1"},

			historyDBSavePoint: uint64(1),
			historyKey:         "key1",
			historyVals:        []string{"value1.1"},
		},
	)
	// Now, assume that peer fails here before committing the transaction to the statedb and historydb
	ledger1.Close()
	provider1.Close()

	// Here the peer comes online and calls NewKVLedger to get a handler for the ledger
	// StateDB and HistoryDB should be recovered before returning from NewKVLedger call
	provider2 := testutilNewProviderWithCollectionConfig(
		t,
		nsCollBtlConfs,
		conf,
	)
	defer provider2.Close()
	ledger2, err := provider2.Open(testLedgerid)
	require.NoError(t, err)
	defer ledger2.Close()
	checkBCSummaryForTest(t, ledger2,
		&bcSummary{
			stateDBSavePoint: uint64(2),
			stateDBKVs:       map[string]string{"key1": "value1.2", "key2": "value2.2", "key3": "value3.2"},
			stateDBPvtKVs:    map[string]string{"key1": "pvtValue1.2", "key2": "pvtValue2.2", "key3": "pvtValue3.2"},

			historyDBSavePoint: uint64(2),
			historyKey:         "key1",
			historyVals:        []string{"value1.2", "value1.1"},
		},
	)

	//======================================================================================
	// SCENARIO 2: peer fails after committing the third block to the block storage and state DB
	// but before committing to history DB
	//======================================================================================
	blockAndPvtdata3 := prepareNextBlockForTest(t, ledger2, bg, "SimulateForBlk3",
		map[string]string{"key1": "value1.3", "key2": "value2.3", "key3": "value3.3"},
		map[string]string{"key1": "pvtValue1.3", "key2": "pvtValue2.3", "key3": "pvtValue3.3"},
	)
	_, _, err = ledger2.(*kvLedger).txmgr.ValidateAndPrepare(blockAndPvtdata3, true)
	require.NoError(t, err)
	require.NoError(t, ledger2.(*kvLedger).commitToPvtAndBlockStore(blockAndPvtdata3))
	// committing the transaction to state DB
	require.NoError(t, ledger2.(*kvLedger).txmgr.Commit())

	// assume that peer fails here after committing the transaction to state DB but before history DB
	checkBCSummaryForTest(t, ledger2,
		&bcSummary{
			bcInfo: &common.BlockchainInfo{
				Height:            4,
				CurrentBlockHash:  protoutil.BlockHeaderHash(blockAndPvtdata3.Block.Header),
				PreviousBlockHash: protoutil.BlockHeaderHash(blockAndPvtdata2.Block.Header),
			},

			stateDBSavePoint: uint64(3),
			stateDBKVs:       map[string]string{"key1": "value1.3", "key2": "value2.3", "key3": "value3.3"},
			stateDBPvtKVs:    map[string]string{"key1": "pvtValue1.3", "key2": "pvtValue2.3", "key3": "pvtValue3.3"},

			historyDBSavePoint: uint64(2),
			historyKey:         "key1",
			historyVals:        []string{"value1.2", "value1.1"},
		},
	)
	ledger2.Close()
	provider2.Close()

	// we assume here that the peer comes online and calls NewKVLedger to get a handler for the ledger
	// history DB should be recovered before returning from NewKVLedger call
	provider3 := testutilNewProviderWithCollectionConfig(
		t,
		nsCollBtlConfs,
		conf,
	)
	defer provider3.Close()
	ledger3, err := provider3.Open(testLedgerid)
	require.NoError(t, err)
	defer ledger3.Close()

	checkBCSummaryForTest(t, ledger3,
		&bcSummary{
			stateDBSavePoint: uint64(3),
			stateDBKVs:       map[string]string{"key1": "value1.3", "key2": "value2.3", "key3": "value3.3"},
			stateDBPvtKVs:    map[string]string{"key1": "pvtValue1.3", "key2": "pvtValue2.3", "key3": "pvtValue3.3"},

			historyDBSavePoint: uint64(3),
			historyKey:         "key1",
			historyVals:        []string{"value1.3", "value1.2", "value1.1"},
		},
	)

	// Rare scenario
	//======================================================================================
	// SCENARIO 3: peer fails after committing the fourth block to the block storage
	// and history DB but before committing to state DB
	//======================================================================================
	blockAndPvtdata4 := prepareNextBlockForTest(t, ledger3, bg, "SimulateForBlk4",
		map[string]string{"key1": "value1.4", "key2": "value2.4", "key3": "value3.4"},
		map[string]string{"key1": "pvtValue1.4", "key2": "pvtValue2.4", "key3": "pvtValue3.4"},
	)

	// Commit to block store and history DB, deliberately skipping the state DB.
	_, _, err = ledger3.(*kvLedger).txmgr.ValidateAndPrepare(blockAndPvtdata4, true)
	require.NoError(t, err)
	require.NoError(t, ledger3.(*kvLedger).commitToPvtAndBlockStore(blockAndPvtdata4))
	require.NoError(t, ledger3.(*kvLedger).historyDB.Commit(blockAndPvtdata4.Block))

	checkBCSummaryForTest(t, ledger3,
		&bcSummary{
			bcInfo: &common.BlockchainInfo{
				Height:            5,
				CurrentBlockHash:  protoutil.BlockHeaderHash(blockAndPvtdata4.Block.Header),
				PreviousBlockHash: protoutil.BlockHeaderHash(blockAndPvtdata3.Block.Header),
			},

			stateDBSavePoint: uint64(3),
			stateDBKVs:       map[string]string{"key1": "value1.3", "key2": "value2.3", "key3": "value3.3"},
			stateDBPvtKVs:    map[string]string{"key1": "pvtValue1.3", "key2": "pvtValue2.3", "key3": "pvtValue3.3"},

			historyDBSavePoint: uint64(4),
			historyKey:         "key1",
			historyVals:        []string{"value1.4", "value1.3", "value1.2", "value1.1"},
		},
	)
	ledger3.Close()
	provider3.Close()

	// we assume here that the peer comes online and calls NewKVLedger to get a handler for the ledger
	// state DB should be recovered before returning from NewKVLedger call
	provider4 := testutilNewProviderWithCollectionConfig(
		t,
		nsCollBtlConfs,
		conf,
	)
	defer provider4.Close()
	ledger4, err := provider4.Open(testLedgerid)
	require.NoError(t, err)
	defer ledger4.Close()
	checkBCSummaryForTest(t, ledger4,
		&bcSummary{
			stateDBSavePoint: uint64(4),
			stateDBKVs:       map[string]string{"key1": "value1.4", "key2": "value2.4", "key3": "value3.4"},
			stateDBPvtKVs:    map[string]string{"key1": "pvtValue1.4", "key2": "pvtValue2.4", "key3": "pvtValue3.4"},

			historyDBSavePoint: uint64(4),
			historyKey:         "key1",
			historyVals:        []string{"value1.4", "value1.3", "value1.2", "value1.1"},
		},
	)
}
   516  
// TestLedgerWithCouchDbEnabledWithBinaryAndJSONData commits blocks whose
// writes mix plain binary values and JSON documents, verifies block retrieval
// by hash and number, and — when the history DB is enabled in the config —
// checks that GetHistoryForKey on "key7" returns all three versions, newest
// first, ending with the originally committed JSON value.
func TestLedgerWithCouchDbEnabledWithBinaryAndJSONData(t *testing.T) {
	conf, cleanup := testConfig(t)
	defer cleanup()
	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
	defer provider.Close()
	bg, gb := testutil.NewBlockGenerator(t, "testLedger", false)
	gbHash := protoutil.BlockHeaderHash(gb.Header)
	lgr, err := provider.CreateFromGenesisBlock(gb)
	require.NoError(t, err)
	defer lgr.Close()

	bcInfo, _ := lgr.GetBlockchainInfo()
	require.Equal(t, &common.BlockchainInfo{
		Height: 1, CurrentBlockHash: gbHash, PreviousBlockHash: nil,
	}, bcInfo)

	// Block 1: two binary values (key4, key5) and two JSON documents (key6, key7).
	txid := util.GenerateUUID()
	simulator, _ := lgr.NewTxSimulator(txid)
	require.NoError(t, simulator.SetState("ns1", "key4", []byte("value1")))
	require.NoError(t, simulator.SetState("ns1", "key5", []byte("value2")))
	require.NoError(t, simulator.SetState("ns1", "key6", []byte("{\"shipmentID\":\"161003PKC7300\",\"customsInvoice\":{\"methodOfTransport\":\"GROUND\",\"invoiceNumber\":\"00091622\"},\"weightUnitOfMeasure\":\"KGM\",\"volumeUnitOfMeasure\": \"CO\",\"dimensionUnitOfMeasure\":\"CM\",\"currency\":\"USD\"}")))
	require.NoError(t, simulator.SetState("ns1", "key7", []byte("{\"shipmentID\":\"161003PKC7600\",\"customsInvoice\":{\"methodOfTransport\":\"AIR MAYBE\",\"invoiceNumber\":\"00091624\"},\"weightUnitOfMeasure\":\"KGM\",\"volumeUnitOfMeasure\": \"CO\",\"dimensionUnitOfMeasure\":\"CM\",\"currency\":\"USD\"}")))
	simulator.Done()
	simRes, _ := simulator.GetTxSimulationResults()
	pubSimBytes, _ := simRes.GetPubSimulationBytes()
	block1 := bg.NextBlock([][]byte{pubSimBytes})

	require.NoError(t, lgr.CommitLegacy(&ledger.BlockAndPvtData{Block: block1}, &ledger.CommitOptions{}))

	bcInfo, _ = lgr.GetBlockchainInfo()
	block1Hash := protoutil.BlockHeaderHash(block1.Header)
	require.Equal(t, &common.BlockchainInfo{
		Height: 2, CurrentBlockHash: block1Hash, PreviousBlockHash: gbHash,
	}, bcInfo)

	// Block 2: two transactions. The first rewrites key4-key7 (key7 gets its
	// second version) and adds key8; the second rewrites key7 a third time and
	// adds key9/key10.
	simulationResults := [][]byte{}
	txid = util.GenerateUUID()
	simulator, _ = lgr.NewTxSimulator(txid)
	require.NoError(t, simulator.SetState("ns1", "key4", []byte("value3")))
	require.NoError(t, simulator.SetState("ns1", "key5", []byte("{\"shipmentID\":\"161003PKC7500\",\"customsInvoice\":{\"methodOfTransport\":\"AIR FREIGHT\",\"invoiceNumber\":\"00091623\"},\"weightUnitOfMeasure\":\"KGM\",\"volumeUnitOfMeasure\": \"CO\",\"dimensionUnitOfMeasure\":\"CM\",\"currency\":\"USD\"}")))
	require.NoError(t, simulator.SetState("ns1", "key6", []byte("value4")))
	require.NoError(t, simulator.SetState("ns1", "key7", []byte("{\"shipmentID\":\"161003PKC7600\",\"customsInvoice\":{\"methodOfTransport\":\"GROUND\",\"invoiceNumber\":\"00091624\"},\"weightUnitOfMeasure\":\"KGM\",\"volumeUnitOfMeasure\": \"CO\",\"dimensionUnitOfMeasure\":\"CM\",\"currency\":\"USD\"}")))
	require.NoError(t, simulator.SetState("ns1", "key8", []byte("{\"shipmentID\":\"161003PKC7700\",\"customsInvoice\":{\"methodOfTransport\":\"SHIP\",\"invoiceNumber\":\"00091625\"},\"weightUnitOfMeasure\":\"KGM\",\"volumeUnitOfMeasure\": \"CO\",\"dimensionUnitOfMeasure\":\"CM\",\"currency\":\"USD\"}")))
	simulator.Done()
	simRes, _ = simulator.GetTxSimulationResults()
	pubSimBytes, _ = simRes.GetPubSimulationBytes()
	simulationResults = append(simulationResults, pubSimBytes)
	// add a 2nd transaction
	txid2 := util.GenerateUUID()
	simulator2, _ := lgr.NewTxSimulator(txid2)
	require.NoError(t, simulator2.SetState("ns1", "key7", []byte("{\"shipmentID\":\"161003PKC7600\",\"customsInvoice\":{\"methodOfTransport\":\"TRAIN\",\"invoiceNumber\":\"00091624\"},\"weightUnitOfMeasure\":\"KGM\",\"volumeUnitOfMeasure\": \"CO\",\"dimensionUnitOfMeasure\":\"CM\",\"currency\":\"USD\"}")))
	require.NoError(t, simulator2.SetState("ns1", "key9", []byte("value5")))
	require.NoError(t, simulator2.SetState("ns1", "key10", []byte("{\"shipmentID\":\"261003PKC8000\",\"customsInvoice\":{\"methodOfTransport\":\"DONKEY\",\"invoiceNumber\":\"00091626\"},\"weightUnitOfMeasure\":\"KGM\",\"volumeUnitOfMeasure\": \"CO\",\"dimensionUnitOfMeasure\":\"CM\",\"currency\":\"USD\"}")))
	simulator2.Done()
	simRes2, _ := simulator2.GetTxSimulationResults()
	pubSimBytes2, _ := simRes2.GetPubSimulationBytes()
	simulationResults = append(simulationResults, pubSimBytes2)

	block2 := bg.NextBlock(simulationResults)
	require.NoError(t, lgr.CommitLegacy(&ledger.BlockAndPvtData{Block: block2}, &ledger.CommitOptions{}))

	bcInfo, _ = lgr.GetBlockchainInfo()
	block2Hash := protoutil.BlockHeaderHash(block2.Header)
	require.Equal(t, &common.BlockchainInfo{
		Height: 3, CurrentBlockHash: block2Hash, PreviousBlockHash: block1Hash,
	}, bcInfo)

	// Committed blocks should be retrievable by hash ...
	b0, _ := lgr.GetBlockByHash(gbHash)
	require.True(t, proto.Equal(b0, gb), "proto messages are not equal")

	b1, _ := lgr.GetBlockByHash(block1Hash)
	require.True(t, proto.Equal(b1, block1), "proto messages are not equal")

	b2, _ := lgr.GetBlockByHash(block2Hash)
	require.True(t, proto.Equal(b2, block2), "proto messages are not equal")

	// ... and by block number.
	b0, _ = lgr.GetBlockByNumber(0)
	require.True(t, proto.Equal(b0, gb), "proto messages are not equal")

	b1, _ = lgr.GetBlockByNumber(1)
	require.True(t, proto.Equal(b1, block1), "proto messages are not equal")

	b2, _ = lgr.GetBlockByNumber(2)
	require.True(t, proto.Equal(b2, block2), "proto messages are not equal")

	// Similar test has been pushed down to historyleveldb_test.go as well
	if conf.HistoryDBConfig.Enabled {
		logger.Debugf("History is enabled\n")
		qhistory, err := lgr.NewHistoryQueryExecutor()
		require.NoError(t, err, "Error when trying to retrieve history database executor")

		itr, err2 := qhistory.GetHistoryForKey("ns1", "key7")
		require.NoError(t, err2, "Error upon GetHistoryForKey")

		// Drain the iterator, counting versions; retrievedValue ends up
		// holding the last (oldest) entry returned.
		var retrievedValue []byte
		count := 0
		for {
			kmod, _ := itr.Next()
			if kmod == nil {
				break
			}
			retrievedValue = kmod.(*queryresult.KeyModification).Value
			count++
		}
		require.Equal(t, 3, count)
		// test the last value in the history matches the first value set for key7
		expectedValue := []byte("{\"shipmentID\":\"161003PKC7600\",\"customsInvoice\":{\"methodOfTransport\":\"AIR MAYBE\",\"invoiceNumber\":\"00091624\"},\"weightUnitOfMeasure\":\"KGM\",\"volumeUnitOfMeasure\": \"CO\",\"dimensionUnitOfMeasure\":\"CM\",\"currency\":\"USD\"}")
		require.Equal(t, expectedValue, retrievedValue)

	}
}
   628  
   629  func TestPvtDataAPIs(t *testing.T) {
   630  	conf, cleanup := testConfig(t)
   631  	defer cleanup()
   632  	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   633  	defer provider.Close()
   634  
   635  	ledgerID := "testLedger"
   636  	bg, gb := testutil.NewBlockGenerator(t, ledgerID, false)
   637  	gbHash := protoutil.BlockHeaderHash(gb.Header)
   638  	lgr, err := provider.CreateFromGenesisBlock(gb)
   639  	require.NoError(t, err)
   640  	defer lgr.Close()
   641  	lgr.(*kvLedger).pvtdataStore.Init(btlPolicyForSampleData())
   642  
   643  	bcInfo, _ := lgr.GetBlockchainInfo()
   644  	require.Equal(t, &common.BlockchainInfo{
   645  		Height: 1, CurrentBlockHash: gbHash, PreviousBlockHash: nil,
   646  	}, bcInfo)
   647  
   648  	kvlgr := lgr.(*kvLedger)
   649  
   650  	sampleData := sampleDataWithPvtdataForSelectiveTx(t, bg)
   651  	for _, sampleDatum := range sampleData {
   652  		require.NoError(t, kvlgr.commitToPvtAndBlockStore(sampleDatum))
   653  	}
   654  
   655  	// block 2 has no pvt data
   656  	pvtdata, err := lgr.GetPvtDataByNum(2, nil)
   657  	require.NoError(t, err)
   658  	require.Nil(t, pvtdata)
   659  
   660  	// block 5 has no pvt data
   661  	pvtdata, err = lgr.GetPvtDataByNum(5, nil)
   662  	require.NoError(t, err)
   663  	require.Equal(t, 0, len(pvtdata))
   664  
   665  	// block 3 has pvt data for tx 3, 5 and 6. Though the tx 6
   666  	// is marked as invalid in the block, the pvtData should
   667  	// have been stored
   668  	pvtdata, err = lgr.GetPvtDataByNum(3, nil)
   669  	require.NoError(t, err)
   670  	require.Equal(t, 3, len(pvtdata))
   671  	require.Equal(t, uint64(3), pvtdata[0].SeqInBlock)
   672  	require.Equal(t, uint64(5), pvtdata[1].SeqInBlock)
   673  	require.Equal(t, uint64(6), pvtdata[2].SeqInBlock)
   674  
   675  	// block 4 has pvt data for tx 4 and 6 only
   676  	pvtdata, err = lgr.GetPvtDataByNum(4, nil)
   677  	require.NoError(t, err)
   678  	require.Equal(t, 2, len(pvtdata))
   679  	require.Equal(t, uint64(4), pvtdata[0].SeqInBlock)
   680  	require.Equal(t, uint64(6), pvtdata[1].SeqInBlock)
   681  
   682  	blockAndPvtdata, err := lgr.GetPvtDataAndBlockByNum(3, nil)
   683  	require.NoError(t, err)
   684  	require.True(t, proto.Equal(sampleData[2].Block, blockAndPvtdata.Block))
   685  
   686  	blockAndPvtdata, err = lgr.GetPvtDataAndBlockByNum(4, nil)
   687  	require.NoError(t, err)
   688  	require.True(t, proto.Equal(sampleData[3].Block, blockAndPvtdata.Block))
   689  
   690  	// pvt data retrieval for block 3 with filter should return filtered pvtdata
   691  	filter := ledger.NewPvtNsCollFilter()
   692  	filter.Add("ns-1", "coll-1")
   693  	blockAndPvtdata, err = lgr.GetPvtDataAndBlockByNum(4, filter)
   694  	require.NoError(t, err)
   695  	require.Equal(t, sampleData[3].Block, blockAndPvtdata.Block)
   696  	// two transactions should be present
   697  	require.Equal(t, 2, len(blockAndPvtdata.PvtData))
   698  	// both tran number 4 and 6 should have only one collection because of filter
   699  	require.Equal(t, 1, len(blockAndPvtdata.PvtData[4].WriteSet.NsPvtRwset))
   700  	require.Equal(t, 1, len(blockAndPvtdata.PvtData[6].WriteSet.NsPvtRwset))
   701  	// any other transaction entry should be nil
   702  	require.Nil(t, blockAndPvtdata.PvtData[2])
   703  
   704  	// test missing data retrieval in the presence of invalid tx. Block 6 had
   705  	// missing data (for tx4 and tx5). Though tx5 was marked as invalid tx,
   706  	// both tx4 and tx5 missing data should be returned
   707  	expectedMissingDataInfo := make(ledger.MissingPvtDataInfo)
   708  	expectedMissingDataInfo.Add(6, 4, "ns-4", "coll-4")
   709  	expectedMissingDataInfo.Add(6, 5, "ns-5", "coll-5")
   710  	missingDataInfo, err := lgr.(*kvLedger).GetMissingPvtDataInfoForMostRecentBlocks(1)
   711  	require.NoError(t, err)
   712  	require.Equal(t, expectedMissingDataInfo, missingDataInfo)
   713  }
   714  
   715  func TestCrashAfterPvtdataStoreCommit(t *testing.T) {
   716  	conf, cleanup := testConfig(t)
   717  	defer cleanup()
   718  	ccInfoProvider := &mock.DeployedChaincodeInfoProvider{}
   719  	ccInfoProvider.CollectionInfoReturns(&peer.StaticCollectionConfig{BlockToLive: 0}, nil)
   720  	provider := testutilNewProvider(conf, t, ccInfoProvider)
   721  	defer provider.Close()
   722  
   723  	ledgerID := "testLedger"
   724  	bg, gb := testutil.NewBlockGenerator(t, ledgerID, false)
   725  	gbHash := protoutil.BlockHeaderHash(gb.Header)
   726  	lgr, err := provider.CreateFromGenesisBlock(gb)
   727  	require.NoError(t, err)
   728  	defer lgr.Close()
   729  
   730  	bcInfo, _ := lgr.GetBlockchainInfo()
   731  	require.Equal(t, &common.BlockchainInfo{
   732  		Height: 1, CurrentBlockHash: gbHash, PreviousBlockHash: nil,
   733  	}, bcInfo)
   734  
   735  	sampleData := sampleDataWithPvtdataForAllTxs(t, bg)
   736  	dataBeforeCrash := sampleData[0:3]
   737  	dataAtCrash := sampleData[3]
   738  
   739  	for _, sampleDatum := range dataBeforeCrash {
   740  		require.NoError(t, lgr.(*kvLedger).commitToPvtAndBlockStore(sampleDatum))
   741  	}
   742  	blockNumAtCrash := dataAtCrash.Block.Header.Number
   743  	var pvtdataAtCrash []*ledger.TxPvtData
   744  	for _, p := range dataAtCrash.PvtData {
   745  		pvtdataAtCrash = append(pvtdataAtCrash, p)
   746  	}
   747  	// call Commit on pvt data store and mimic a crash before committing the block to block store
   748  	require.NoError(t, lgr.(*kvLedger).pvtdataStore.Commit(blockNumAtCrash, pvtdataAtCrash, nil))
   749  
   750  	// Now, assume that peer fails here before committing the block to blockstore.
   751  	lgr.Close()
   752  	provider.Close()
   753  
   754  	// mimic peer restart
   755  	provider1 := testutilNewProvider(conf, t, ccInfoProvider)
   756  	defer provider1.Close()
   757  	lgr1, err := provider1.Open(ledgerID)
   758  	require.NoError(t, err)
   759  	defer lgr1.Close()
   760  
   761  	isPvtStoreAhead, err := lgr1.(*kvLedger).isPvtDataStoreAheadOfBlockStore()
   762  	require.NoError(t, err)
   763  	require.True(t, isPvtStoreAhead)
   764  
   765  	// When starting the storage after a crash, we should be able to fetch the pvtData from pvtStore
   766  	testVerifyPvtData(t, lgr1, blockNumAtCrash, dataAtCrash.PvtData)
   767  	bcInfo, err = lgr.GetBlockchainInfo()
   768  	require.NoError(t, err)
   769  	require.Equal(t, blockNumAtCrash, bcInfo.Height)
   770  
   771  	// we should be able to write the last block again
   772  	// to ensure that the pvtdataStore is not updated, we send a different pvtData for
   773  	// the same block such that we can retrieve the pvtData and compare.
   774  	expectedPvtData := dataAtCrash.PvtData
   775  	dataAtCrash.PvtData = make(ledger.TxPvtDataMap)
   776  	dataAtCrash.PvtData[0] = &ledger.TxPvtData{
   777  		SeqInBlock: 0,
   778  		WriteSet: &rwset.TxPvtReadWriteSet{
   779  			NsPvtRwset: []*rwset.NsPvtReadWriteSet{
   780  				{
   781  					Namespace: "ns-1",
   782  					CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
   783  						{
   784  							CollectionName: "coll-1",
   785  							Rwset:          []byte("pvtdata"),
   786  						},
   787  					},
   788  				},
   789  			},
   790  		},
   791  	}
   792  	require.NoError(t, lgr1.(*kvLedger).commitToPvtAndBlockStore(dataAtCrash))
   793  	testVerifyPvtData(t, lgr1, blockNumAtCrash, expectedPvtData)
   794  	bcInfo, err = lgr1.GetBlockchainInfo()
   795  	require.NoError(t, err)
   796  	require.Equal(t, blockNumAtCrash+1, bcInfo.Height)
   797  
   798  	isPvtStoreAhead, err = lgr1.(*kvLedger).isPvtDataStoreAheadOfBlockStore()
   799  	require.NoError(t, err)
   800  	require.False(t, isPvtStoreAhead)
   801  }
   802  
   803  func testVerifyPvtData(t *testing.T, lgr ledger.PeerLedger, blockNum uint64, expectedPvtData ledger.TxPvtDataMap) {
   804  	pvtdata, err := lgr.GetPvtDataByNum(blockNum, nil)
   805  	require.NoError(t, err)
   806  	constructed := constructPvtdataMap(pvtdata)
   807  	require.Equal(t, len(expectedPvtData), len(constructed))
   808  	for k, v := range expectedPvtData {
   809  		ov, ok := constructed[k]
   810  		require.True(t, ok)
   811  		require.Equal(t, v.SeqInBlock, ov.SeqInBlock)
   812  		require.True(t, proto.Equal(v.WriteSet, ov.WriteSet))
   813  	}
   814  }
   815  
// TestPvtStoreAheadOfBlockStore verifies isPvtDataStoreAheadOfBlockStore()
// across three provider restarts: when both stores are at the same height,
// after a block is committed only to the pvtdata store (store becomes one
// block ahead), and after the block store is caught up again.
func TestPvtStoreAheadOfBlockStore(t *testing.T) {
	conf, cleanup := testConfig(t)
	defer cleanup()
	ccInfoProvider := &mock.DeployedChaincodeInfoProvider{}
	ccInfoProvider.CollectionInfoReturns(&peer.StaticCollectionConfig{BlockToLive: 0}, nil)
	provider := testutilNewProvider(conf, t, ccInfoProvider)
	defer provider.Close()

	ledgerID := "testLedger"
	bg, gb := testutil.NewBlockGenerator(t, ledgerID, false)
	gbHash := protoutil.BlockHeaderHash(gb.Header)
	lgr, err := provider.CreateFromGenesisBlock(gb)
	require.NoError(t, err)
	defer lgr.Close()

	bcInfo, _ := lgr.GetBlockchainInfo()
	require.Equal(t, &common.BlockchainInfo{
		Height: 1, CurrentBlockHash: gbHash, PreviousBlockHash: nil,
	}, bcInfo)

	// when both stores contain genesis block only, isPvtstoreAheadOfBlockstore should be false
	kvlgr := lgr.(*kvLedger)
	isPvtStoreAhead, err := kvlgr.isPvtDataStoreAheadOfBlockStore()
	require.NoError(t, err)
	require.False(t, isPvtStoreAhead)

	sampleData := sampleDataWithPvtdataForSelectiveTx(t, bg)
	for _, d := range sampleData[0:9] { // commit block numbers 1 to 9 (sampleData[i] holds block i+1; height assertions below confirm)
		require.NoError(t, kvlgr.commitToPvtAndBlockStore(d))
	}

	isPvtStoreAhead, err = kvlgr.isPvtDataStoreAheadOfBlockStore()
	require.NoError(t, err)
	require.False(t, isPvtStoreAhead)

	// close and reopen.
	lgr.Close()
	provider.Close()

	provider1 := testutilNewProvider(conf, t, ccInfoProvider)
	defer provider1.Close()
	lgr1, err := provider1.Open(ledgerID)
	require.NoError(t, err)
	defer lgr1.Close()
	kvlgr = lgr1.(*kvLedger)

	// as both stores are at the same block height, isPvtstoreAheadOfBlockstore should be false
	info, err := lgr1.GetBlockchainInfo()
	require.NoError(t, err)
	require.Equal(t, uint64(10), info.Height)
	pvtStoreHt, err := kvlgr.pvtdataStore.LastCommittedBlockHeight()
	require.NoError(t, err)
	require.Equal(t, uint64(10), pvtStoreHt)
	isPvtStoreAhead, err = kvlgr.isPvtDataStoreAheadOfBlockStore()
	require.NoError(t, err)
	require.False(t, isPvtStoreAhead)

	lastBlkAndPvtData := sampleData[9]
	// Add the last block directly to the pvtdataStore but not to blockstore. This would make
	// the pvtdatastore height greater than the block store height.
	validTxPvtData, validTxMissingPvtData := constructPvtDataAndMissingData(lastBlkAndPvtData)
	err = kvlgr.pvtdataStore.Commit(lastBlkAndPvtData.Block.Header.Number, validTxPvtData, validTxMissingPvtData)
	require.NoError(t, err)

	// close and reopen.
	lgr1.Close()
	provider1.Close()

	// NOTE(review): this restart uses a fresh mock rather than ccInfoProvider
	// as the earlier restarts do — presumably intentional, but worth confirming.
	provider2 := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
	defer provider2.Close()
	lgr2, err := provider2.Open(ledgerID)
	require.NoError(t, err)
	defer lgr2.Close()
	kvlgr = lgr2.(*kvLedger)

	// pvtdataStore should be ahead of blockstore
	info, err = lgr2.GetBlockchainInfo()
	require.NoError(t, err)
	require.Equal(t, uint64(10), info.Height)
	pvtStoreHt, err = kvlgr.pvtdataStore.LastCommittedBlockHeight()
	require.NoError(t, err)
	require.Equal(t, uint64(11), pvtStoreHt)
	isPvtStoreAhead, err = kvlgr.isPvtDataStoreAheadOfBlockStore()
	require.NoError(t, err)
	require.True(t, isPvtStoreAhead)

	// bring the height of BlockStore equal to pvtdataStore
	require.NoError(t, kvlgr.commitToPvtAndBlockStore(lastBlkAndPvtData))
	info, err = lgr2.GetBlockchainInfo()
	require.NoError(t, err)
	require.Equal(t, uint64(11), info.Height)
	pvtStoreHt, err = kvlgr.pvtdataStore.LastCommittedBlockHeight()
	require.NoError(t, err)
	require.Equal(t, uint64(11), pvtStoreHt)
	isPvtStoreAhead, err = kvlgr.isPvtDataStoreAheadOfBlockStore()
	require.NoError(t, err)
	require.False(t, isPvtStoreAhead)
}
   914  
// TestCommitToPvtAndBlockstoreError verifies that commitToPvtAndBlockStore
// propagates errors from both underlying stores: a stale block number is
// rejected by the pvtdata store, and a duplicate block is rejected by the
// block store (after the pvtdata store has already advanced).
func TestCommitToPvtAndBlockstoreError(t *testing.T) {
	conf, cleanup := testConfig(t)
	defer cleanup()
	ccInfoProvider := &mock.DeployedChaincodeInfoProvider{}
	ccInfoProvider.CollectionInfoReturns(&peer.StaticCollectionConfig{BlockToLive: 0}, nil)
	provider1 := testutilNewProvider(conf, t, ccInfoProvider)
	defer provider1.Close()

	ledgerID := "testLedger"
	bg, gb := testutil.NewBlockGenerator(t, ledgerID, false)
	gbHash := protoutil.BlockHeaderHash(gb.Header)
	lgr1, err := provider1.CreateFromGenesisBlock(gb)
	require.NoError(t, err)
	defer lgr1.Close()

	bcInfo, _ := lgr1.GetBlockchainInfo()
	require.Equal(t, &common.BlockchainInfo{
		Height: 1, CurrentBlockHash: gbHash, PreviousBlockHash: nil,
	}, bcInfo)

	kvlgr := lgr1.(*kvLedger)
	sampleData := sampleDataWithPvtdataForSelectiveTx(t, bg)
	for _, d := range sampleData[0:9] { // commit block number 1 to 9
		require.NoError(t, kvlgr.commitToPvtAndBlockStore(d))
	}

	// try to write the last block again. The function should return an
	// error from the private data store.
	err = kvlgr.commitToPvtAndBlockStore(sampleData[8]) // block 9
	require.EqualError(t, err, "expected block number=10, received block number=9")

	lastBlkAndPvtData := sampleData[9] // block 10
	// Add the block directly to blockstore
	require.NoError(t, kvlgr.blockStore.AddBlock(lastBlkAndPvtData.Block))
	// Adding the same block should cause passing on the error caused by the block storage
	err = kvlgr.commitToPvtAndBlockStore(lastBlkAndPvtData)
	require.EqualError(t, err, "block number should have been 11 but was 10")
	// At the end, the pvt store status should be changed: its height asserts
	// block 10 was committed there even though the block store append failed
	pvtStoreCommitHt, err := kvlgr.pvtdataStore.LastCommittedBlockHeight()
	require.NoError(t, err)
	require.Equal(t, uint64(11), pvtStoreCommitHt)
}
   957  
   958  func TestCollectionConfigHistoryRetriever(t *testing.T) {
   959  	var cleanup func()
   960  	ledgerID := "testLedger"
   961  	chaincodeName := "testChaincode"
   962  
   963  	var provider *Provider
   964  	var mockDeployedCCInfoProvider *mock.DeployedChaincodeInfoProvider
   965  	var lgr ledger.PeerLedger
   966  
   967  	init := func() {
   968  		var err error
   969  		conf, cleanupFunc := testConfig(t)
   970  		mockDeployedCCInfoProvider = &mock.DeployedChaincodeInfoProvider{}
   971  		provider = testutilNewProvider(conf, t, mockDeployedCCInfoProvider)
   972  		ledgerID := "testLedger"
   973  		_, gb := testutil.NewBlockGenerator(t, ledgerID, false)
   974  		lgr, err = provider.CreateFromGenesisBlock(gb)
   975  		require.NoError(t, err)
   976  		cleanup = func() {
   977  			lgr.Close()
   978  			provider.Close()
   979  			cleanupFunc()
   980  		}
   981  	}
   982  
   983  	testcases := []struct {
   984  		name                        string
   985  		implicitCollConfigs         []*peer.StaticCollectionConfig
   986  		explicitCollConfigs         *peer.CollectionConfigPackage
   987  		explicitCollConfigsBlockNum uint64
   988  		expectedOutput              *ledger.CollectionConfigInfo
   989  	}{
   990  		{
   991  			name: "both-implicit-and-explicit-coll-configs-exist",
   992  			implicitCollConfigs: []*peer.StaticCollectionConfig{
   993  				{
   994  					Name: "implicit-coll",
   995  				},
   996  			},
   997  			explicitCollConfigs: testutilCollConfigPkg(
   998  				[]*peer.StaticCollectionConfig{
   999  					{
  1000  						Name: "explicit-coll",
  1001  					},
  1002  				},
  1003  			),
  1004  			explicitCollConfigsBlockNum: 25,
  1005  
  1006  			expectedOutput: &ledger.CollectionConfigInfo{
  1007  				CollectionConfig: testutilCollConfigPkg(
  1008  					[]*peer.StaticCollectionConfig{
  1009  						{
  1010  							Name: "explicit-coll",
  1011  						},
  1012  						{
  1013  							Name: "implicit-coll",
  1014  						},
  1015  					},
  1016  				),
  1017  				CommittingBlockNum: 25,
  1018  			},
  1019  		},
  1020  
  1021  		{
  1022  			name: "only-implicit-coll-configs-exist",
  1023  			implicitCollConfigs: []*peer.StaticCollectionConfig{
  1024  				{
  1025  					Name: "implicit-coll",
  1026  				},
  1027  			},
  1028  			explicitCollConfigs: nil,
  1029  			expectedOutput: &ledger.CollectionConfigInfo{
  1030  				CollectionConfig: testutilCollConfigPkg(
  1031  					[]*peer.StaticCollectionConfig{
  1032  						{
  1033  							Name: "implicit-coll",
  1034  						},
  1035  					},
  1036  				),
  1037  			},
  1038  		},
  1039  
  1040  		{
  1041  			name: "only-explicit-coll-configs-exist",
  1042  			explicitCollConfigs: testutilCollConfigPkg(
  1043  				[]*peer.StaticCollectionConfig{
  1044  					{
  1045  						Name: "explicit-coll",
  1046  					},
  1047  				},
  1048  			),
  1049  			explicitCollConfigsBlockNum: 25,
  1050  			expectedOutput: &ledger.CollectionConfigInfo{
  1051  				CollectionConfig: testutilCollConfigPkg(
  1052  					[]*peer.StaticCollectionConfig{
  1053  						{
  1054  							Name: "explicit-coll",
  1055  						},
  1056  					},
  1057  				),
  1058  				CommittingBlockNum: 25,
  1059  			},
  1060  		},
  1061  
  1062  		{
  1063  			name:           "no-coll-configs-exist",
  1064  			expectedOutput: nil,
  1065  		},
  1066  	}
  1067  
  1068  	for _, testcase := range testcases {
  1069  		t.Run(
  1070  			testcase.name,
  1071  			func(t *testing.T) {
  1072  				init()
  1073  				defer cleanup()
  1074  				// setup mock for implicit collections
  1075  				mockDeployedCCInfoProvider.ImplicitCollectionsReturns(testcase.implicitCollConfigs, nil)
  1076  				// setup mock so that it causes persisting the explicit collections in collection config history mgr
  1077  				if testcase.explicitCollConfigs != nil {
  1078  					testutilPersistExplicitCollectionConfig(
  1079  						t,
  1080  						provider,
  1081  						mockDeployedCCInfoProvider,
  1082  						ledgerID,
  1083  						chaincodeName,
  1084  						testcase.explicitCollConfigs,
  1085  						testcase.explicitCollConfigsBlockNum,
  1086  					)
  1087  				}
  1088  
  1089  				r, err := lgr.GetConfigHistoryRetriever()
  1090  				require.NoError(t, err)
  1091  
  1092  				actualOutput, err := r.MostRecentCollectionConfigBelow(testcase.explicitCollConfigsBlockNum+1, chaincodeName)
  1093  				require.NoError(t, err)
  1094  				require.Equal(t, testcase.expectedOutput, actualOutput)
  1095  			},
  1096  		)
  1097  	}
  1098  
  1099  	t.Run("implicit-collection-retrieval-causes-error", func(t *testing.T) {
  1100  		init()
  1101  		defer cleanup()
  1102  
  1103  		mockDeployedCCInfoProvider.ImplicitCollectionsReturns(nil, errors.New("cannot-serve-implicit-collections"))
  1104  		r, err := lgr.GetConfigHistoryRetriever()
  1105  		require.NoError(t, err)
  1106  
  1107  		_, err = r.MostRecentCollectionConfigBelow(50, chaincodeName)
  1108  		require.EqualError(t, err, "error while retrieving implicit collections: cannot-serve-implicit-collections")
  1109  	})
  1110  
  1111  	t.Run("explicit-collection-retrieval-causes-error", func(t *testing.T) {
  1112  		init()
  1113  		defer cleanup()
  1114  		provider.configHistoryMgr.Close()
  1115  
  1116  		r, err := lgr.GetConfigHistoryRetriever()
  1117  		require.NoError(t, err)
  1118  
  1119  		_, err = r.MostRecentCollectionConfigBelow(50, chaincodeName)
  1120  		require.Contains(t, err.Error(), "error while retrieving explicit collections")
  1121  	})
  1122  }
  1123  
// TestCommitNotifications unit-tests sendCommitNotification and
// CommitNotificationsChannel on a bare kvLedger (no stores): de-duplication of
// repeated txids, filtering of empty txids, single-subscriber enforcement, and
// closure of the data channel once the done channel is closed.
func TestCommitNotifications(t *testing.T) {
	var lgr *kvLedger
	var doneChannel chan struct{}
	var dataChannel <-chan *ledger.CommitNotification

	// setup creates a fresh bare ledger and subscribes a new notification channel
	setup := func() {
		var err error
		lgr = &kvLedger{}
		doneChannel = make(chan struct{})
		dataChannel, err = lgr.CommitNotificationsChannel(doneChannel)
		require.NoError(t, err)
	}

	t.Run("only first txid is included in notification", func(t *testing.T) {
		setup()
		// two tx stats share "txid_1"; only the first occurrence should appear
		lgr.sendCommitNotification(1, []*validation.TxStatInfo{
			{
				TxIDFromChannelHeader: "txid_1",
				ValidationCode:        peer.TxValidationCode_BAD_RWSET,
				ChaincodeID:           &peer.ChaincodeID{Name: "cc1"},
				ChaincodeEventData:    []byte("cc1_event"),
				TxType:                common.HeaderType_ENDORSER_TRANSACTION,
			},
			{
				TxIDFromChannelHeader: "txid_1",
				ValidationCode:        peer.TxValidationCode_DUPLICATE_TXID,
			},
			{
				TxIDFromChannelHeader: "txid_2",
				ValidationCode:        peer.TxValidationCode_VALID,
				ChaincodeID:           &peer.ChaincodeID{Name: "cc2"},
				ChaincodeEventData:    []byte("cc2_event"),
				TxType:                common.HeaderType_ENDORSER_TRANSACTION,
			},
		})

		commitNotification := <-dataChannel
		require.Equal(t,
			&ledger.CommitNotification{
				BlockNumber: 1,
				TxsInfo: []*ledger.CommitNotificationTxInfo{
					{
						TxID:               "txid_1",
						TxType:             common.HeaderType_ENDORSER_TRANSACTION,
						ValidationCode:     peer.TxValidationCode_BAD_RWSET,
						ChaincodeID:        &peer.ChaincodeID{Name: "cc1"},
						ChaincodeEventData: []byte("cc1_event"),
					},
					{
						TxID:               "txid_2",
						TxType:             common.HeaderType_ENDORSER_TRANSACTION,
						ValidationCode:     peer.TxValidationCode_VALID,
						ChaincodeID:        &peer.ChaincodeID{Name: "cc2"},
						ChaincodeEventData: []byte("cc2_event"),
					},
				},
			},
			commitNotification,
		)
	})

	t.Run("empty txids are not included in notification", func(t *testing.T) {
		setup()
		lgr.sendCommitNotification(1, []*validation.TxStatInfo{
			{
				TxIDFromChannelHeader: "",
				ValidationCode:        peer.TxValidationCode_BAD_RWSET,
			},
			{
				TxIDFromChannelHeader: "",
				ValidationCode:        peer.TxValidationCode_DUPLICATE_TXID,
			},
		})

		// the notification is still delivered, just with an empty TxsInfo list
		commitNotification := <-dataChannel
		require.Equal(t,
			&ledger.CommitNotification{
				BlockNumber: 1,
				TxsInfo:     []*ledger.CommitNotificationTxInfo{},
			},
			commitNotification,
		)
	})

	t.Run("second time calling CommitNotificationsChannel returns error", func(t *testing.T) {
		setup()
		_, err := lgr.CommitNotificationsChannel(make(chan struct{}))
		require.EqualError(t, err, "only one commit notifications channel is allowed at a time")
	})

	t.Run("closing done channel closes the data channel on next commit", func(t *testing.T) {
		setup()
		lgr.sendCommitNotification(1, []*validation.TxStatInfo{})
		_, ok := <-dataChannel
		require.True(t, ok)

		// the data channel is closed lazily: only when the next notification is sent
		close(doneChannel)
		lgr.sendCommitNotification(2, []*validation.TxStatInfo{})

		_, ok = <-dataChannel
		require.False(t, ok)
	})
}
  1227  
// TestCommitNotificationsOnBlockCommit is an end-to-end check that committing
// a block via CommitLegacy emits a commit notification carrying per-tx
// validation codes: two transactions replay the same simulation results so the
// second is expected to be invalidated with an MVCC read conflict.
func TestCommitNotificationsOnBlockCommit(t *testing.T) {
	conf, cleanup := testConfig(t)
	defer cleanup()
	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
	defer provider.Close()

	_, gb := testutil.NewBlockGenerator(t, "testLedger", false)
	l, err := provider.CreateFromGenesisBlock(gb)
	require.NoError(t, err)
	defer l.Close()

	lgr := l.(*kvLedger)
	doneChannel := make(chan struct{})
	dataChannel, err := lgr.CommitNotificationsChannel(doneChannel)
	require.NoError(t, err)

	// simulate a single write (ns1/key1) and capture the public simulation bytes
	s, err := lgr.NewTxSimulator("")
	require.NoError(t, err)
	_, err = s.GetState("ns1", "key1")
	require.NoError(t, err)
	require.NoError(t, s.SetState("ns1", "key1", []byte("val1")))
	sr, err := s.GetTxSimulationResults()
	require.NoError(t, err)
	srBytes, err := sr.GetPubSimulationBytes()
	require.NoError(t, err)
	s.Done()

	block := testutil.ConstructBlockFromBlockDetails(
		t, &testutil.BlockDetails{
			BlockNum:     1,
			PreviousHash: protoutil.BlockHeaderHash(gb.Header),
			Txs: []*testutil.TxDetails{
				{
					Type:              common.HeaderType_ENDORSER_TRANSACTION,
					TxID:              "txid_1",
					ChaincodeName:     "foo",
					ChaincodeVersion:  "v1",
					SimulationResults: srBytes,
					ChaincodeEvents:   []byte("foo-event"),
				},

				{
					Type:              common.HeaderType_ENDORSER_TRANSACTION,
					TxID:              "txid_2",
					ChaincodeName:     "bar",
					ChaincodeVersion:  "v2",
					SimulationResults: srBytes, // same read-sets: should cause mvcc conflict
					ChaincodeEvents:   []byte("bar-event"),
				},
			},
		}, false,
	)

	require.NoError(t, lgr.CommitLegacy(&ledger.BlockAndPvtData{Block: block}, &ledger.CommitOptions{}))
	commitNotification := <-dataChannel
	require.Equal(t,
		&ledger.CommitNotification{
			BlockNumber: 1,
			TxsInfo: []*ledger.CommitNotificationTxInfo{
				{
					TxType:             common.HeaderType_ENDORSER_TRANSACTION,
					TxID:               "txid_1",
					ValidationCode:     peer.TxValidationCode_VALID,
					ChaincodeID:        &peer.ChaincodeID{Name: "foo", Version: "v1"},
					ChaincodeEventData: []byte("foo-event"),
				},
				{
					TxType:             common.HeaderType_ENDORSER_TRANSACTION,
					TxID:               "txid_2",
					ValidationCode:     peer.TxValidationCode_MVCC_READ_CONFLICT,
					ChaincodeID:        &peer.ChaincodeID{Name: "bar", Version: "v2"},
					ChaincodeEventData: []byte("bar-event"),
				},
			},
		},
		commitNotification,
	)
}
  1306  
  1307  func testutilPersistExplicitCollectionConfig(
  1308  	t *testing.T,
  1309  	provider *Provider,
  1310  	mockDeployedCCInfoProvider *mock.DeployedChaincodeInfoProvider,
  1311  	ledgerID string,
  1312  	chaincodeName string,
  1313  	collConfigPkg *peer.CollectionConfigPackage,
  1314  	committingBlockNum uint64,
  1315  ) {
  1316  	mockDeployedCCInfoProvider.UpdatedChaincodesReturns(
  1317  		[]*ledger.ChaincodeLifecycleInfo{
  1318  			{
  1319  				Name: chaincodeName,
  1320  			},
  1321  		},
  1322  		nil,
  1323  	)
  1324  	mockDeployedCCInfoProvider.ChaincodeInfoReturns(
  1325  		&ledger.DeployedChaincodeInfo{
  1326  			Name:                        chaincodeName,
  1327  			ExplicitCollectionConfigPkg: collConfigPkg,
  1328  		},
  1329  		nil,
  1330  	)
  1331  	err := provider.configHistoryMgr.HandleStateUpdates(
  1332  		&ledger.StateUpdateTrigger{
  1333  			LedgerID:           ledgerID,
  1334  			CommittingBlockNum: committingBlockNum,
  1335  		},
  1336  	)
  1337  	require.NoError(t, err)
  1338  }
  1339  
  1340  func testutilCollConfigPkg(colls []*peer.StaticCollectionConfig) *peer.CollectionConfigPackage {
  1341  	if len(colls) == 0 {
  1342  		return nil
  1343  	}
  1344  	pkg := &peer.CollectionConfigPackage{
  1345  		Config: []*peer.CollectionConfig{},
  1346  	}
  1347  	for _, coll := range colls {
  1348  		pkg.Config = append(pkg.Config,
  1349  			&peer.CollectionConfig{
  1350  				Payload: &peer.CollectionConfig_StaticCollectionConfig{
  1351  					StaticCollectionConfig: coll,
  1352  				},
  1353  			},
  1354  		)
  1355  	}
  1356  	return pkg
  1357  }
  1358  
// sampleDataWithPvtdataForSelectiveTx generates 10 test blocks (block numbers
// 1 to 10; blockAndpvtdata[i] holds block number i+1 — confirmed by the
// height/block-number assertions in the tests that consume this data) where
// only selected transactions carry private data:
//   - block 3 (index 2): pvtdata for txNum 3, 5, 6; txNum 6 marked invalid
//   - block 4 (index 3): pvtdata for txNum 4, 6
//   - block 6 (index 5): missing pvtdata for txNum 4, 5; txNum 5 marked invalid
func sampleDataWithPvtdataForSelectiveTx(t *testing.T, bg *testutil.BlockGenerator) []*ledger.BlockAndPvtData {
	var blockAndpvtdata []*ledger.BlockAndPvtData
	blocks := bg.NextTestBlocks(10)
	for i := 0; i < 10; i++ {
		blockAndpvtdata = append(blockAndpvtdata, &ledger.BlockAndPvtData{Block: blocks[i]})
	}

	// txNum 3, 5, 6 in block number 3 (index 2) have pvtdata but txNum 6 is invalid
	blockAndpvtdata[2].PvtData = samplePvtData(t, []uint64{3, 5, 6})
	txFilter := txflags.ValidationFlags(blockAndpvtdata[2].Block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
	txFilter.SetFlag(6, peer.TxValidationCode_INVALID_WRITESET)
	blockAndpvtdata[2].Block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txFilter

	// txNum 4, 6 in block number 4 (index 3) have pvtdata
	blockAndpvtdata[3].PvtData = samplePvtData(t, []uint64{4, 6})

	// txNum 4, 5 in block number 6 (index 5) have missing pvt data but txNum 5 is invalid
	missingData := make(ledger.TxMissingPvtData)
	missingData.Add(4, "ns-4", "coll-4", true)
	missingData.Add(5, "ns-5", "coll-5", true)
	blockAndpvtdata[5].MissingPvtData = missingData
	txFilter = txflags.ValidationFlags(blockAndpvtdata[5].Block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
	txFilter.SetFlag(5, peer.TxValidationCode_INVALID_WRITESET)
	blockAndpvtdata[5].Block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER] = txFilter

	return blockAndpvtdata
}
  1386  
  1387  func sampleDataWithPvtdataForAllTxs(t *testing.T, bg *testutil.BlockGenerator) []*ledger.BlockAndPvtData {
  1388  	var blockAndpvtdata []*ledger.BlockAndPvtData
  1389  	blocks := bg.NextTestBlocks(10)
  1390  	for i := 0; i < 10; i++ {
  1391  		blockAndpvtdata = append(blockAndpvtdata,
  1392  			&ledger.BlockAndPvtData{
  1393  				Block:   blocks[i],
  1394  				PvtData: samplePvtData(t, []uint64{uint64(i), uint64(i + 1)}),
  1395  			},
  1396  		)
  1397  	}
  1398  	return blockAndpvtdata
  1399  }
  1400  
  1401  func samplePvtData(t *testing.T, txNums []uint64) map[uint64]*ledger.TxPvtData {
  1402  	pvtWriteSet := &rwset.TxPvtReadWriteSet{DataModel: rwset.TxReadWriteSet_KV}
  1403  	pvtWriteSet.NsPvtRwset = []*rwset.NsPvtReadWriteSet{
  1404  		{
  1405  			Namespace: "ns-1",
  1406  			CollectionPvtRwset: []*rwset.CollectionPvtReadWriteSet{
  1407  				{
  1408  					CollectionName: "coll-1",
  1409  					Rwset:          []byte("RandomBytes-PvtRWSet-ns1-coll1"),
  1410  				},
  1411  				{
  1412  					CollectionName: "coll-2",
  1413  					Rwset:          []byte("RandomBytes-PvtRWSet-ns1-coll2"),
  1414  				},
  1415  			},
  1416  		},
  1417  	}
  1418  	var pvtData []*ledger.TxPvtData
  1419  	for _, txNum := range txNums {
  1420  		pvtData = append(pvtData, &ledger.TxPvtData{SeqInBlock: txNum, WriteSet: pvtWriteSet})
  1421  	}
  1422  	return constructPvtdataMap(pvtData)
  1423  }
  1424  
  1425  func btlPolicyForSampleData() pvtdatapolicy.BTLPolicy {
  1426  	return btltestutil.SampleBTLPolicy(
  1427  		map[[2]string]uint64{
  1428  			{"ns-1", "coll-1"}: 0,
  1429  			{"ns-1", "coll-2"}: 0,
  1430  		},
  1431  	)
  1432  }
  1433  
  1434  func prepareNextBlockForTest(t *testing.T, l ledger.PeerLedger, bg *testutil.BlockGenerator,
  1435  	txid string, pubKVs map[string]string, pvtKVs map[string]string) *ledger.BlockAndPvtData {
  1436  	simulator, _ := l.NewTxSimulator(txid)
  1437  	// simulating transaction
  1438  	for k, v := range pubKVs {
  1439  		require.NoError(t, simulator.SetState("ns", k, []byte(v)))
  1440  	}
  1441  	for k, v := range pvtKVs {
  1442  		require.NoError(t, simulator.SetPrivateData("ns", "coll", k, []byte(v)))
  1443  	}
  1444  	simulator.Done()
  1445  	simRes, _ := simulator.GetTxSimulationResults()
  1446  	pubSimBytes, _ := simRes.GetPubSimulationBytes()
  1447  	block := bg.NextBlock([][]byte{pubSimBytes})
  1448  	blkAndPvtData := &ledger.BlockAndPvtData{Block: block}
  1449  	if len(pvtKVs) != 0 {
  1450  		blkAndPvtData.PvtData = ledger.TxPvtDataMap{
  1451  			0: {SeqInBlock: 0, WriteSet: simRes.PvtSimulationResults},
  1452  		}
  1453  	}
  1454  	return blkAndPvtData
  1455  }
  1456  
  1457  func checkBCSummaryForTest(t *testing.T, l ledger.PeerLedger, expectedBCSummary *bcSummary) {
  1458  	if expectedBCSummary.bcInfo != nil {
  1459  		actualBCInfo, _ := l.GetBlockchainInfo()
  1460  		require.Equal(t, expectedBCSummary.bcInfo, actualBCInfo)
  1461  	}
  1462  
  1463  	if expectedBCSummary.stateDBSavePoint != 0 {
  1464  		actualStateDBSavepoint, _ := l.(*kvLedger).txmgr.GetLastSavepoint()
  1465  		require.Equal(t, expectedBCSummary.stateDBSavePoint, actualStateDBSavepoint.BlockNum)
  1466  	}
  1467  
  1468  	if !(expectedBCSummary.stateDBKVs == nil && expectedBCSummary.stateDBPvtKVs == nil) {
  1469  		checkStateDBForTest(t, l, expectedBCSummary.stateDBKVs, expectedBCSummary.stateDBPvtKVs)
  1470  	}
  1471  
  1472  	if expectedBCSummary.historyDBSavePoint != 0 {
  1473  		actualHistoryDBSavepoint, _ := l.(*kvLedger).historyDB.GetLastSavepoint()
  1474  		require.Equal(t, expectedBCSummary.historyDBSavePoint, actualHistoryDBSavepoint.BlockNum)
  1475  	}
  1476  
  1477  	if expectedBCSummary.historyKey != "" {
  1478  		checkHistoryDBForTest(t, l, expectedBCSummary.historyKey, expectedBCSummary.historyVals)
  1479  	}
  1480  }
  1481  
  1482  func checkStateDBForTest(t *testing.T, l ledger.PeerLedger, expectedKVs map[string]string, expectedPvtKVs map[string]string) {
  1483  	simulator, _ := l.NewTxSimulator("checkStateDBForTest")
  1484  	defer simulator.Done()
  1485  	for expectedKey, expectedVal := range expectedKVs {
  1486  		actualVal, _ := simulator.GetState("ns", expectedKey)
  1487  		require.Equal(t, []byte(expectedVal), actualVal)
  1488  	}
  1489  
  1490  	for expectedPvtKey, expectedPvtVal := range expectedPvtKVs {
  1491  		actualPvtVal, _ := simulator.GetPrivateData("ns", "coll", expectedPvtKey)
  1492  		require.Equal(t, []byte(expectedPvtVal), actualPvtVal)
  1493  	}
  1494  }
  1495  
  1496  func checkHistoryDBForTest(t *testing.T, l ledger.PeerLedger, key string, expectedVals []string) {
  1497  	qhistory, _ := l.NewHistoryQueryExecutor()
  1498  	itr, _ := qhistory.GetHistoryForKey("ns", key)
  1499  	var actualVals []string
  1500  	for {
  1501  		kmod, err := itr.Next()
  1502  		require.NoError(t, err, "Error upon Next()")
  1503  		if kmod == nil {
  1504  			break
  1505  		}
  1506  		retrievedValue := kmod.(*queryresult.KeyModification).Value
  1507  		actualVals = append(actualVals, string(retrievedValue))
  1508  	}
  1509  	require.Equal(t, expectedVals, actualVals)
  1510  }
  1511  
// bcSummary captures the expected state of a ledger for verification by
// checkBCSummaryForTest. Zero-valued fields are skipped during checking,
// so a test can assert any subset of the summary.
type bcSummary struct {
	bcInfo             *common.BlockchainInfo // expected result of GetBlockchainInfo (nil = skip)
	stateDBSavePoint   uint64                 // expected state DB savepoint block number (0 = skip)
	stateDBKVs         map[string]string      // expected public key-values in namespace "ns"
	stateDBPvtKVs      map[string]string      // expected private key-values in "ns"/"coll"
	historyDBSavePoint uint64                 // expected history DB savepoint block number (0 = skip)
	historyKey         string                 // key whose history to verify ("" = skip)
	historyVals        []string               // expected ordered value history for historyKey
}