github.com/osdi23p228/fabric@v0.0.0-20221218062954-77808885f5db/core/ledger/kvledger/kv_ledger_provider_test.go (about)

     1  /*
     2  Copyright IBM Corp. 2016 All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package kvledger
     8  
     9  import (
    10  	"fmt"
    11  	"io/ioutil"
    12  	"os"
    13  	"path/filepath"
    14  	"testing"
    15  	"time"
    16  
    17  	"github.com/golang/protobuf/proto"
    18  	"github.com/hyperledger/fabric-protos-go/common"
    19  	"github.com/hyperledger/fabric-protos-go/ledger/queryresult"
    20  	"github.com/hyperledger/fabric-protos-go/peer"
    21  	"github.com/osdi23p228/fabric/bccsp/sw"
    22  	configtxtest "github.com/osdi23p228/fabric/common/configtx/test"
    23  	"github.com/osdi23p228/fabric/common/ledger/blkstorage"
    24  	"github.com/osdi23p228/fabric/common/ledger/dataformat"
    25  	"github.com/osdi23p228/fabric/common/ledger/testutil"
    26  	"github.com/osdi23p228/fabric/common/ledger/util/leveldbhelper"
    27  	"github.com/osdi23p228/fabric/common/metrics/disabled"
    28  	"github.com/osdi23p228/fabric/common/util"
    29  	"github.com/osdi23p228/fabric/core/ledger"
    30  	lgr "github.com/osdi23p228/fabric/core/ledger"
    31  	"github.com/osdi23p228/fabric/core/ledger/kvledger/msgs"
    32  	"github.com/osdi23p228/fabric/core/ledger/mock"
    33  	"github.com/osdi23p228/fabric/protoutil"
    34  	"github.com/stretchr/testify/require"
    35  )
    36  
    37  func TestLedgerProvider(t *testing.T) {
    38  	conf, cleanup := testConfig(t)
    39  	defer cleanup()
    40  	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
    41  	numLedgers := 10
    42  	existingLedgerIDs, err := provider.List()
    43  	require.NoError(t, err)
    44  	require.Len(t, existingLedgerIDs, 0)
    45  	genesisBlocks := make([]*common.Block, numLedgers)
    46  	for i := 0; i < numLedgers; i++ {
    47  		genesisBlock, _ := configtxtest.MakeGenesisBlock(constructTestLedgerID(i))
    48  		genesisBlocks[i] = genesisBlock
    49  		provider.Create(genesisBlock)
    50  	}
    51  	existingLedgerIDs, err = provider.List()
    52  	require.NoError(t, err)
    53  	require.Len(t, existingLedgerIDs, numLedgers)
    54  
    55  	// verify formatKey is present in idStore
    56  	s := provider.idStore
    57  	val, err := s.db.Get(formatKey)
    58  	require.NoError(t, err)
    59  	require.Equal(t, []byte(dataformat.CurrentFormat), val)
    60  
    61  	provider.Close()
    62  
    63  	provider = testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
    64  	defer provider.Close()
    65  	ledgerIds, _ := provider.List()
    66  	require.Len(t, ledgerIds, numLedgers)
    67  	for i := 0; i < numLedgers; i++ {
    68  		require.Equal(t, constructTestLedgerID(i), ledgerIds[i])
    69  	}
    70  	for i := 0; i < numLedgers; i++ {
    71  		ledgerid := constructTestLedgerID(i)
    72  		status, _ := provider.Exists(ledgerid)
    73  		require.True(t, status)
    74  		ledger, err := provider.Open(ledgerid)
    75  		require.NoError(t, err)
    76  		bcInfo, err := ledger.GetBlockchainInfo()
    77  		ledger.Close()
    78  		require.NoError(t, err)
    79  		require.Equal(t, uint64(1), bcInfo.Height)
    80  
    81  		// check that the genesis block was persisted in the provider's db
    82  		s := provider.idStore
    83  		gbBytesInProviderStore, err := s.db.Get(s.encodeLedgerKey(ledgerid, ledgerKeyPrefix))
    84  		require.NoError(t, err)
    85  		gb := &common.Block{}
    86  		require.NoError(t, proto.Unmarshal(gbBytesInProviderStore, gb))
    87  		require.True(t, proto.Equal(gb, genesisBlocks[i]), "proto messages are not equal")
    88  
    89  		// check that ledger metadata keys were persisted in idStore with active status
    90  		val, err := s.db.Get(s.encodeLedgerKey(ledgerid, metadataKeyPrefix))
    91  		require.NoError(t, err)
    92  		metadata := &msgs.LedgerMetadata{}
    93  		require.NoError(t, proto.Unmarshal(val, metadata))
    94  		require.Equal(t, msgs.Status_ACTIVE, metadata.Status)
    95  	}
    96  	gb, _ := configtxtest.MakeGenesisBlock(constructTestLedgerID(2))
    97  	_, err = provider.Create(gb)
    98  	require.Equal(t, ErrLedgerIDExists, err)
    99  
   100  	status, err := provider.Exists(constructTestLedgerID(numLedgers))
   101  	require.NoError(t, err, "Failed to check for ledger existence")
   102  	require.Equal(t, status, false)
   103  
   104  	_, err = provider.Open(constructTestLedgerID(numLedgers))
   105  	require.Equal(t, ErrNonExistingLedgerID, err)
   106  }
   107  
   108  func TestGetActiveLedgerIDsIteratorError(t *testing.T) {
   109  	conf, cleanup := testConfig(t)
   110  	defer cleanup()
   111  	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   112  
   113  	for i := 0; i < 2; i++ {
   114  		genesisBlock, _ := configtxtest.MakeGenesisBlock(constructTestLedgerID(i))
   115  		provider.Create(genesisBlock)
   116  	}
   117  
   118  	// close provider to trigger db error
   119  	provider.Close()
   120  	_, err := provider.idStore.getActiveLedgerIDs()
   121  	require.EqualError(t, err, "error getting ledger ids from idStore: leveldb: closed")
   122  }
   123  
   124  func TestLedgerMetataDataUnmarshalError(t *testing.T) {
   125  	conf, cleanup := testConfig(t)
   126  	defer cleanup()
   127  	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   128  	defer provider.Close()
   129  
   130  	ledgerID := constructTestLedgerID(0)
   131  	genesisBlock, _ := configtxtest.MakeGenesisBlock(ledgerID)
   132  	provider.Create(genesisBlock)
   133  
   134  	// put invalid bytes for the metatdata key
   135  	provider.idStore.db.Put(provider.idStore.encodeLedgerKey(ledgerID, metadataKeyPrefix), []byte("invalid"), true)
   136  
   137  	_, err := provider.List()
   138  	require.EqualError(t, err, "error unmarshalling ledger metadata: unexpected EOF")
   139  
   140  	_, err = provider.Open(ledgerID)
   141  	require.EqualError(t, err, "error unmarshalling ledger metadata: unexpected EOF")
   142  }
   143  
   144  func TestNewProviderIdStoreFormatError(t *testing.T) {
   145  	conf, cleanup := testConfig(t)
   146  	defer cleanup()
   147  
   148  	require.NoError(t, testutil.Unzip("tests/testdata/v11/sample_ledgers/ledgersData.zip", conf.RootFSPath, false))
   149  
   150  	// NewProvider fails because ledgerProvider (idStore) has old format
   151  	_, err := NewProvider(
   152  		&lgr.Initializer{
   153  			DeployedChaincodeInfoProvider: &mock.DeployedChaincodeInfoProvider{},
   154  			MetricsProvider:               &disabled.Provider{},
   155  			Config:                        conf,
   156  		},
   157  	)
   158  	require.EqualError(t, err, fmt.Sprintf("unexpected format. db info = [leveldb for channel-IDs at [%s]], data format = [], expected format = [2.0]", LedgerProviderPath(conf.RootFSPath)))
   159  }
   160  
   161  func TestUpgradeIDStoreFormatDBError(t *testing.T) {
   162  	conf, cleanup := testConfig(t)
   163  	defer cleanup()
   164  	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   165  	provider.Close()
   166  
   167  	err := provider.idStore.upgradeFormat()
   168  	dbPath := LedgerProviderPath(conf.RootFSPath)
   169  	require.EqualError(t, err, fmt.Sprintf("error while trying to see if the leveldb at path [%s] is empty: leveldb: closed", dbPath))
   170  }
   171  
   172  func TestCheckUpgradeEligibilityV1x(t *testing.T) {
   173  	conf, cleanup := testConfig(t)
   174  	defer cleanup()
   175  	dbPath := LedgerProviderPath(conf.RootFSPath)
   176  	db := leveldbhelper.CreateDB(&leveldbhelper.Conf{DBPath: dbPath})
   177  	idStore := &idStore{db, dbPath}
   178  	db.Open()
   179  	defer db.Close()
   180  
   181  	// write a tmpKey so that idStore is not be empty
   182  	err := idStore.db.Put([]byte("tmpKey"), []byte("tmpValue"), true)
   183  	require.NoError(t, err)
   184  
   185  	eligible, err := idStore.checkUpgradeEligibility()
   186  	require.NoError(t, err)
   187  	require.True(t, eligible)
   188  }
   189  
   190  func TestCheckUpgradeEligibilityCurrentVersion(t *testing.T) {
   191  	conf, cleanup := testConfig(t)
   192  	defer cleanup()
   193  	dbPath := LedgerProviderPath(conf.RootFSPath)
   194  	db := leveldbhelper.CreateDB(&leveldbhelper.Conf{DBPath: dbPath})
   195  	idStore := &idStore{db, dbPath}
   196  	db.Open()
   197  	defer db.Close()
   198  
   199  	err := idStore.db.Put(formatKey, []byte(dataformat.CurrentFormat), true)
   200  	require.NoError(t, err)
   201  
   202  	eligible, err := idStore.checkUpgradeEligibility()
   203  	require.NoError(t, err)
   204  	require.False(t, eligible)
   205  }
   206  
   207  func TestCheckUpgradeEligibilityBadFormat(t *testing.T) {
   208  	conf, cleanup := testConfig(t)
   209  	defer cleanup()
   210  	dbPath := LedgerProviderPath(conf.RootFSPath)
   211  	db := leveldbhelper.CreateDB(&leveldbhelper.Conf{DBPath: dbPath})
   212  	idStore := &idStore{db, dbPath}
   213  	db.Open()
   214  	defer db.Close()
   215  
   216  	err := idStore.db.Put(formatKey, []byte("x.0"), true)
   217  	require.NoError(t, err)
   218  
   219  	expectedErr := &dataformat.ErrFormatMismatch{
   220  		ExpectedFormat: dataformat.PreviousFormat,
   221  		Format:         "x.0",
   222  		DBInfo:         fmt.Sprintf("leveldb for channel-IDs at [%s]", LedgerProviderPath(conf.RootFSPath)),
   223  	}
   224  	eligible, err := idStore.checkUpgradeEligibility()
   225  	require.EqualError(t, err, expectedErr.Error())
   226  	require.False(t, eligible)
   227  }
   228  
   229  func TestCheckUpgradeEligibilityEmptyDB(t *testing.T) {
   230  	conf, cleanup := testConfig(t)
   231  	defer cleanup()
   232  	dbPath := LedgerProviderPath(conf.RootFSPath)
   233  	db := leveldbhelper.CreateDB(&leveldbhelper.Conf{DBPath: dbPath})
   234  	idStore := &idStore{db, dbPath}
   235  	db.Open()
   236  	defer db.Close()
   237  
   238  	eligible, err := idStore.checkUpgradeEligibility()
   239  	require.NoError(t, err)
   240  	require.False(t, eligible)
   241  }
   242  
   243  func TestLedgerProviderHistoryDBDisabled(t *testing.T) {
   244  	conf, cleanup := testConfig(t)
   245  	conf.HistoryDBConfig.Enabled = false
   246  	defer cleanup()
   247  	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   248  	numLedgers := 10
   249  	existingLedgerIDs, err := provider.List()
   250  	require.NoError(t, err)
   251  	require.Len(t, existingLedgerIDs, 0)
   252  	genesisBlocks := make([]*common.Block, numLedgers)
   253  	for i := 0; i < numLedgers; i++ {
   254  		genesisBlock, _ := configtxtest.MakeGenesisBlock(constructTestLedgerID(i))
   255  		genesisBlocks[i] = genesisBlock
   256  		provider.Create(genesisBlock)
   257  	}
   258  	existingLedgerIDs, err = provider.List()
   259  	require.NoError(t, err)
   260  	require.Len(t, existingLedgerIDs, numLedgers)
   261  
   262  	provider.Close()
   263  
   264  	provider = testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   265  	defer provider.Close()
   266  	ledgerIds, _ := provider.List()
   267  	require.Len(t, ledgerIds, numLedgers)
   268  	t.Logf("ledgerIDs=%#v", ledgerIds)
   269  	for i := 0; i < numLedgers; i++ {
   270  		require.Equal(t, constructTestLedgerID(i), ledgerIds[i])
   271  	}
   272  	for i := 0; i < numLedgers; i++ {
   273  		ledgerid := constructTestLedgerID(i)
   274  		status, _ := provider.Exists(ledgerid)
   275  		require.True(t, status)
   276  		ledger, err := provider.Open(ledgerid)
   277  		require.NoError(t, err)
   278  		bcInfo, err := ledger.GetBlockchainInfo()
   279  		ledger.Close()
   280  		require.NoError(t, err)
   281  		require.Equal(t, uint64(1), bcInfo.Height)
   282  
   283  		// check that the genesis block was persisted in the provider's db
   284  		s := provider.idStore
   285  		gbBytesInProviderStore, err := s.db.Get(s.encodeLedgerKey(ledgerid, ledgerKeyPrefix))
   286  		require.NoError(t, err)
   287  		gb := &common.Block{}
   288  		require.NoError(t, proto.Unmarshal(gbBytesInProviderStore, gb))
   289  		require.True(t, proto.Equal(gb, genesisBlocks[i]), "proto messages are not equal")
   290  	}
   291  	gb, _ := configtxtest.MakeGenesisBlock(constructTestLedgerID(2))
   292  	_, err = provider.Create(gb)
   293  	require.Equal(t, ErrLedgerIDExists, err)
   294  
   295  	status, err := provider.Exists(constructTestLedgerID(numLedgers))
   296  	require.NoError(t, err, "Failed to check for ledger existence")
   297  	require.Equal(t, status, false)
   298  
   299  	_, err = provider.Open(constructTestLedgerID(numLedgers))
   300  	require.Equal(t, ErrNonExistingLedgerID, err)
   301  
   302  }
   303  
   304  func TestRecovery(t *testing.T) {
   305  	conf, cleanup := testConfig(t)
   306  	defer cleanup()
   307  	provider1 := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   308  	defer provider1.Close()
   309  
   310  	// now create the genesis block
   311  	genesisBlock, _ := configtxtest.MakeGenesisBlock(constructTestLedgerID(1))
   312  	ledger, err := provider1.open(constructTestLedgerID(1))
   313  	require.NoError(t, err)
   314  	ledger.CommitLegacy(&lgr.BlockAndPvtData{Block: genesisBlock}, &lgr.CommitOptions{})
   315  	ledger.Close()
   316  
   317  	// Case 1: assume a crash happens, force underconstruction flag to be set to simulate
   318  	// a failure where ledgerid is being created - ie., block is written but flag is not unset
   319  	provider1.idStore.setUnderConstructionFlag(constructTestLedgerID(1))
   320  	provider1.Close()
   321  
   322  	// construct a new provider1 to invoke recovery
   323  	provider1 = testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   324  	// verify the underecoveryflag and open the ledger
   325  	flag, err := provider1.idStore.getUnderConstructionFlag()
   326  	require.NoError(t, err, "Failed to read the underconstruction flag")
   327  	require.Equal(t, "", flag)
   328  	ledger, err = provider1.Open(constructTestLedgerID(1))
   329  	require.NoError(t, err, "Failed to open the ledger")
   330  	ledger.Close()
   331  
   332  	// Case 0: assume a crash happens before the genesis block of ledger 2 is committed
   333  	// Open the ID store (inventory of chainIds/ledgerIds)
   334  	provider1.idStore.setUnderConstructionFlag(constructTestLedgerID(2))
   335  	provider1.Close()
   336  
   337  	// construct a new provider to invoke recovery
   338  	provider2 := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   339  	defer provider2.Close()
   340  	require.NoError(t, err, "Provider failed to recover an underConstructionLedger")
   341  	flag, err = provider2.idStore.getUnderConstructionFlag()
   342  	require.NoError(t, err, "Failed to read the underconstruction flag")
   343  	require.Equal(t, "", flag)
   344  }
   345  
   346  func TestRecoveryHistoryDBDisabled(t *testing.T) {
   347  	conf, cleanup := testConfig(t)
   348  	conf.HistoryDBConfig.Enabled = false
   349  	defer cleanup()
   350  	provider1 := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   351  	defer provider1.Close()
   352  
   353  	// now create the genesis block
   354  	genesisBlock, _ := configtxtest.MakeGenesisBlock(constructTestLedgerID(1))
   355  	ledger, err := provider1.open(constructTestLedgerID(1))
   356  	require.NoError(t, err, "Failed to open the ledger")
   357  	ledger.CommitLegacy(&lgr.BlockAndPvtData{Block: genesisBlock}, &lgr.CommitOptions{})
   358  	ledger.Close()
   359  
   360  	// Case 1: assume a crash happens, force underconstruction flag to be set to simulate
   361  	// a failure where ledgerid is being created - ie., block is written but flag is not unset
   362  	provider1.idStore.setUnderConstructionFlag(constructTestLedgerID(1))
   363  	provider1.Close()
   364  
   365  	// construct a new provider to invoke recovery
   366  	provider2 := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   367  	defer provider2.Close()
   368  	// verify the underecoveryflag and open the ledger
   369  	flag, err := provider2.idStore.getUnderConstructionFlag()
   370  	require.NoError(t, err, "Failed to read the underconstruction flag")
   371  	require.Equal(t, "", flag)
   372  	ledger, err = provider2.Open(constructTestLedgerID(1))
   373  	require.NoError(t, err, "Failed to open the ledger")
   374  	ledger.Close()
   375  
   376  	// Case 0: assume a crash happens before the genesis block of ledger 2 is committed
   377  	// Open the ID store (inventory of chainIds/ledgerIds)
   378  	provider2.idStore.setUnderConstructionFlag(constructTestLedgerID(2))
   379  	provider2.Close()
   380  
   381  	// construct a new provider to invoke recovery
   382  	provider3 := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   383  	defer provider3.Close()
   384  	require.NoError(t, err, "Provider failed to recover an underConstructionLedger")
   385  	flag, err = provider3.idStore.getUnderConstructionFlag()
   386  	require.NoError(t, err, "Failed to read the underconstruction flag")
   387  	require.Equal(t, "", flag)
   388  }
   389  
   390  func TestMultipleLedgerBasicRW(t *testing.T) {
   391  	conf, cleanup := testConfig(t)
   392  	defer cleanup()
   393  	provider1 := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   394  	defer provider1.Close()
   395  
   396  	numLedgers := 10
   397  	ledgers := make([]lgr.PeerLedger, numLedgers)
   398  	for i := 0; i < numLedgers; i++ {
   399  		bg, gb := testutil.NewBlockGenerator(t, constructTestLedgerID(i), false)
   400  		l, err := provider1.Create(gb)
   401  		require.NoError(t, err)
   402  		ledgers[i] = l
   403  		txid := util.GenerateUUID()
   404  		s, _ := l.NewTxSimulator(txid)
   405  		err = s.SetState("ns", "testKey", []byte(fmt.Sprintf("testValue_%d", i)))
   406  		s.Done()
   407  		require.NoError(t, err)
   408  		res, err := s.GetTxSimulationResults()
   409  		require.NoError(t, err)
   410  		pubSimBytes, _ := res.GetPubSimulationBytes()
   411  		b := bg.NextBlock([][]byte{pubSimBytes})
   412  		err = l.CommitLegacy(&lgr.BlockAndPvtData{Block: b}, &ledger.CommitOptions{})
   413  		l.Close()
   414  		require.NoError(t, err)
   415  	}
   416  
   417  	provider1.Close()
   418  
   419  	provider2 := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
   420  	defer provider2.Close()
   421  	ledgers = make([]lgr.PeerLedger, numLedgers)
   422  	for i := 0; i < numLedgers; i++ {
   423  		l, err := provider2.Open(constructTestLedgerID(i))
   424  		require.NoError(t, err)
   425  		ledgers[i] = l
   426  	}
   427  
   428  	for i, l := range ledgers {
   429  		q, _ := l.NewQueryExecutor()
   430  		val, err := q.GetState("ns", "testKey")
   431  		q.Done()
   432  		require.NoError(t, err)
   433  		require.Equal(t, []byte(fmt.Sprintf("testValue_%d", i)), val)
   434  		l.Close()
   435  	}
   436  }
   437  
// TestLedgerBackup verifies a backup/restore flow: a ledger is populated in
// one directory tree, the derived stores (statedb, historydb, block indexes)
// are deleted, the tree is renamed, and a new provider opened on the renamed
// tree must rebuild the derived data and answer block, transaction, state,
// and history queries exactly as the original would have.
func TestLedgerBackup(t *testing.T) {
	ledgerid := "TestLedger"
	basePath, err := ioutil.TempDir("", "kvledger")
	require.NoError(t, err, "Failed to create ledger directory")
	defer os.RemoveAll(basePath)
	originalPath := filepath.Join(basePath, "kvledger1")
	restorePath := filepath.Join(basePath, "kvledger2")

	// create and populate a ledger in the original environment
	origConf := &lgr.Config{
		RootFSPath:    originalPath,
		StateDBConfig: &lgr.StateDBConfig{},
		PrivateDataConfig: &lgr.PrivateDataConfig{
			MaxBatchSize:                        5000,
			BatchesInterval:                     1000,
			PurgeInterval:                       100,
			DeprioritizedDataReconcilerInterval: 120 * time.Minute,
		},
		HistoryDBConfig: &lgr.HistoryDBConfig{
			Enabled: true, // history must be on so it can be rebuilt and queried after restore
		},
		SnapshotsConfig: &lgr.SnapshotsConfig{
			RootDir: filepath.Join(originalPath, "snapshots"),
		},
	}
	provider := testutilNewProvider(origConf, t, &mock.DeployedChaincodeInfoProvider{})
	bg, gb := testutil.NewBlockGenerator(t, ledgerid, false)
	gbHash := protoutil.BlockHeaderHash(gb.Header)
	ledger, _ := provider.Create(gb)

	// block 1: initial values for three keys
	txid := util.GenerateUUID()
	simulator, _ := ledger.NewTxSimulator(txid)
	simulator.SetState("ns1", "key1", []byte("value1"))
	simulator.SetState("ns1", "key2", []byte("value2"))
	simulator.SetState("ns1", "key3", []byte("value3"))
	simulator.Done()
	simRes, _ := simulator.GetTxSimulationResults()
	pubSimBytes, _ := simRes.GetPubSimulationBytes()
	block1 := bg.NextBlock([][]byte{pubSimBytes})
	ledger.CommitLegacy(&lgr.BlockAndPvtData{Block: block1}, &lgr.CommitOptions{})

	// block 2: overwrite the same keys so the history has two versions each
	txid = util.GenerateUUID()
	simulator, _ = ledger.NewTxSimulator(txid)
	simulator.SetState("ns1", "key1", []byte("value4"))
	simulator.SetState("ns1", "key2", []byte("value5"))
	simulator.SetState("ns1", "key3", []byte("value6"))
	simulator.Done()
	simRes, _ = simulator.GetTxSimulationResults()
	pubSimBytes, _ = simRes.GetPubSimulationBytes()
	block2 := bg.NextBlock([][]byte{pubSimBytes})
	ledger.CommitLegacy(&lgr.BlockAndPvtData{Block: block2}, &lgr.CommitOptions{})

	ledger.Close()
	provider.Close()

	// remove the statedb, historydb, and block indexes (they are supposed to be auto created during opening of an existing ledger)
	// and rename the originalPath to restorePath
	require.NoError(t, os.RemoveAll(StateDBPath(originalPath)))
	require.NoError(t, os.RemoveAll(HistoryDBPath(originalPath)))
	require.NoError(t, os.RemoveAll(filepath.Join(BlockStorePath(originalPath), blkstorage.IndexDir)))
	require.NoError(t, os.Rename(originalPath, restorePath))

	// Instantiate the ledger from restore environment and this should behave exactly as it would have in the original environment
	restoreConf := &lgr.Config{
		RootFSPath:    restorePath,
		StateDBConfig: &lgr.StateDBConfig{},
		PrivateDataConfig: &lgr.PrivateDataConfig{
			MaxBatchSize:                        5000,
			BatchesInterval:                     1000,
			PurgeInterval:                       100,
			DeprioritizedDataReconcilerInterval: 120 * time.Minute,
		},
		HistoryDBConfig: &lgr.HistoryDBConfig{
			Enabled: true,
		},
		SnapshotsConfig: &lgr.SnapshotsConfig{
			RootDir: filepath.Join(restorePath, "snapshots"),
		},
	}
	provider = testutilNewProvider(restoreConf, t, &mock.DeployedChaincodeInfoProvider{})
	defer provider.Close()

	// the restored ledger ID must already exist in the provider
	_, err = provider.Create(gb)
	require.Equal(t, ErrLedgerIDExists, err)

	ledger, _ = provider.Open(ledgerid)
	defer ledger.Close()

	// chain info must reflect all three blocks (genesis + block1 + block2)
	block1Hash := protoutil.BlockHeaderHash(block1.Header)
	block2Hash := protoutil.BlockHeaderHash(block2.Header)
	bcInfo, _ := ledger.GetBlockchainInfo()
	require.Equal(t, &common.BlockchainInfo{
		Height: 3, CurrentBlockHash: block2Hash, PreviousBlockHash: block1Hash,
	}, bcInfo)

	// retrieval by hash must work (exercises the rebuilt block indexes)
	b0, _ := ledger.GetBlockByHash(gbHash)
	require.True(t, proto.Equal(b0, gb), "proto messages are not equal")

	b1, _ := ledger.GetBlockByHash(block1Hash)
	require.True(t, proto.Equal(b1, block1), "proto messages are not equal")

	b2, _ := ledger.GetBlockByHash(block2Hash)
	require.True(t, proto.Equal(b2, block2), "proto messages are not equal")

	// retrieval by number must work as well
	b0, _ = ledger.GetBlockByNumber(0)
	require.True(t, proto.Equal(b0, gb), "proto messages are not equal")

	b1, _ = ledger.GetBlockByNumber(1)
	require.True(t, proto.Equal(b1, block1), "proto messages are not equal")

	b2, _ = ledger.GetBlockByNumber(2)
	require.True(t, proto.Equal(b2, block2), "proto messages are not equal")

	// get the tran id from the 2nd block, then use it to test GetTransactionByID()
	txEnvBytes2 := block1.Data.Data[0]
	txEnv2, err := protoutil.GetEnvelopeFromBlock(txEnvBytes2)
	require.NoError(t, err, "Error upon GetEnvelopeFromBlock")
	payload2, err := protoutil.UnmarshalPayload(txEnv2.Payload)
	require.NoError(t, err, "Error upon GetPayload")
	chdr, err := protoutil.UnmarshalChannelHeader(payload2.Header.ChannelHeader)
	require.NoError(t, err, "Error upon GetChannelHeaderFromBytes")
	txID2 := chdr.TxId
	processedTran2, err := ledger.GetTransactionByID(txID2)
	require.NoError(t, err, "Error upon GetTransactionByID")
	// get the tran envelope from the retrieved ProcessedTransaction
	retrievedTxEnv2 := processedTran2.TransactionEnvelope
	require.Equal(t, txEnv2, retrievedTxEnv2)

	// current state must hold the block-2 value (exercises the rebuilt statedb)
	qe, _ := ledger.NewQueryExecutor()
	value1, _ := qe.GetState("ns1", "key1")
	require.Equal(t, []byte("value4"), value1)

	// history must return both versions, newest first (exercises the rebuilt historydb)
	hqe, err := ledger.NewHistoryQueryExecutor()
	require.NoError(t, err)
	itr, err := hqe.GetHistoryForKey("ns1", "key1")
	require.NoError(t, err)
	defer itr.Close()

	result1, err := itr.Next()
	require.NoError(t, err)
	require.Equal(t, []byte("value4"), result1.(*queryresult.KeyModification).Value)
	result2, err := itr.Next()
	require.NoError(t, err)
	require.Equal(t, []byte("value1"), result2.(*queryresult.KeyModification).Value)
}
   583  
   584  func constructTestLedgerID(i int) string {
   585  	return fmt.Sprintf("ledger_%06d", i)
   586  }
   587  
   588  func testConfig(t *testing.T) (conf *lgr.Config, cleanup func()) {
   589  	path, err := ioutil.TempDir("", "kvledger")
   590  	require.NoError(t, err, "Failed to create test ledger directory")
   591  	conf = &lgr.Config{
   592  		RootFSPath:    path,
   593  		StateDBConfig: &lgr.StateDBConfig{},
   594  		PrivateDataConfig: &lgr.PrivateDataConfig{
   595  			MaxBatchSize:                        5000,
   596  			BatchesInterval:                     1000,
   597  			PurgeInterval:                       100,
   598  			DeprioritizedDataReconcilerInterval: 120 * time.Minute,
   599  		},
   600  		HistoryDBConfig: &lgr.HistoryDBConfig{
   601  			Enabled: true,
   602  		},
   603  		SnapshotsConfig: &lgr.SnapshotsConfig{
   604  			RootDir: filepath.Join(path, "snapshots"),
   605  		},
   606  	}
   607  	cleanup = func() {
   608  		os.RemoveAll(path)
   609  	}
   610  
   611  	return conf, cleanup
   612  }
   613  
   614  func testutilNewProvider(conf *lgr.Config, t *testing.T, ccInfoProvider *mock.DeployedChaincodeInfoProvider) *Provider {
   615  	cryptoProvider, err := sw.NewDefaultSecurityLevelWithKeystore(sw.NewDummyKeyStore())
   616  	require.NoError(t, err)
   617  
   618  	provider, err := NewProvider(
   619  		&lgr.Initializer{
   620  			DeployedChaincodeInfoProvider: ccInfoProvider,
   621  			MetricsProvider:               &disabled.Provider{},
   622  			Config:                        conf,
   623  			HashProvider:                  cryptoProvider,
   624  		},
   625  	)
   626  	require.NoError(t, err, "Failed to create new Provider")
   627  	return provider
   628  }
   629  
// nsCollBtlConfig associates a chaincode namespace with the block-to-live
// (BTL) setting for each of its private data collections.
type nsCollBtlConfig struct {
	namespace string            // chaincode namespace name
	btlConfig map[string]uint64 // collection name -> block-to-live value
}
   634  
// testutilNewProviderWithCollectionConfig creates a Provider whose mock
// DeployedChaincodeInfoProvider is stubbed to return static collection
// configs (with the given block-to-live values) for each configured namespace.
func testutilNewProviderWithCollectionConfig(
	t *testing.T,
	nsCollBtlConfigs []*nsCollBtlConfig,
	conf *lgr.Config,
) *Provider {
	provider := testutilNewProvider(conf, t, &mock.DeployedChaincodeInfoProvider{})
	// the provider was built with a mock above; retrieve it to install stubs
	mockCCInfoProvider := provider.initializer.DeployedChaincodeInfoProvider.(*mock.DeployedChaincodeInfoProvider)
	collectionConfPkgs := []*peer.CollectionConfigPackage{}

	// build, per namespace, a collection-config package (collectionConfPkgs is
	// index-parallel with nsCollBtlConfigs) and a name->config map for lookups
	nsCollMap := map[string]map[string]*peer.StaticCollectionConfig{}
	for _, nsCollBtlConf := range nsCollBtlConfigs {
		collMap := map[string]*peer.StaticCollectionConfig{}
		var collConf []*peer.CollectionConfig
		for collName, btl := range nsCollBtlConf.btlConfig {
			staticConf := &peer.StaticCollectionConfig{Name: collName, BlockToLive: btl}
			collMap[collName] = staticConf
			collectionConf := &peer.CollectionConfig{}
			collectionConf.Payload = &peer.CollectionConfig_StaticCollectionConfig{StaticCollectionConfig: staticConf}
			collConf = append(collConf, collectionConf)
		}
		collectionConfPkgs = append(collectionConfPkgs, &peer.CollectionConfigPackage{Config: collConf})
		nsCollMap[nsCollBtlConf.namespace] = collMap
	}

	// stub: chaincode info for a configured namespace carries its collection pkg
	mockCCInfoProvider.ChaincodeInfoStub = func(channelName, ccName string, qe lgr.SimpleQueryExecutor) (*lgr.DeployedChaincodeInfo, error) {
		for i, nsCollBtlConf := range nsCollBtlConfigs {
			if ccName == nsCollBtlConf.namespace {
				return &lgr.DeployedChaincodeInfo{
					Name: nsCollBtlConf.namespace, ExplicitCollectionConfigPkg: collectionConfPkgs[i]}, nil
			}
		}
		return nil, nil
	}

	// stub: return the whole collection-config package for a namespace
	mockCCInfoProvider.AllCollectionsConfigPkgStub = func(channelName, ccName string, qe lgr.SimpleQueryExecutor) (*peer.CollectionConfigPackage, error) {
		for i, nsCollBtlConf := range nsCollBtlConfigs {
			if ccName == nsCollBtlConf.namespace {
				return collectionConfPkgs[i], nil
			}
		}
		return nil, nil

	}

	// stub: look up a single collection's static config by name
	mockCCInfoProvider.CollectionInfoStub = func(channelName, ccName, collName string, qe lgr.SimpleQueryExecutor) (*peer.StaticCollectionConfig, error) {
		for _, nsCollBtlConf := range nsCollBtlConfigs {
			if ccName == nsCollBtlConf.namespace {
				return nsCollMap[nsCollBtlConf.namespace][collName], nil
			}
		}
		return nil, nil
	}
	return provider
}