github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/core/ledger/kvledger/txmgmt/privacyenabledstate/snapshot_test.go (about)

     1  /*
     2  Copyright hechain. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package privacyenabledstate
     8  
     9  import (
    10  	"crypto/sha256"
    11  	"errors"
    12  	"fmt"
    13  	"hash"
    14  	"io/ioutil"
    15  	"os"
    16  	"path/filepath"
    17  	"strings"
    18  	"testing"
    19  
    20  	"github.com/golang/protobuf/proto"
    21  	"github.com/hechain20/hechain/common/ledger/snapshot"
    22  	"github.com/hechain20/hechain/core/ledger/internal/version"
    23  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/privacyenabledstate/mock"
    24  	"github.com/hechain20/hechain/core/ledger/kvledger/txmgmt/statedb"
    25  	"github.com/stretchr/testify/require"
    26  )
    27  
    28  var testNewHashFunc = func() (hash.Hash, error) {
    29  	return sha256.New(), nil
    30  }
    31  
    32  func TestSnapshot(t *testing.T) {
    33  	for _, env := range testEnvs {
    34  		t.Run(env.GetName(), func(t *testing.T) {
    35  			testSnapshot(t, env)
    36  		})
    37  	}
    38  }
    39  
    40  func testSnapshot(t *testing.T, env TestEnv) {
    41  	// generateSampleData returns a slice of KVs. The returned value contains five KVs for each of the namespaces
    42  	generateSampleData := func(namespaces ...string) []*statedb.VersionedKV {
    43  		sampleData := []*statedb.VersionedKV{}
    44  		for _, ns := range namespaces {
    45  			for i := 0; i < 5; i++ {
    46  				sampleKV := &statedb.VersionedKV{
    47  					CompositeKey: &statedb.CompositeKey{
    48  						Namespace: ns,
    49  						Key:       fmt.Sprintf("key-%d", i),
    50  					},
    51  					VersionedValue: &statedb.VersionedValue{
    52  						Value:    []byte(fmt.Sprintf("value-for-key-%d-for-%s", i, ns)),
    53  						Version:  version.NewHeight(1, 1),
    54  						Metadata: []byte(fmt.Sprintf("metadata-for-key-%d-for-%s", i, ns)),
    55  					},
    56  				}
    57  				sampleData = append(sampleData, sampleKV)
    58  			}
    59  		}
    60  		return sampleData
    61  	}
    62  	samplePublicState := generateSampleData(
    63  		"",
    64  		"ns1",
    65  		"ns2",
    66  		"ns4",
    67  	)
    68  
    69  	samplePvtStateHashes := generateSampleData(
    70  		deriveHashedDataNs("", "coll1"),
    71  		deriveHashedDataNs("ns1", "coll1"),
    72  		deriveHashedDataNs("ns1", "coll2"),
    73  		deriveHashedDataNs("ns2", "coll3"),
    74  		deriveHashedDataNs("ns3", "coll1"),
    75  	)
    76  
    77  	samplePvtState := generateSampleData(
    78  		derivePvtDataNs("", "coll1"),
    79  		derivePvtDataNs("ns1", "coll1"),
    80  		derivePvtDataNs("ns1", "coll2"),
    81  		derivePvtDataNs("ns2", "coll3"),
    82  		derivePvtDataNs("ns3", "coll1"),
    83  	)
    84  
    85  	testCases := []struct {
    86  		description    string
    87  		publicState    []*statedb.VersionedKV
    88  		pvtStateHashes []*statedb.VersionedKV
    89  		pvtState       []*statedb.VersionedKV
    90  	}{
    91  		{
    92  			description:    "no-data",
    93  			publicState:    nil,
    94  			pvtStateHashes: nil,
    95  			pvtState:       nil,
    96  		},
    97  		{
    98  			description:    "only-public-data",
    99  			publicState:    samplePublicState,
   100  			pvtStateHashes: nil,
   101  			pvtState:       nil,
   102  		},
   103  		{
   104  			description:    "only-pvtdatahashes",
   105  			publicState:    nil,
   106  			pvtStateHashes: samplePvtStateHashes,
   107  			pvtState:       nil,
   108  		},
   109  		{
   110  			description:    "public-and-pvtdatahashes",
   111  			publicState:    samplePublicState,
   112  			pvtStateHashes: samplePvtStateHashes,
   113  			pvtState:       nil,
   114  		},
   115  		{
   116  			description:    "public-and-pvtdatahashes-and-pvtdata",
   117  			publicState:    samplePublicState,
   118  			pvtStateHashes: samplePvtStateHashes,
   119  			pvtState:       samplePvtState,
   120  		},
   121  	}
   122  
   123  	for _, testCase := range testCases {
   124  		t.Run(testCase.description, func(t *testing.T) {
   125  			testSnapshotWithSampleData(
   126  				t,
   127  				env,
   128  				testCase.publicState,
   129  				testCase.pvtStateHashes,
   130  				testCase.pvtState,
   131  			)
   132  		})
   133  	}
   134  }
   135  
   136  func testSnapshotWithSampleData(t *testing.T, env TestEnv,
   137  	publicState []*statedb.VersionedKV,
   138  	pvtStateHashes []*statedb.VersionedKV,
   139  	pvtState []*statedb.VersionedKV,
   140  ) {
   141  	env.Init(t)
   142  	defer env.Cleanup()
   143  	// load data into source statedb
   144  	sourceDB := env.GetDBHandle(generateLedgerID(t))
   145  	updateBatch := NewUpdateBatch()
   146  	for _, s := range publicState {
   147  		updateBatch.PubUpdates.PutValAndMetadata(s.Namespace, s.Key, s.Value, s.Metadata, s.Version)
   148  	}
   149  	for _, s := range pvtStateHashes {
   150  		nsColl := strings.Split(s.Namespace, nsJoiner+hashDataPrefix)
   151  		ns := nsColl[0]
   152  		coll := nsColl[1]
   153  		updateBatch.HashUpdates.PutValHashAndMetadata(ns, coll, []byte(s.Key), s.Value, s.Metadata, s.Version)
   154  	}
   155  	for _, s := range pvtState {
   156  		nsColl := strings.Split(s.Namespace, nsJoiner+pvtDataPrefix)
   157  		ns := nsColl[0]
   158  		coll := nsColl[1]
   159  		updateBatch.PvtUpdates.Put(ns, coll, s.Key, s.Value, s.Version)
   160  	}
   161  	err := sourceDB.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(2, 2))
   162  	require.NoError(t, err)
   163  
   164  	// export snapshot files from statedb
   165  	snapshotDirSrcDB, err := ioutil.TempDir("", "testsnapshot")
   166  	require.NoError(t, err)
   167  	defer func() {
   168  		os.RemoveAll(snapshotDirSrcDB)
   169  	}()
   170  
   171  	// verify exported snapshot files
   172  	filesAndHashesSrcDB, err := sourceDB.ExportPubStateAndPvtStateHashes(snapshotDirSrcDB, testNewHashFunc)
   173  	require.NoError(t, err)
   174  	verifyExportedSnapshot(t,
   175  		snapshotDirSrcDB,
   176  		filesAndHashesSrcDB,
   177  		publicState != nil,
   178  		pvtStateHashes != nil,
   179  	)
   180  
   181  	// import snapshot in a fresh db and verify the imported state
   182  	destinationDBName := generateLedgerID(t)
   183  	err = env.GetProvider().ImportFromSnapshot(
   184  		destinationDBName, version.NewHeight(10, 10), snapshotDirSrcDB)
   185  	require.NoError(t, err)
   186  	destinationDB := env.GetDBHandle(destinationDBName)
   187  	verifyImportedSnapshot(t, destinationDB,
   188  		version.NewHeight(10, 10),
   189  		publicState, pvtStateHashes, pvtState)
   190  
   191  	// export snapshot from the destination db
   192  	snapshotDirDestDB, err := ioutil.TempDir("", "testsnapshot")
   193  	require.NoError(t, err)
   194  	defer func() {
   195  		os.RemoveAll(snapshotDirDestDB)
   196  	}()
   197  	filesAndHashesDestDB, err := destinationDB.ExportPubStateAndPvtStateHashes(snapshotDirDestDB, testNewHashFunc)
   198  	require.NoError(t, err)
   199  	require.Equal(t, filesAndHashesSrcDB, filesAndHashesDestDB)
   200  }
   201  
   202  func verifyExportedSnapshot(
   203  	t *testing.T,
   204  	snapshotDir string,
   205  	filesAndHashes map[string][]byte,
   206  	publicStateFilesExpected bool,
   207  	pvtdataHashesFilesExpected bool,
   208  ) {
   209  	numFilesExpected := 0
   210  	if publicStateFilesExpected {
   211  		numFilesExpected += 2
   212  		require.Contains(t, filesAndHashes, PubStateDataFileName)
   213  		require.Contains(t, filesAndHashes, PubStateMetadataFileName)
   214  	}
   215  
   216  	if pvtdataHashesFilesExpected {
   217  		numFilesExpected += 2
   218  		require.Contains(t, filesAndHashes, PvtStateHashesFileName)
   219  		require.Contains(t, filesAndHashes, PvtStateHashesMetadataFileName)
   220  	}
   221  
   222  	for f, h := range filesAndHashes {
   223  		expectedFile := filepath.Join(snapshotDir, f)
   224  		require.FileExists(t, expectedFile)
   225  		require.Equal(t, sha256ForFileForTest(t, expectedFile), h)
   226  	}
   227  
   228  	require.Len(t, filesAndHashes, numFilesExpected)
   229  }
   230  
   231  func verifyImportedSnapshot(t *testing.T,
   232  	db *DB,
   233  	expectedSavepoint *version.Height,
   234  	expectedPublicState,
   235  	expectedPvtStateHashes,
   236  	notExpectedPvtState []*statedb.VersionedKV,
   237  ) {
   238  	s, err := db.GetLatestSavePoint()
   239  	require.NoError(t, err)
   240  	require.Equal(t, expectedSavepoint, s)
   241  	for _, pub := range expectedPublicState {
   242  		vv, err := db.GetState(pub.Namespace, pub.Key)
   243  		require.NoError(t, err)
   244  		require.Equal(t, pub.VersionedValue, vv)
   245  	}
   246  
   247  	for _, pvtdataHashes := range expectedPvtStateHashes {
   248  		nsColl := strings.Split(pvtdataHashes.Namespace, nsJoiner+hashDataPrefix)
   249  		ns := nsColl[0]
   250  		coll := nsColl[1]
   251  		vv, err := db.GetValueHash(ns, coll, []byte(pvtdataHashes.Key))
   252  		require.NoError(t, err)
   253  		require.Equal(t, pvtdataHashes.VersionedValue, vv)
   254  	}
   255  
   256  	for _, ptvdata := range notExpectedPvtState {
   257  		nsColl := strings.Split(ptvdata.Namespace, nsJoiner+pvtDataPrefix)
   258  		ns := nsColl[0]
   259  		coll := nsColl[1]
   260  		vv, err := db.GetPrivateData(ns, coll, ptvdata.Key)
   261  		require.NoError(t, err)
   262  		require.Nil(t, vv)
   263  	}
   264  }
   265  
   266  func TestSnapshotImportMetadtaHintImport(t *testing.T) {
   267  	env := &LevelDBTestEnv{}
   268  	env.Init(t)
   269  	defer env.Cleanup()
   270  
   271  	sourceDB := env.GetDBHandle(generateLedgerID(t))
   272  	updateBatch := NewUpdateBatch()
   273  	updateBatch.PubUpdates.PutValAndMetadata(
   274  		"ns-with-no-metadata",
   275  		"key",
   276  		[]byte("value"),
   277  		nil,
   278  		version.NewHeight(1, 1),
   279  	)
   280  	updateBatch.PubUpdates.PutValAndMetadata(
   281  		"ns-with-metadata",
   282  		"key",
   283  		[]byte("value"),
   284  		[]byte("metadata"),
   285  		version.NewHeight(1, 1),
   286  	)
   287  	updateBatch.HashUpdates.PutValHashAndMetadata(
   288  		"ns-with-no-metadata-in-hashes",
   289  		"coll",
   290  		[]byte("Key"),
   291  		[]byte("Value"),
   292  		nil,
   293  		version.NewHeight(1, 1),
   294  	)
   295  	updateBatch.HashUpdates.PutValHashAndMetadata(
   296  		"ns-with-metadata-in-hashes",
   297  		"coll",
   298  		[]byte("Key"),
   299  		[]byte("Value"),
   300  		[]byte("metadata"),
   301  		version.NewHeight(1, 1),
   302  	)
   303  	err := sourceDB.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(2, 2))
   304  	require.NoError(t, err)
   305  
   306  	// export snapshot files from statedb
   307  	snapshotDir, err := ioutil.TempDir("", "testsnapshot")
   308  	require.NoError(t, err)
   309  	defer func() {
   310  		os.RemoveAll(snapshotDir)
   311  	}()
   312  	_, err = sourceDB.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc)
   313  	require.NoError(t, err)
   314  
   315  	// import snapshot in a fresh db
   316  	destinationDBName := generateLedgerID(t)
   317  	err = env.GetProvider().ImportFromSnapshot(
   318  		destinationDBName, version.NewHeight(10, 10), snapshotDir)
   319  	require.NoError(t, err)
   320  	destinationDB := env.GetDBHandle(destinationDBName)
   321  	h := destinationDB.metadataHint
   322  	require.False(t, h.metadataEverUsedFor("ns-with-no-metadata"))
   323  	require.True(t, h.metadataEverUsedFor("ns-with-metadata"))
   324  	require.False(t, h.metadataEverUsedFor("ns-with-no-metadata-in-hashes"))
   325  	require.True(t, h.metadataEverUsedFor("ns-with-metadata-in-hashes"))
   326  }
   327  
   328  func sha256ForFileForTest(t *testing.T, file string) []byte {
   329  	data, err := ioutil.ReadFile(file)
   330  	require.NoError(t, err)
   331  	sha := sha256.Sum256(data)
   332  	return sha[:]
   333  }
   334  
   335  func TestSnapshotReaderNextFunction(t *testing.T) {
   336  	testdir, err := ioutil.TempDir("", "testsnapshot-WriterReader-")
   337  	require.NoError(t, err)
   338  	defer os.RemoveAll(testdir)
   339  
   340  	w, err := NewSnapshotWriter(testdir, "datafile", "metadatafile", testNewHashFunc)
   341  	require.NoError(t, err)
   342  
   343  	snapshotRecord := &SnapshotRecord{
   344  		Key:   []byte("key"),
   345  		Value: []byte("value"),
   346  	}
   347  	require.NoError(t, w.AddData("ns", snapshotRecord))
   348  	_, _, err = w.Done()
   349  	require.NoError(t, err)
   350  	w.Close()
   351  
   352  	r, err := NewSnapshotReader(testdir, "datafile", "metadatafile")
   353  	require.NoError(t, err)
   354  	require.NotNil(t, r)
   355  	defer r.Close()
   356  
   357  	retrievedNs, retrievedSr, err := r.Next()
   358  	require.NoError(t, err)
   359  	require.Equal(t, "ns", retrievedNs)
   360  	require.True(t, proto.Equal(snapshotRecord, retrievedSr))
   361  
   362  	retrievedNs, retrievedSr, err = r.Next()
   363  	require.NoError(t, err)
   364  	require.Equal(t, "", retrievedNs)
   365  	require.Nil(t, retrievedSr)
   366  }
   367  
   368  func TestMetadataCursor(t *testing.T) {
   369  	metadata := []*metadataRow{}
   370  	for i := 1; i <= 100; i++ {
   371  		metadata = append(metadata, &metadataRow{
   372  			namespace: fmt.Sprintf("ns-%d", i),
   373  			kvCounts:  uint64(i),
   374  		})
   375  	}
   376  
   377  	cursor := &cursor{
   378  		metadata: metadata,
   379  	}
   380  
   381  	for _, m := range metadata {
   382  		for i := uint64(0); i < m.kvCounts; i++ {
   383  			require.True(t, cursor.canMove())
   384  			require.True(t, cursor.move())
   385  			require.Equal(t, m.namespace, cursor.currentNamespace())
   386  		}
   387  	}
   388  	require.False(t, cursor.canMove())
   389  	require.False(t, cursor.move())
   390  }
   391  
   392  func TestLoadMetadata(t *testing.T) {
   393  	testdir, err := ioutil.TempDir("", "testsnapshot-metadata-")
   394  	require.NoError(t, err)
   395  	defer os.RemoveAll(testdir)
   396  
   397  	metadata := []*metadataRow{}
   398  	for i := 1; i <= 100; i++ {
   399  		metadata = append(metadata, &metadataRow{
   400  			namespace: fmt.Sprintf("ns-%d", i),
   401  			kvCounts:  uint64(i),
   402  		})
   403  	}
   404  	metadataFilePath := filepath.Join(testdir, PubStateMetadataFileName)
   405  	metadataFileWriter, err := snapshot.CreateFile(metadataFilePath, snapshotFileFormat, testNewHashFunc)
   406  	require.NoError(t, err)
   407  
   408  	require.NoError(t, writeMetadata(metadata, metadataFileWriter))
   409  	_, err = metadataFileWriter.Done()
   410  	require.NoError(t, err)
   411  	defer metadataFileWriter.Close()
   412  
   413  	metadataFileReader, err := snapshot.OpenFile(metadataFilePath, snapshotFileFormat)
   414  	require.NoError(t, err)
   415  	defer metadataFileReader.Close()
   416  	loadedMetadata, err := readMetadata(metadataFileReader)
   417  	require.NoError(t, err)
   418  	require.Equal(t, metadata, loadedMetadata)
   419  }
   420  
// TestSnapshotExportErrorPropagation verifies that failures during snapshot
// export -- a pre-existing snapshot file of any of the four names, or a closed
// underlying db -- are propagated to the caller of ExportPubStateAndPvtStateHashes.
func TestSnapshotExportErrorPropagation(t *testing.T) {
	// Shared state mutated by init(); each subtest calls init() first and
	// defers the matching cleanup() it produces.
	var dbEnv *LevelDBTestEnv
	var snapshotDir string
	var db *DB
	var cleanup func()
	var err error

	// init creates a fresh leveldb-backed db seeded with one public KV and
	// one pvtdata-hash KV, plus an empty temp directory for the export.
	init := func() {
		dbEnv = &LevelDBTestEnv{}
		dbEnv.Init(t)
		db = dbEnv.GetDBHandle(generateLedgerID(t))
		updateBatch := NewUpdateBatch()
		updateBatch.PubUpdates.Put("ns1", "key1", []byte("value1"), version.NewHeight(1, 1))
		updateBatch.HashUpdates.Put("ns1", "coll1", []byte("key1"), []byte("value1"), version.NewHeight(1, 1))
		require.NoError(t, db.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(1, 1)))
		snapshotDir, err = ioutil.TempDir("", "testsnapshot")
		require.NoError(t, err)
		cleanup = func() {
			dbEnv.Cleanup()
			os.RemoveAll(snapshotDir)
		}
	}

	// Each of the four subtests below pre-creates one of the export target
	// files so that the export must fail on file creation.
	t.Run("pubStateDataFile already exists", func(t *testing.T) {
		init()
		defer cleanup()

		pubStateDataFilePath := filepath.Join(snapshotDir, PubStateDataFileName)
		_, err = os.Create(pubStateDataFilePath)
		require.NoError(t, err)
		_, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc)
		require.Contains(t, err.Error(), "error while creating the snapshot file: "+pubStateDataFilePath)
	})

	t.Run("pubStateMetadataFile already exists", func(t *testing.T) {
		init()
		defer cleanup()

		pubStateMetadataFilePath := filepath.Join(snapshotDir, PubStateMetadataFileName)
		_, err = os.Create(pubStateMetadataFilePath)
		require.NoError(t, err)
		_, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc)
		require.Contains(t, err.Error(), "error while creating the snapshot file: "+pubStateMetadataFilePath)
	})

	t.Run("pvtStateHashesDataFile already exists", func(t *testing.T) {
		init()
		defer cleanup()

		pvtStateHashesDataFilePath := filepath.Join(snapshotDir, PvtStateHashesFileName)
		_, err = os.Create(pvtStateHashesDataFilePath)
		require.NoError(t, err)
		_, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc)
		require.Contains(t, err.Error(), "error while creating the snapshot file: "+pvtStateHashesDataFilePath)
	})

	t.Run("pvtStateHashesMetadataFile already exists", func(t *testing.T) {
		init()
		defer cleanup()

		pvtStateHashesMetadataFilePath := filepath.Join(snapshotDir, PvtStateHashesMetadataFileName)
		_, err = os.Create(pvtStateHashesMetadataFilePath)
		require.NoError(t, err)
		_, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc)
		require.Contains(t, err.Error(), "error while creating the snapshot file: "+pvtStateHashesMetadataFilePath)
	})

	// Closing the provider underneath the handle makes the db read fail.
	t.Run("error while reading from db", func(t *testing.T) {
		init()
		defer cleanup()

		dbEnv.provider.Close()
		_, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc)
		require.Contains(t, err.Error(), "internal leveldb error while obtaining db iterator:")
	})
}
   497  
   498  func TestSnapshotImportErrorPropagation(t *testing.T) {
   499  	var dbEnv *LevelDBTestEnv
   500  	var snapshotDir string
   501  	var cleanup func()
   502  	var err error
   503  
   504  	init := func() {
   505  		dbEnv = &LevelDBTestEnv{}
   506  		dbEnv.Init(t)
   507  		db := dbEnv.GetDBHandle(generateLedgerID(t))
   508  		updateBatch := NewUpdateBatch()
   509  		updateBatch.PubUpdates.PutValAndMetadata("ns1", "key1", []byte("value1"), []byte("metadata"), version.NewHeight(1, 1))
   510  		updateBatch.HashUpdates.Put("ns1", "coll1", []byte("key1"), []byte("value1"), version.NewHeight(1, 1))
   511  		require.NoError(t, db.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(1, 1)))
   512  		snapshotDir, err = ioutil.TempDir("", "testsnapshot")
   513  		require.NoError(t, err)
   514  		_, err := db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc)
   515  		require.NoError(t, err)
   516  		cleanup = func() {
   517  			dbEnv.Cleanup()
   518  			os.RemoveAll(snapshotDir)
   519  		}
   520  	}
   521  
   522  	// errors related to data files
   523  	for _, f := range []string{PubStateDataFileName, PvtStateHashesFileName} {
   524  		t.Run("error_while_checking_the_presence_of_"+f, func(t *testing.T) {
   525  			init()
   526  			defer cleanup()
   527  
   528  			dataFile := filepath.Join(snapshotDir, f)
   529  			require.NoError(t, os.Remove(dataFile))
   530  			require.NoError(t, os.MkdirAll(dataFile, 0o700))
   531  			err := dbEnv.GetProvider().ImportFromSnapshot(
   532  				generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   533  			require.Contains(t, err.Error(), fmt.Sprintf("the supplied path [%s] is a dir", dataFile))
   534  		})
   535  
   536  		t.Run("error_while_opening_data_file_"+f, func(t *testing.T) {
   537  			init()
   538  			defer cleanup()
   539  
   540  			dataFile := filepath.Join(snapshotDir, f)
   541  			require.NoError(t, os.Remove(dataFile))
   542  			require.NoError(t, ioutil.WriteFile(dataFile, []byte(""), 0o600))
   543  			err := dbEnv.GetProvider().ImportFromSnapshot(
   544  				generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   545  			require.Contains(t, err.Error(), fmt.Sprintf("error while opening data file: error while reading from the snapshot file: %s", dataFile))
   546  		})
   547  
   548  		t.Run("unexpected_data_format_in_"+f, func(t *testing.T) {
   549  			init()
   550  			defer cleanup()
   551  
   552  			dataFile := filepath.Join(snapshotDir, f)
   553  			require.NoError(t, os.Remove(dataFile))
   554  			require.NoError(t, ioutil.WriteFile(dataFile, []byte{0x00}, 0o600))
   555  			err := dbEnv.GetProvider().ImportFromSnapshot(
   556  				generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   557  			require.EqualError(t, err, "error while opening data file: unexpected data format: 0")
   558  		})
   559  
   560  		t.Run("error_while_reading_snapshot_record_from_"+f, func(t *testing.T) {
   561  			init()
   562  			defer cleanup()
   563  
   564  			dataFile := filepath.Join(snapshotDir, f)
   565  			require.NoError(t, os.Remove(dataFile))
   566  
   567  			require.NoError(t, ioutil.WriteFile(dataFile, []byte{snapshotFileFormat}, 0o600))
   568  
   569  			err := dbEnv.GetProvider().ImportFromSnapshot(
   570  				generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   571  
   572  			require.Contains(t, err.Error(), "error while retrieving record from snapshot file")
   573  		})
   574  
   575  		t.Run("error_while_decoding_version_from_snapshot_record"+f, func(t *testing.T) {
   576  			init()
   577  			defer cleanup()
   578  
   579  			dataFile := filepath.Join(snapshotDir, f)
   580  			require.NoError(t, os.Remove(dataFile))
   581  
   582  			fileContent := []byte{snapshotFileFormat}
   583  			buf := proto.NewBuffer(nil)
   584  			require.NoError(t,
   585  				buf.EncodeMessage(
   586  					&SnapshotRecord{
   587  						Version: []byte("bad-version-bytes"),
   588  					},
   589  				),
   590  			)
   591  			fileContent = append(fileContent, buf.Bytes()...)
   592  			require.NoError(t, ioutil.WriteFile(dataFile, fileContent, 0o600))
   593  
   594  			err := dbEnv.GetProvider().ImportFromSnapshot(
   595  				generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   596  
   597  			require.Contains(t, err.Error(), "error while decoding version")
   598  		})
   599  	}
   600  
   601  	// errors related to metadata files
   602  	for _, f := range []string{PubStateMetadataFileName, PvtStateHashesMetadataFileName} {
   603  		t.Run("error_while_reading_data_format_from_metadata_file:"+f, func(t *testing.T) {
   604  			init()
   605  			defer cleanup()
   606  
   607  			metadataFile := filepath.Join(snapshotDir, f)
   608  			require.NoError(t, os.Remove(metadataFile))
   609  			err := dbEnv.GetProvider().ImportFromSnapshot(
   610  				generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   611  			require.Contains(t, err.Error(), "error while opening the snapshot file: "+metadataFile)
   612  		})
   613  
   614  		t.Run("error_while_reading_the_num-rows_from_metadata_file:"+f, func(t *testing.T) {
   615  			init()
   616  			defer cleanup()
   617  
   618  			metadataFile := filepath.Join(snapshotDir, f)
   619  			require.NoError(t, os.Remove(metadataFile))
   620  
   621  			fileContentWithMissingNumRows := []byte{snapshotFileFormat}
   622  			require.NoError(t, ioutil.WriteFile(metadataFile, fileContentWithMissingNumRows, 0o600))
   623  
   624  			err := dbEnv.GetProvider().ImportFromSnapshot(
   625  				generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   626  			require.Contains(t, err.Error(), "error while reading num-rows in metadata")
   627  		})
   628  
   629  		t.Run("error_while_reading_chaincode_name_from_metadata_file:"+f, func(t *testing.T) {
   630  			init()
   631  			defer cleanup()
   632  
   633  			metadataFile := filepath.Join(snapshotDir, f)
   634  			require.NoError(t, os.Remove(metadataFile))
   635  
   636  			fileContentWithMissingCCName := []byte{snapshotFileFormat}
   637  			buf := proto.NewBuffer(nil)
   638  			require.NoError(t, buf.EncodeVarint(5))
   639  			fileContentWithMissingCCName = append(fileContentWithMissingCCName, buf.Bytes()...)
   640  			require.NoError(t, ioutil.WriteFile(metadataFile, fileContentWithMissingCCName, 0o600))
   641  
   642  			err := dbEnv.GetProvider().ImportFromSnapshot(
   643  				generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   644  			require.Contains(t, err.Error(), "error while reading namespace name")
   645  		})
   646  
   647  		t.Run("error_while_reading_numKVs_for_the_chaincode_name_from_metadata_file:"+f, func(t *testing.T) {
   648  			init()
   649  			defer cleanup()
   650  
   651  			metadataFile := filepath.Join(snapshotDir, f)
   652  			require.NoError(t, os.Remove(metadataFile))
   653  
   654  			fileContentWithMissingCCName := []byte{snapshotFileFormat}
   655  			buf := proto.NewBuffer(nil)
   656  			require.NoError(t, buf.EncodeVarint(1))
   657  			require.NoError(t, buf.EncodeRawBytes([]byte("my-chaincode")))
   658  			fileContentWithMissingCCName = append(fileContentWithMissingCCName, buf.Bytes()...)
   659  			require.NoError(t, ioutil.WriteFile(metadataFile, fileContentWithMissingCCName, 0o600))
   660  
   661  			err := dbEnv.GetProvider().ImportFromSnapshot(
   662  				generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   663  			require.Contains(t, err.Error(), fmt.Sprintf("error while reading num entries for the namespace [%s]", "my-chaincode"))
   664  		})
   665  	}
   666  
   667  	t.Run("error_writing_to_db", func(t *testing.T) {
   668  		init()
   669  		defer cleanup()
   670  
   671  		dbEnv.provider.Close()
   672  		err := dbEnv.GetProvider().ImportFromSnapshot(
   673  			generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   674  
   675  		require.Contains(t, err.Error(), "error writing batch to leveldb")
   676  	})
   677  
   678  	t.Run("error_writing_to_metadata_hint_db", func(t *testing.T) {
   679  		init()
   680  		defer cleanup()
   681  
   682  		dbEnv.provider.bookkeepingProvider.Close()
   683  		err := dbEnv.GetProvider().ImportFromSnapshot(
   684  			generateLedgerID(t), version.NewHeight(10, 10), snapshotDir)
   685  
   686  		require.Contains(t, err.Error(), "error while writing to metadata-hint db")
   687  	})
   688  }
   689  
//go:generate counterfeiter -o mock/snapshot_pvtdatahashes_consumer.go -fake-name SnapshotPvtdataHashesConsumer . snapshotPvtdataHashesConsumer

// snapshotPvtdataHashesConsumer embeds the package's exported
// SnapshotPvtdataHashesConsumer interface so that counterfeiter (see the
// go:generate directive above) can generate a mock for it in the mock package.
type snapshotPvtdataHashesConsumer interface {
	SnapshotPvtdataHashesConsumer
}
   694  
   695  func TestSnapshotImportPvtdataHashesConsumer(t *testing.T) {
   696  	for _, dbEnv := range testEnvs {
   697  		testSnapshotImportPvtdataHashesConsumer(t, dbEnv)
   698  	}
   699  }
   700  
   701  func testSnapshotImportPvtdataHashesConsumer(t *testing.T, dbEnv TestEnv) {
   702  	var snapshotDir string
   703  
   704  	init := func() {
   705  		var err error
   706  		dbEnv.Init(t)
   707  		snapshotDir, err = ioutil.TempDir("", "testsnapshot")
   708  
   709  		t.Cleanup(func() {
   710  			dbEnv.Cleanup()
   711  			os.RemoveAll(snapshotDir)
   712  		})
   713  
   714  		require.NoError(t, err)
   715  		db := dbEnv.GetDBHandle(generateLedgerID(t))
   716  		updateBatch := NewUpdateBatch()
   717  		updateBatch.PubUpdates.Put("ns-1", "key-1", []byte("value-1"), version.NewHeight(1, 1))
   718  		updateBatch.HashUpdates.Put("ns-1", "coll-1", []byte("key-hash-1"), []byte("value-hash-1"), version.NewHeight(1, 1))
   719  		require.NoError(t, db.ApplyPrivacyAwareUpdates(updateBatch, version.NewHeight(1, 1)))
   720  		snapshotDir, err = ioutil.TempDir("", "testsnapshot")
   721  		require.NoError(t, err)
   722  		_, err = db.ExportPubStateAndPvtStateHashes(snapshotDir, testNewHashFunc)
   723  		require.NoError(t, err)
   724  	}
   725  
   726  	t.Run("snapshot-import-invokes-consumer-"+dbEnv.GetName(), func(t *testing.T) {
   727  		init()
   728  		consumers := []*mock.SnapshotPvtdataHashesConsumer{
   729  			{},
   730  			{},
   731  		}
   732  		err := dbEnv.GetProvider().ImportFromSnapshot(
   733  			generateLedgerID(t),
   734  			version.NewHeight(10, 10),
   735  			snapshotDir,
   736  			consumers[0],
   737  			consumers[1],
   738  		)
   739  		require.NoError(t, err)
   740  		for _, c := range consumers {
   741  			callCounts := c.ConsumeSnapshotDataCallCount()
   742  			require.Equal(t, 1, callCounts)
   743  
   744  			callArgNs, callArgsColl, callArgsKeyHash, callArgsValueHash, callArgsVer := c.ConsumeSnapshotDataArgsForCall(0)
   745  			require.Equal(t, "ns-1", callArgNs)
   746  			require.Equal(t, "coll-1", callArgsColl)
   747  			require.Equal(t, []byte("key-hash-1"), callArgsKeyHash)
   748  			require.Equal(t, []byte("value-hash-1"), callArgsValueHash)
   749  			require.Equal(t, version.NewHeight(1, 1), callArgsVer)
   750  
   751  			require.Equal(t, 1, c.DoneCallCount())
   752  		}
   753  	})
   754  
   755  	t.Run("snapshot-import-propages-error-from-consumer-"+dbEnv.GetName(), func(t *testing.T) {
   756  		init()
   757  		consumers := []*mock.SnapshotPvtdataHashesConsumer{
   758  			{},
   759  			{},
   760  		}
   761  		consumers[1].ConsumeSnapshotDataReturns(errors.New("cannot-consume"))
   762  
   763  		err := dbEnv.GetProvider().ImportFromSnapshot(
   764  			generateLedgerID(t),
   765  			version.NewHeight(10, 10),
   766  			snapshotDir,
   767  			consumers[0],
   768  			consumers[1],
   769  		)
   770  		require.EqualError(t, err, "cannot-consume")
   771  	})
   772  
   773  	t.Run("snapshot-import-propages-error-from-consumer-done-invoke"+dbEnv.GetName(), func(t *testing.T) {
   774  		init()
   775  		consumers := []*mock.SnapshotPvtdataHashesConsumer{
   776  			{},
   777  			{},
   778  		}
   779  		consumers[0].DoneReturns(errors.New("cannot-finish-without-error"))
   780  
   781  		err := dbEnv.GetProvider().ImportFromSnapshot(
   782  			generateLedgerID(t),
   783  			version.NewHeight(10, 10),
   784  			snapshotDir,
   785  			consumers[0],
   786  			consumers[1],
   787  		)
   788  		require.EqualError(t, err, "cannot-finish-without-error")
   789  
   790  		for _, c := range consumers {
   791  			require.Equal(t, 1, c.DoneCallCount())
   792  		}
   793  	})
   794  }