github.com/osdi23p228/fabric@v0.0.0-20221218062954-77808885f5db/common/ledger/blkstorage/blockindex_test.go

/*
Copyright IBM Corp. All Rights Reserved.

SPDX-License-Identifier: Apache-2.0
*/

package blkstorage

import (
	"crypto/sha256"
	"fmt"
	"hash"
	"io/ioutil"
	"os"
	"path/filepath"
	"testing"

	"github.com/hyperledger/fabric-protos-go/common"
	"github.com/osdi23p228/fabric/common/ledger/snapshot"
	"github.com/osdi23p228/fabric/common/ledger/testutil"
	"github.com/osdi23p228/fabric/common/ledger/util"
	commonledgerutil "github.com/osdi23p228/fabric/common/ledger/util"
	"github.com/osdi23p228/fabric/common/metrics/disabled"
	"github.com/osdi23p228/fabric/internal/pkg/txflags"
	"github.com/osdi23p228/fabric/protoutil"
	"github.com/stretchr/testify/require"
)

var (
	testNewHashFunc = func() (hash.Hash, error) {
		return sha256.New(), nil
	}
)

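// TestBlockIndexSync verifies that blocks appended while index writes are
// diverted elsewhere become retrievable once the index catches up, either
// explicitly via syncIndex or implicitly by reopening the block store.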
func TestBlockIndexSync(t *testing.T) {
	testBlockIndexSync(t, 10, 5, false)
	testBlockIndexSync(t, 10, 5, true)
	testBlockIndexSync(t, 10, 0, true)
	testBlockIndexSync(t, 10, 10, true)
}

func testBlockIndexSync(t *testing.T, numBlocks int, numBlocksToIndex int, syncByRestart bool) {
	testName := fmt.Sprintf("%v/%v/%v", numBlocks, numBlocksToIndex, syncByRestart)
	t.Run(testName, func(t *testing.T) {
		env := newTestEnv(t, NewConf(testPath(), 0))
		defer env.Cleanup()
		ledgerid := "testledger"
		blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
		defer blkfileMgrWrapper.close()
		blkfileMgr := blkfileMgrWrapper.blockfileMgr
		originalIndexStore := blkfileMgr.index.db
		// construct blocks for testing
		blocks := testutil.ConstructTestBlocks(t, numBlocks)
		// add a few blocks
		blkfileMgrWrapper.addBlocks(blocks[:numBlocksToIndex])

		// redirect index writes to some random place and add remaining blocks
		blkfileMgr.index.db = env.provider.leveldbProvider.GetDBHandle("someRandomPlace")
		blkfileMgrWrapper.addBlocks(blocks[numBlocksToIndex:])

		// Plug the original index store back in
		blkfileMgr.index.db = originalIndexStore
		// Verify that the first set of blocks are indexed in the original index
		for i := 0; i < numBlocksToIndex; i++ {
			block, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
			require.NoError(t, err, "block [%d] should have been present in the index", i)
			require.Equal(t, blocks[i], block)
		}

		// Before testing the index sync-up, verify that the remaining blocks are not indexed in the original index
		for i := numBlocksToIndex; i < numBlocks; i++ {
			_, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
			require.Exactly(t, ErrNotFoundInIndex, err)
		}

		// perform index sync
		if syncByRestart {
			blkfileMgrWrapper.close()
			blkfileMgrWrapper = newTestBlockfileWrapper(env, ledgerid)
			defer blkfileMgrWrapper.close()
			blkfileMgr = blkfileMgrWrapper.blockfileMgr
		} else {
			blkfileMgr.syncIndex()
		}

		// Now, the last set of blocks should also be indexed in the original index
		for i := numBlocksToIndex; i < numBlocks; i++ {
			block, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
			require.NoError(t, err, "block [%d] should have been present in the index", i)
			require.Equal(t, blocks[i], block)
		}
	})
}

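// TestBlockIndexSelectiveIndexing verifies, for various combinations of
// indexable attributes, that lookups succeed only for the configured
// attributes and fail with ErrAttrNotIndexed otherwise.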
func TestBlockIndexSelectiveIndexing(t *testing.T) {
	testBlockIndexSelectiveIndexing(t, []IndexableAttr{})
	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrBlockHash})
	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrBlockNum})
	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrTxID})
	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrBlockNumTranNum})
	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrBlockHash, IndexableAttrBlockNum})
	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrTxID, IndexableAttrBlockNumTranNum})
}

func testBlockIndexSelectiveIndexing(t *testing.T, indexItems []IndexableAttr) {
	var testName string
	for _, s := range indexItems {
		testName = testName + string(s)
	}
	t.Run(testName, func(t *testing.T) {
		env := newTestEnvSelectiveIndexing(t, NewConf(testPath(), 0), indexItems, &disabled.Provider{})
		defer env.Cleanup()
		blkfileMgrWrapper := newTestBlockfileWrapper(env, "testledger")
		defer blkfileMgrWrapper.close()

		blocks := testutil.ConstructTestBlocks(t, 3)
		// add test blocks
		blkfileMgrWrapper.addBlocks(blocks)
		blockfileMgr := blkfileMgrWrapper.blockfileMgr

		// if the index has been configured for an indexItem, the item should be indexed; otherwise it should not be
		// test 'retrieveBlockByHash'
		block, err := blockfileMgr.retrieveBlockByHash(protoutil.BlockHeaderHash(blocks[0].Header))
		if containsAttr(indexItems, IndexableAttrBlockHash) {
			require.NoError(t, err, "Error while retrieving block by hash")
			require.Equal(t, blocks[0], block)
		} else {
			require.Exactly(t, ErrAttrNotIndexed, err)
		}

		// test 'retrieveBlockByNumber'
		block, err = blockfileMgr.retrieveBlockByNumber(0)
		if containsAttr(indexItems, IndexableAttrBlockNum) {
			require.NoError(t, err, "Error while retrieving block by number")
			require.Equal(t, blocks[0], block)
		} else {
			require.Exactly(t, ErrAttrNotIndexed, err)
		}

		// test 'retrieveTransactionByID'
		txid, err := protoutil.GetOrComputeTxIDFromEnvelope(blocks[0].Data.Data[0])
		require.NoError(t, err)
		txEnvelope, err := blockfileMgr.retrieveTransactionByID(txid)
		if containsAttr(indexItems, IndexableAttrTxID) {
			require.NoError(t, err, "Error while retrieving tx by id")
			txEnvelopeBytes := blocks[0].Data.Data[0]
			txEnvelopeOrig, err := protoutil.GetEnvelopeFromBlock(txEnvelopeBytes)
			require.NoError(t, err)
			require.Equal(t, txEnvelopeOrig, txEnvelope)
		} else {
			require.Exactly(t, ErrAttrNotIndexed, err)
		}

		// test 'retrieveTransactionByBlockNumTranNum'
		txEnvelope2, err := blockfileMgr.retrieveTransactionByBlockNumTranNum(0, 0)
		if containsAttr(indexItems, IndexableAttrBlockNumTranNum) {
			require.NoError(t, err, "Error while retrieving tx by blockNum and tranNum")
			txEnvelopeBytes2 := blocks[0].Data.Data[0]
			txEnvelopeOrig2, err2 := protoutil.GetEnvelopeFromBlock(txEnvelopeBytes2)
			require.NoError(t, err2)
			require.Equal(t, txEnvelopeOrig2, txEnvelope2)
		} else {
			require.Exactly(t, ErrAttrNotIndexed, err)
		}

		// test 'retrieveBlockByTxID'
		txid, err = protoutil.GetOrComputeTxIDFromEnvelope(blocks[0].Data.Data[0])
		require.NoError(t, err)
		block, err = blockfileMgr.retrieveBlockByTxID(txid)
		if containsAttr(indexItems, IndexableAttrTxID) {
			require.NoError(t, err, "Error while retrieving block by txID")
			require.Equal(t, blocks[0], block)
		} else {
			require.Exactly(t, ErrAttrNotIndexed, err)
		}

		for _, block := range blocks {
			flags := txflags.ValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])

			for idx, d := range block.Data.Data {
				txid, err = protoutil.GetOrComputeTxIDFromEnvelope(d)
				require.NoError(t, err)

				reason, err := blockfileMgr.retrieveTxValidationCodeByTxID(txid)

				if containsAttr(indexItems, IndexableAttrTxID) {
					require.NoError(t, err, "Error while retrieving tx validation code by txID")

					reasonFromFlags := flags.Flag(idx)

					require.Equal(t, reasonFromFlags, reason)
				} else {
					require.Exactly(t, ErrAttrNotIndexed, err)
				}
			}
		}
	})
}

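// containsAttr reports whether attr is present in indexItems.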
func containsAttr(indexItems []IndexableAttr, attr IndexableAttr) bool {
	for _, element := range indexItems {
		if element == attr {
			return true
		}
	}
	return false
}

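// TestTxIDKeyEncodingDecoding round-trips txID index keys through
// constructTxIDKey and retrieveTxID for a few representative inputs.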
func TestTxIDKeyEncodingDecoding(t *testing.T) {
	testcases := []struct {
		txid   string
		blkNum uint64
		txNum  uint64
	}{
		{"txid1", 0, 0},
		{"", 1, 1},
		{"", 0, 0},
		{"txid1", 100, 100},
	}
	for i, testcase := range testcases {
		encodedTxIDKey := constructTxIDKey(testcase.txid, testcase.blkNum, testcase.txNum)
		t.Run(fmt.Sprintf(" %d", i),
			func(t *testing.T) {
				txID, err := retrieveTxID(encodedTxIDKey)
				require.NoError(t, err)
				require.Equal(t, testcase.txid, txID)
				verifyTxIDKeyDecodable(t,
					encodedTxIDKey,
					testcase.txid, testcase.blkNum, testcase.txNum,
				)
			})
	}
}

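// TestTxIDKeyDecodingInvalidInputs verifies the error messages returned by
// retrieveTxID for malformed txID index keys.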
func TestTxIDKeyDecodingInvalidInputs(t *testing.T) {
	prefix := []byte{txIDIdxKeyPrefix}
	txIDLen := util.EncodeOrderPreservingVarUint64(uint64(len("mytxid")))
	txID := []byte("mytxid")

	// empty byte slice
	_, err := retrieveTxID([]byte{})
	require.EqualError(t, err, "invalid txIDKey - zero-length slice")

	// invalid prefix
	invalidPrefix := []byte{txIDIdxKeyPrefix + 1}
	_, err = retrieveTxID(invalidPrefix)
	require.EqualError(t, err, fmt.Sprintf("invalid txIDKey {%x} - unexpected prefix", invalidPrefix))

	// invalid key - only the prefix
	_, err = retrieveTxID(prefix)
	require.EqualError(t, err, fmt.Sprintf("invalid txIDKey {%x}: number of consumed bytes from DecodeVarint is invalid, expected 1, but got 0", prefix))

	// invalid key - incomplete length
	incompleteLength := appendAllAndTrimLastByte(prefix, txIDLen)
	_, err = retrieveTxID(incompleteLength)
	require.EqualError(t, err, fmt.Sprintf("invalid txIDKey {%x}: decoded size (1) from DecodeVarint is more than available bytes (0)", incompleteLength))

	// invalid key - incomplete txid
	incompleteTxID := appendAllAndTrimLastByte(prefix, txIDLen, txID)
	_, err = retrieveTxID(incompleteTxID)
	require.EqualError(t, err, fmt.Sprintf("invalid txIDKey {%x}, fewer bytes present", incompleteTxID))
}

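// TestExportUniqueTxIDs verifies that exportUniqueTxIDs writes each txID to
// the snapshot data file exactly once, in radix sort order, and returns the
// hashes of the generated data and metadata files.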
func TestExportUniqueTxIDs(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	ledgerid := "testledger"
	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
	defer blkfileMgrWrapper.close()
	blkfileMgr := blkfileMgrWrapper.blockfileMgr

	testSnapshotDir := testPath()
	defer os.RemoveAll(testSnapshotDir)

	// empty store generates no output
	fileHashes, err := blkfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
	require.NoError(t, err)
	require.Empty(t, fileHashes)
	files, err := ioutil.ReadDir(testSnapshotDir)
	require.NoError(t, err)
	require.Len(t, files, 0)

	// add the genesis block and test the exported bytes
	bg, gb := testutil.NewBlockGenerator(t, "myChannel", false)
	require.NoError(t, blkfileMgr.addBlock(gb))
	configTxID, err := protoutil.GetOrComputeTxIDFromEnvelope(gb.Data.Data[0])
	require.NoError(t, err)
	fileHashes, err = blkfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
	require.NoError(t, err)
	verifyExportedTxIDs(t, testSnapshotDir, fileHashes, configTxID)
	os.Remove(filepath.Join(testSnapshotDir, snapshotDataFileName))
	os.Remove(filepath.Join(testSnapshotDir, snapshotMetadataFileName))

	// add block-1 and test the exported bytes
	block1 := bg.NextBlockWithTxid(
		[][]byte{
			[]byte("tx with id=txid-3"),
			[]byte("tx with id=txid-1"),
			[]byte("tx with id=txid-2"),
			[]byte("another tx with existing id=txid-1"),
		},
		[]string{"txid-3", "txid-1", "txid-2", "txid-1"},
	)
	err = blkfileMgr.addBlock(block1)
	require.NoError(t, err)
	fileHashes, err = blkfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
	require.NoError(t, err)
	verifyExportedTxIDs(t, testSnapshotDir, fileHashes, "txid-1", "txid-2", "txid-3", configTxID) // "txid-1" appears only once; txIDs appear in radix sort order
	os.Remove(filepath.Join(testSnapshotDir, snapshotDataFileName))
	os.Remove(filepath.Join(testSnapshotDir, snapshotMetadataFileName))

	// add block-2 and test the exported bytes
	block2 := bg.NextBlockWithTxid(
		[][]byte{
			[]byte("tx with id=txid-0000000"),
			[]byte("tx with id=txid-3"),
			[]byte("tx with id=txid-4"),
		},
		[]string{"txid-0000000", "txid-3", "txid-4"},
	)
	err = blkfileMgr.addBlock(block2)
	require.NoError(t, err)

	fileHashes, err = blkfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
	require.NoError(t, err)
	verifyExportedTxIDs(t, testSnapshotDir, fileHashes, "txid-1", "txid-2", "txid-3", "txid-4", "txid-0000000", configTxID) // "txid-1" and "txid-3" appear only once; txIDs appear in radix sort order
}

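// TestExportUniqueTxIDsWhenTxIDsNotIndexed verifies that exportUniqueTxIDs
// fails with ErrAttrNotIndexed when txIDs are not part of the index
// configuration.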
func TestExportUniqueTxIDsWhenTxIDsNotIndexed(t *testing.T) {
	env := newTestEnvSelectiveIndexing(t, NewConf(testPath(), 0), []IndexableAttr{IndexableAttrBlockNum}, &disabled.Provider{})
	defer env.Cleanup()
	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testledger")
	defer blkfileMgrWrapper.close()

	blocks := testutil.ConstructTestBlocks(t, 5)
	blkfileMgrWrapper.addBlocks(blocks)

	testSnapshotDir := testPath()
	defer os.RemoveAll(testSnapshotDir)
	_, err := blkfileMgrWrapper.blockfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
	require.Equal(t, ErrAttrNotIndexed, err)
}

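// TestExportUniqueTxIDsErrorCases exercises the failure paths of
// exportUniqueTxIDs: snapshot files that already exist, a corrupt txID index
// entry, and a closed leveldb instance.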
func TestExportUniqueTxIDsErrorCases(t *testing.T) {
	env := newTestEnv(t, NewConf(testPath(), 0))
	defer env.Cleanup()
	ledgerid := "testledger"
	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
	defer blkfileMgrWrapper.close()

	blocks := testutil.ConstructTestBlocks(t, 5)
	blkfileMgrWrapper.addBlocks(blocks)
	blockfileMgr := blkfileMgrWrapper.blockfileMgr
	index := blockfileMgr.index

	testSnapshotDir := testPath()
	defer os.RemoveAll(testSnapshotDir)

	// error during data file creation
	dataFilePath := filepath.Join(testSnapshotDir, snapshotDataFileName)
	_, err := os.Create(dataFilePath)
	require.NoError(t, err)
	_, err = blkfileMgrWrapper.blockfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
	require.Contains(t, err.Error(), "error while creating the snapshot file: "+dataFilePath)
	os.RemoveAll(testSnapshotDir)

	// error during metadata file creation
	t.Logf("testSnapshotDir=%s", testSnapshotDir)
	require.NoError(t, os.MkdirAll(testSnapshotDir, 0700))
	metadataFilePath := filepath.Join(testSnapshotDir, snapshotMetadataFileName)
	_, err = os.Create(metadataFilePath)
	require.NoError(t, err)
	_, err = blkfileMgrWrapper.blockfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
	require.Contains(t, err.Error(), "error while creating the snapshot file: "+metadataFilePath)
	os.RemoveAll(testSnapshotDir)

	// error while retrieving the txid key
	require.NoError(t, os.MkdirAll(testSnapshotDir, 0700))
	require.NoError(t, index.db.Put([]byte{txIDIdxKeyPrefix}, []byte("some junk value"), true))
	_, err = index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
	require.EqualError(t, err, "invalid txIDKey {74}: number of consumed bytes from DecodeVarint is invalid, expected 1, but got 0")
	os.RemoveAll(testSnapshotDir)

	// error while reading from leveldb
	require.NoError(t, os.MkdirAll(testSnapshotDir, 0700))
	env.provider.leveldbProvider.Close()
	_, err = index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
	require.EqualError(t, err, "internal leveldb error while obtaining db iterator: leveldb: closed")
	os.RemoveAll(testSnapshotDir)
}

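// verifyExportedTxIDs checks that the snapshot data and metadata files in dir
// match the returned file hashes and that the data file contains exactly
// expectedTxIDs, in order.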
func verifyExportedTxIDs(t *testing.T, dir string, fileHashes map[string][]byte, expectedTxIDs ...string) {
	require.Len(t, fileHashes, 2)
	require.Contains(t, fileHashes, snapshotDataFileName)
	require.Contains(t, fileHashes, snapshotMetadataFileName)

	dataFile := filepath.Join(dir, snapshotDataFileName)
	dataFileContent, err := ioutil.ReadFile(dataFile)
	require.NoError(t, err)
	dataFileHash := sha256.Sum256(dataFileContent)
	require.Equal(t, dataFileHash[:], fileHashes[snapshotDataFileName])

	metadataFile := filepath.Join(dir, snapshotMetadataFileName)
	metadataFileContent, err := ioutil.ReadFile(metadataFile)
	require.NoError(t, err)
	metadataFileHash := sha256.Sum256(metadataFileContent)
	require.Equal(t, metadataFileHash[:], fileHashes[snapshotMetadataFileName])

	metadataReader, err := snapshot.OpenFile(metadataFile, snapshotFileFormat)
	require.NoError(t, err)
	defer metadataReader.Close()

	dataReader, err := snapshot.OpenFile(dataFile, snapshotFileFormat)
	require.NoError(t, err)
	defer dataReader.Close()

	numTxIDs, err := metadataReader.DecodeUVarInt()
	require.NoError(t, err)
	retrievedTxIDs := []string{}
	for i := uint64(0); i < numTxIDs; i++ {
		txID, err := dataReader.DecodeString()
		require.NoError(t, err)
		retrievedTxIDs = append(retrievedTxIDs, txID)
	}
	require.Equal(t, expectedTxIDs, retrievedTxIDs)
}

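// appendAllAndTrimLastByte concatenates the given byte slices and drops the
// final byte, producing deliberately truncated keys for the decoding tests.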
func appendAllAndTrimLastByte(input ...[]byte) []byte {
	r := []byte{}
	for _, i := range input {
		r = append(r, i...)
	}
	return r[:len(r)-1]
}

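// verifyTxIDKeyDecodable decodes an encoded txID key field by field and checks
// the embedded txID, block number, and transaction number.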
func verifyTxIDKeyDecodable(t *testing.T, txIDKey []byte, expectedTxID string, expectedBlkNum, expectedTxNum uint64) {
	length, lengthBytes, err := commonledgerutil.DecodeOrderPreservingVarUint64(txIDKey[1:])
	require.NoError(t, err)
	firstIndexTxID := 1 + lengthBytes
	firstIndexBlkNum := firstIndexTxID + int(length)
	require.Equal(t, []byte(expectedTxID), txIDKey[firstIndexTxID:firstIndexBlkNum])

	blkNum, n, err := commonledgerutil.DecodeOrderPreservingVarUint64(txIDKey[firstIndexBlkNum:])
	require.NoError(t, err)
	require.Equal(t, expectedBlkNum, blkNum)

	firstIndexTxNum := firstIndexBlkNum + n
	txNum, n, err := commonledgerutil.DecodeOrderPreservingVarUint64(txIDKey[firstIndexTxNum:])
	require.NoError(t, err)
	require.Equal(t, expectedTxNum, txNum)
	require.Len(t, txIDKey, firstIndexTxNum+n)
}