github.com/hechain20/hechain@v0.0.0-20220316014945-b544036ba106/common/ledger/blkstorage/blockindex_test.go

     1  /*
     2  Copyright hechain. All Rights Reserved.
     3  
     4  SPDX-License-Identifier: Apache-2.0
     5  */
     6  
     7  package blkstorage
     8  
     9  import (
    10  	"crypto/sha256"
    11  	"fmt"
    12  	"hash"
    13  	"io/ioutil"
    14  	"os"
    15  	"path/filepath"
    16  	"testing"
    17  
    18  	"github.com/hechain20/hechain/common/ledger/snapshot"
    19  	"github.com/hechain20/hechain/common/ledger/testutil"
    20  	commonledgerutil "github.com/hechain20/hechain/common/ledger/util"
    21  	"github.com/hechain20/hechain/common/metrics/disabled"
    22  	"github.com/hechain20/hechain/internal/pkg/txflags"
    23  	"github.com/hechain20/hechain/protoutil"
    24  	"github.com/hyperledger/fabric-protos-go/common"
    25  	"github.com/stretchr/testify/require"
    26  )
    27  
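         // testNewHashFunc returns a SHA-256 hasher; the snapshot-export tests below pass it to exportUniqueTxIDs.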
    28  var testNewHashFunc = func() (hash.Hash, error) {
    29  	return sha256.New(), nil
    30  }
    31  
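         // TestBlockIndexSync exercises index sync-up for several combinations of total blocks,
         // initially indexed blocks, and sync performed by a restart vs. an explicit syncIndex call.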
    32  func TestBlockIndexSync(t *testing.T) {
    33  	testBlockIndexSync(t, 10, 5, false)
    34  	testBlockIndexSync(t, 10, 5, true)
    35  	testBlockIndexSync(t, 10, 0, true)
    36  	testBlockIndexSync(t, 10, 10, true)
    37  }
    38  
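         // testBlockIndexSync adds the first numBlocksToIndex blocks normally, diverts index writes to a
         // throwaway DB handle while adding the remaining blocks, restores the original index, and then
         // verifies that a restart (or an explicit syncIndex) re-indexes the missing blocks.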
    39  func testBlockIndexSync(t *testing.T, numBlocks int, numBlocksToIndex int, syncByRestart bool) {
    40  	testName := fmt.Sprintf("%v/%v/%v", numBlocks, numBlocksToIndex, syncByRestart)
    41  	t.Run(testName, func(t *testing.T) {
    42  		env := newTestEnv(t, NewConf(testPath(), 0))
    43  		defer env.Cleanup()
    44  		ledgerid := "testledger"
    45  		blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
    46  		defer blkfileMgrWrapper.close()
    47  		blkfileMgr := blkfileMgrWrapper.blockfileMgr
    48  		originalIndexStore := blkfileMgr.index.db
    49  		// construct blocks for testing
    50  		blocks := testutil.ConstructTestBlocks(t, numBlocks)
    51  		// add a few blocks
    52  		blkfileMgrWrapper.addBlocks(blocks[:numBlocksToIndex])
    53  
    54  		// redirect index writes to some random place and add remaining blocks
    55  		blkfileMgr.index.db = env.provider.leveldbProvider.GetDBHandle("someRandomPlace")
    56  		blkfileMgrWrapper.addBlocks(blocks[numBlocksToIndex:])
    57  
     58  		// Plug the original index store back in
     59  		blkfileMgr.index.db = originalIndexStore
     60  		// Verify that the first set of blocks is indexed in the original index
    61  		for i := 0; i < numBlocksToIndex; i++ {
    62  			block, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
    63  			require.NoError(t, err, "block [%d] should have been present in the index", i)
    64  			require.Equal(t, blocks[i], block)
    65  		}
    66  
     67  		// Before we test index sync-up, verify that the last set of blocks is not indexed in the original index
     68  		for i := numBlocksToIndex; i < numBlocks; i++ {
    69  			_, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
    70  			require.EqualError(t, err, fmt.Sprintf("no such block number [%d] in index", i))
    71  		}
    72  
    73  		// perform index sync
    74  		if syncByRestart {
    75  			blkfileMgrWrapper.close()
    76  			blkfileMgrWrapper = newTestBlockfileWrapper(env, ledgerid)
    77  			defer blkfileMgrWrapper.close()
    78  			blkfileMgr = blkfileMgrWrapper.blockfileMgr
    79  		} else {
     80  			require.NoError(t, blkfileMgr.syncIndex())
    81  		}
    82  
    83  		// Now, last set of blocks should also be indexed in the original index
    84  		for i := numBlocksToIndex; i < numBlocks; i++ {
    85  			block, err := blkfileMgr.retrieveBlockByNumber(uint64(i))
    86  			require.NoError(t, err, "block [%d] should have been present in the index", i)
    87  			require.Equal(t, blocks[i], block)
    88  		}
    89  	})
    90  }
    91  
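         // TestBlockIndexSelectiveIndexing runs the selective-indexing checks for several combinations of indexable attributes.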
    92  func TestBlockIndexSelectiveIndexing(t *testing.T) {
    93  	testBlockIndexSelectiveIndexing(t, []IndexableAttr{})
    94  	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrBlockHash})
    95  	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrBlockNum})
    96  	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrTxID})
    97  	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrBlockNumTranNum})
    98  	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrBlockHash, IndexableAttrBlockNum})
    99  	testBlockIndexSelectiveIndexing(t, []IndexableAttr{IndexableAttrTxID, IndexableAttrBlockNumTranNum})
   100  }
   101  
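         // testBlockIndexSelectiveIndexing verifies that each retrieval API succeeds only when the
         // corresponding attribute is configured for indexing and otherwise returns the
         // "not maintained in index" error.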
   102  func testBlockIndexSelectiveIndexing(t *testing.T, indexItems []IndexableAttr) {
   103  	var testName string
   104  	for _, s := range indexItems {
   105  		testName = testName + string(s)
   106  	}
   107  	t.Run(testName, func(t *testing.T) {
   108  		env := newTestEnvSelectiveIndexing(t, NewConf(testPath(), 0), indexItems, &disabled.Provider{})
   109  		defer env.Cleanup()
   110  		blkfileMgrWrapper := newTestBlockfileWrapper(env, "testledger")
   111  		defer blkfileMgrWrapper.close()
   112  
   113  		blocks := testutil.ConstructTestBlocks(t, 3)
   114  		// add test blocks
   115  		blkfileMgrWrapper.addBlocks(blocks)
   116  		blockfileMgr := blkfileMgrWrapper.blockfileMgr
   117  
    118  		// if the index has been configured for an indexItem, the item should be indexed; otherwise it should not be
   119  		// test 'retrieveBlockByHash'
   120  		block, err := blockfileMgr.retrieveBlockByHash(protoutil.BlockHeaderHash(blocks[0].Header))
   121  		if containsAttr(indexItems, IndexableAttrBlockHash) {
   122  			require.NoError(t, err, "Error while retrieving block by hash")
   123  			require.Equal(t, blocks[0], block)
   124  		} else {
   125  			require.EqualError(t, err, "block hashes not maintained in index")
   126  		}
   127  
   128  		// test 'retrieveBlockByNumber'
   129  		block, err = blockfileMgr.retrieveBlockByNumber(0)
   130  		if containsAttr(indexItems, IndexableAttrBlockNum) {
   131  			require.NoError(t, err, "Error while retrieving block by number")
   132  			require.Equal(t, blocks[0], block)
   133  		} else {
   134  			require.EqualError(t, err, "block numbers not maintained in index")
   135  		}
   136  
   137  		// test 'retrieveTransactionByID'
   138  		txid, err := protoutil.GetOrComputeTxIDFromEnvelope(blocks[0].Data.Data[0])
   139  		require.NoError(t, err)
   140  		txEnvelope, err := blockfileMgr.retrieveTransactionByID(txid)
   141  		if containsAttr(indexItems, IndexableAttrTxID) {
   142  			require.NoError(t, err, "Error while retrieving tx by id")
   143  			txEnvelopeBytes := blocks[0].Data.Data[0]
   144  			txEnvelopeOrig, err := protoutil.GetEnvelopeFromBlock(txEnvelopeBytes)
   145  			require.NoError(t, err)
   146  			require.Equal(t, txEnvelopeOrig, txEnvelope)
   147  		} else {
   148  			require.EqualError(t, err, "transaction IDs not maintained in index")
   149  		}
   150  
   151  		// test txIDExists
   152  		txid, err = protoutil.GetOrComputeTxIDFromEnvelope(blocks[0].Data.Data[0])
   153  		require.NoError(t, err)
   154  		exists, err := blockfileMgr.txIDExists(txid)
   155  		if containsAttr(indexItems, IndexableAttrTxID) {
   156  			require.NoError(t, err)
   157  			require.True(t, exists)
   158  		} else {
   159  			require.EqualError(t, err, "transaction IDs not maintained in index")
   160  		}
   161  
    162  		// test 'retrieveTransactionByBlockNumTranNum'
   163  		txEnvelope2, err := blockfileMgr.retrieveTransactionByBlockNumTranNum(0, 0)
   164  		if containsAttr(indexItems, IndexableAttrBlockNumTranNum) {
   165  			require.NoError(t, err, "Error while retrieving tx by blockNum and tranNum")
   166  			txEnvelopeBytes2 := blocks[0].Data.Data[0]
   167  			txEnvelopeOrig2, err2 := protoutil.GetEnvelopeFromBlock(txEnvelopeBytes2)
   168  			require.NoError(t, err2)
   169  			require.Equal(t, txEnvelopeOrig2, txEnvelope2)
   170  		} else {
   171  			require.EqualError(t, err, "<blockNumber, transactionNumber> tuple not maintained in index")
   172  		}
   173  
   174  		// test 'retrieveBlockByTxID'
   175  		txid, err = protoutil.GetOrComputeTxIDFromEnvelope(blocks[0].Data.Data[0])
   176  		require.NoError(t, err)
   177  		block, err = blockfileMgr.retrieveBlockByTxID(txid)
   178  		if containsAttr(indexItems, IndexableAttrTxID) {
   179  			require.NoError(t, err, "Error while retrieving block by txID")
   180  			require.Equal(t, block, blocks[0])
   181  		} else {
   182  			require.EqualError(t, err, "transaction IDs not maintained in index")
   183  		}
   184  
   185  		for _, block := range blocks {
   186  			flags := txflags.ValidationFlags(block.Metadata.Metadata[common.BlockMetadataIndex_TRANSACTIONS_FILTER])
   187  
   188  			for idx, d := range block.Data.Data {
   189  				txid, err = protoutil.GetOrComputeTxIDFromEnvelope(d)
   190  				require.NoError(t, err)
   191  
   192  				reason, blkNum, err := blockfileMgr.retrieveTxValidationCodeByTxID(txid)
   193  
   194  				if containsAttr(indexItems, IndexableAttrTxID) {
   195  					require.NoError(t, err)
   196  					reasonFromFlags := flags.Flag(idx)
   197  					require.Equal(t, reasonFromFlags, reason)
   198  					require.Equal(t, block.Header.Number, blkNum)
   199  				} else {
   200  					require.EqualError(t, err, "transaction IDs not maintained in index")
   201  				}
   202  			}
   203  		}
   204  	})
   205  }
   206  
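         // containsAttr reports whether attr is present in indexItems.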
   207  func containsAttr(indexItems []IndexableAttr, attr IndexableAttr) bool {
   208  	for _, element := range indexItems {
   209  		if element == attr {
   210  			return true
   211  		}
   212  	}
   213  	return false
   214  }
   215  
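         // TestTxIDKeyEncodingDecoding round-trips constructTxIDKey and retrieveTxID for a few
         // txid/blkNum/txNum combinations; verifyTxIDKeyDecodable additionally decodes the full key layout
         // (one-byte prefix, length-prefixed txid, then the order-preserving varint encodings of blkNum and txNum).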
   216  func TestTxIDKeyEncodingDecoding(t *testing.T) {
   217  	testcases := []struct {
   218  		txid   string
   219  		blkNum uint64
   220  		txNum  uint64
   221  	}{
   222  		{"txid1", 0, 0},
   223  		{"", 1, 1},
   224  		{"", 0, 0},
   225  		{"txid1", 100, 100},
   226  	}
   227  	for i, testcase := range testcases {
   228  		encodedTxIDKey := constructTxIDKey(testcase.txid, testcase.blkNum, testcase.txNum)
   229  		t.Run(fmt.Sprintf(" %d", i),
   230  			func(t *testing.T) {
   231  				txID, err := retrieveTxID(encodedTxIDKey)
   232  				require.NoError(t, err)
   233  				require.Equal(t, testcase.txid, txID)
   234  				verifyTxIDKeyDecodable(t,
   235  					encodedTxIDKey,
   236  					testcase.txid, testcase.blkNum, testcase.txNum,
   237  				)
   238  			})
   239  	}
   240  }
   241  
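         // TestTxIDKeyDecodingInvalidInputs checks the error messages retrieveTxID returns for malformed keys:
         // empty input, an unexpected prefix, and truncated length or txid fields.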
   242  func TestTxIDKeyDecodingInvalidInputs(t *testing.T) {
   243  	prefix := []byte{txIDIdxKeyPrefix}
   244  	txIDLen := commonledgerutil.EncodeOrderPreservingVarUint64(uint64(len("mytxid")))
   245  	txID := []byte("mytxid")
   246  
    247  	// empty byte slice
   248  	_, err := retrieveTxID([]byte{})
   249  	require.EqualError(t, err, "invalid txIDKey - zero-length slice")
   250  
   251  	// invalid prefix
   252  	invalidPrefix := []byte{txIDIdxKeyPrefix + 1}
   253  	_, err = retrieveTxID(invalidPrefix)
   254  	require.EqualError(t, err, fmt.Sprintf("invalid txIDKey {%x} - unexpected prefix", invalidPrefix))
   255  
   256  	// invalid key - only prefix
   257  	_, err = retrieveTxID(prefix)
   258  	require.EqualError(t, err, fmt.Sprintf("invalid txIDKey {%x}: number of consumed bytes from DecodeVarint is invalid, expected 1, but got 0", prefix))
   259  
   260  	// invalid key - incomplete length
   261  	incompleteLength := appendAllAndTrimLastByte(prefix, txIDLen)
   262  	_, err = retrieveTxID(incompleteLength)
   263  	require.EqualError(t, err, fmt.Sprintf("invalid txIDKey {%x}: decoded size (1) from DecodeVarint is more than available bytes (0)", incompleteLength))
   264  
   265  	// invalid key - incomplete txid
   266  	incompleteTxID := appendAllAndTrimLastByte(prefix, txIDLen, txID)
   267  	_, err = retrieveTxID(incompleteTxID)
   268  	require.EqualError(t, err, fmt.Sprintf("invalid txIDKey {%x}, fewer bytes present", incompleteTxID))
   269  }
   270  
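         // TestExportUniqueTxIDs verifies that exportUniqueTxIDs produces no files for an empty store, and that
         // after adding the genesis block and two further blocks (including duplicate txids) the exported
         // txids are de-duplicated and ordered by their encoded index keys.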
   271  func TestExportUniqueTxIDs(t *testing.T) {
   272  	env := newTestEnv(t, NewConf(testPath(), 0))
   273  	defer env.Cleanup()
   274  	ledgerid := "testledger"
   275  	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
   276  	defer blkfileMgrWrapper.close()
   277  	blkfileMgr := blkfileMgrWrapper.blockfileMgr
   278  
   279  	testSnapshotDir := testPath()
   280  	defer os.RemoveAll(testSnapshotDir)
   281  
   282  	// empty store generates no output
   283  	fileHashes, err := blkfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
   284  	require.NoError(t, err)
   285  	require.Empty(t, fileHashes)
   286  	files, err := ioutil.ReadDir(testSnapshotDir)
   287  	require.NoError(t, err)
   288  	require.Len(t, files, 0)
   289  
   290  	// add genesis block and test the exported bytes
   291  	bg, gb := testutil.NewBlockGenerator(t, "myChannel", false)
    292  	require.NoError(t, blkfileMgr.addBlock(gb))
   293  	configTxID, err := protoutil.GetOrComputeTxIDFromEnvelope(gb.Data.Data[0])
   294  	require.NoError(t, err)
   295  	fileHashes, err = blkfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
   296  	require.NoError(t, err)
   297  	verifyExportedTxIDs(t, testSnapshotDir, fileHashes, configTxID)
   298  	os.Remove(filepath.Join(testSnapshotDir, snapshotDataFileName))
   299  	os.Remove(filepath.Join(testSnapshotDir, snapshotMetadataFileName))
   300  
   301  	// add block-1 and test the exported bytes
   302  	block1 := bg.NextBlockWithTxid(
   303  		[][]byte{
   304  			[]byte("tx with id=txid-3"),
   305  			[]byte("tx with id=txid-1"),
   306  			[]byte("tx with id=txid-2"),
   307  			[]byte("another tx with existing id=txid-1"),
   308  		},
   309  		[]string{"txid-3", "txid-1", "txid-2", "txid-1"},
   310  	)
   311  	err = blkfileMgr.addBlock(block1)
   312  	require.NoError(t, err)
   313  	fileHashes, err = blkfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
   314  	require.NoError(t, err)
    315  	verifyExportedTxIDs(t, testSnapshotDir, fileHashes, "txid-1", "txid-2", "txid-3", configTxID) // "txid-1" appears only once; txids appear in radix sort order
   316  	os.Remove(filepath.Join(testSnapshotDir, snapshotDataFileName))
   317  	os.Remove(filepath.Join(testSnapshotDir, snapshotMetadataFileName))
   318  
   319  	// add block-2 and test the exported bytes
   320  	block2 := bg.NextBlockWithTxid(
   321  		[][]byte{
   322  			[]byte("tx with id=txid-0000000"),
   323  			[]byte("tx with id=txid-3"),
   324  			[]byte("tx with id=txid-4"),
   325  		},
   326  		[]string{"txid-0000000", "txid-3", "txid-4"},
   327  	)
    328  	err = blkfileMgr.addBlock(block2)
    329  	require.NoError(t, err)
   330  
   331  	fileHashes, err = blkfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
   332  	require.NoError(t, err)
    333  	verifyExportedTxIDs(t, testSnapshotDir, fileHashes, "txid-1", "txid-2", "txid-3", "txid-4", "txid-0000000", configTxID) // "txid-1" and "txid-3" appear only once; txids appear in radix sort order
   334  }
   335  
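         // TestExportUniqueTxIDsWhenTxIDsNotIndexed expects an error when the txid attribute is not configured for indexing.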
   336  func TestExportUniqueTxIDsWhenTxIDsNotIndexed(t *testing.T) {
   337  	env := newTestEnvSelectiveIndexing(t, NewConf(testPath(), 0), []IndexableAttr{IndexableAttrBlockNum}, &disabled.Provider{})
   338  	defer env.Cleanup()
   339  	blkfileMgrWrapper := newTestBlockfileWrapper(env, "testledger")
   340  	defer blkfileMgrWrapper.close()
   341  
   342  	blocks := testutil.ConstructTestBlocks(t, 5)
   343  	blkfileMgrWrapper.addBlocks(blocks)
   344  
   345  	testSnapshotDir := testPath()
   346  	defer os.RemoveAll(testSnapshotDir)
   347  	_, err := blkfileMgrWrapper.blockfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
   348  	require.EqualError(t, err, "transaction IDs not maintained in index")
   349  }
   350  
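         // TestExportUniqueTxIDsErrorCases covers export failures: the data or metadata snapshot file already
         // exists, an index entry whose key is only the txIDIdxKeyPrefix byte, and the underlying leveldb
         // provider being closed.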
   351  func TestExportUniqueTxIDsErrorCases(t *testing.T) {
   352  	env := newTestEnv(t, NewConf(testPath(), 0))
   353  	defer env.Cleanup()
   354  	ledgerid := "testledger"
   355  	blkfileMgrWrapper := newTestBlockfileWrapper(env, ledgerid)
   356  	defer blkfileMgrWrapper.close()
   357  
   358  	blocks := testutil.ConstructTestBlocks(t, 5)
   359  	blkfileMgrWrapper.addBlocks(blocks)
   360  	blockfileMgr := blkfileMgrWrapper.blockfileMgr
   361  	index := blockfileMgr.index
   362  
   363  	testSnapshotDir := testPath()
   364  	defer os.RemoveAll(testSnapshotDir)
   365  
   366  	// error during data file creation
   367  	dataFilePath := filepath.Join(testSnapshotDir, snapshotDataFileName)
   368  	_, err := os.Create(dataFilePath)
   369  	require.NoError(t, err)
   370  	_, err = blkfileMgrWrapper.blockfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
   371  	require.Contains(t, err.Error(), "error while creating the snapshot file: "+dataFilePath)
   372  	os.RemoveAll(testSnapshotDir)
   373  
   374  	// error during metadata file creation
    375  	t.Logf("testSnapshotDir=%s", testSnapshotDir)
   376  	require.NoError(t, os.MkdirAll(testSnapshotDir, 0o700))
   377  	metadataFilePath := filepath.Join(testSnapshotDir, snapshotMetadataFileName)
   378  	_, err = os.Create(metadataFilePath)
   379  	require.NoError(t, err)
   380  	_, err = blkfileMgrWrapper.blockfileMgr.index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
   381  	require.Contains(t, err.Error(), "error while creating the snapshot file: "+metadataFilePath)
   382  	os.RemoveAll(testSnapshotDir)
   383  
   384  	// error while retrieving the txid key
   385  	require.NoError(t, os.MkdirAll(testSnapshotDir, 0o700))
    386  	require.NoError(t, index.db.Put([]byte{txIDIdxKeyPrefix}, []byte("some junk value"), true))
   387  	_, err = index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
   388  	require.EqualError(t, err, "invalid txIDKey {74}: number of consumed bytes from DecodeVarint is invalid, expected 1, but got 0")
   389  	os.RemoveAll(testSnapshotDir)
   390  
   391  	// error while reading from leveldb
   392  	require.NoError(t, os.MkdirAll(testSnapshotDir, 0o700))
   393  	env.provider.leveldbProvider.Close()
   394  	_, err = index.exportUniqueTxIDs(testSnapshotDir, testNewHashFunc)
   395  	require.EqualError(t, err, "internal leveldb error while obtaining db iterator: leveldb: closed")
   396  	os.RemoveAll(testSnapshotDir)
   397  }
   398  
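         // verifyExportedTxIDs checks that the returned hashes match the SHA-256 of the snapshot data and
         // metadata files, then decodes both files and compares the recorded txids against expectedTxIDs.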
   399  func verifyExportedTxIDs(t *testing.T, dir string, fileHashes map[string][]byte, expectedTxIDs ...string) {
   400  	require.Len(t, fileHashes, 2)
   401  	require.Contains(t, fileHashes, snapshotDataFileName)
   402  	require.Contains(t, fileHashes, snapshotMetadataFileName)
   403  
   404  	dataFile := filepath.Join(dir, snapshotDataFileName)
   405  	dataFileContent, err := ioutil.ReadFile(dataFile)
   406  	require.NoError(t, err)
   407  	dataFileHash := sha256.Sum256(dataFileContent)
   408  	require.Equal(t, dataFileHash[:], fileHashes[snapshotDataFileName])
   409  
   410  	metadataFile := filepath.Join(dir, snapshotMetadataFileName)
   411  	metadataFileContent, err := ioutil.ReadFile(metadataFile)
   412  	require.NoError(t, err)
   413  	metadataFileHash := sha256.Sum256(metadataFileContent)
   414  	require.Equal(t, metadataFileHash[:], fileHashes[snapshotMetadataFileName])
   415  
   416  	metadataReader, err := snapshot.OpenFile(metadataFile, snapshotFileFormat)
   417  	require.NoError(t, err)
   418  	defer metadataReader.Close()
   419  
   420  	dataReader, err := snapshot.OpenFile(dataFile, snapshotFileFormat)
   421  	require.NoError(t, err)
   422  	defer dataReader.Close()
   423  
   424  	numTxIDs, err := metadataReader.DecodeUVarInt()
   425  	require.NoError(t, err)
   426  	retrievedTxIDs := []string{}
   427  	for i := uint64(0); i < numTxIDs; i++ {
   428  		txID, err := dataReader.DecodeString()
   429  		require.NoError(t, err)
   430  		retrievedTxIDs = append(retrievedTxIDs, txID)
   431  	}
   432  	require.Equal(t, expectedTxIDs, retrievedTxIDs)
   433  }
   434  
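         // appendAllAndTrimLastByte concatenates the inputs and drops the final byte, producing the truncated keys used in the negative tests.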
   435  func appendAllAndTrimLastByte(input ...[]byte) []byte {
   436  	r := []byte{}
   437  	for _, i := range input {
   438  		r = append(r, i...)
   439  	}
   440  	return r[:len(r)-1]
   441  }
   442  
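         // verifyTxIDKeyDecodable manually decodes txIDKey after the one-byte prefix: the length-prefixed txid,
         // followed by the order-preserving varint blkNum and txNum, and asserts the expected values and total key length.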
   443  func verifyTxIDKeyDecodable(t *testing.T, txIDKey []byte, expectedTxID string, expectedBlkNum, expectedTxNum uint64) {
   444  	length, lengthBytes, err := commonledgerutil.DecodeOrderPreservingVarUint64(txIDKey[1:])
   445  	require.NoError(t, err)
   446  	firstIndexTxID := 1 + lengthBytes
   447  	firstIndexBlkNum := firstIndexTxID + int(length)
   448  	require.Equal(t, []byte(expectedTxID), txIDKey[firstIndexTxID:firstIndexBlkNum])
   449  
   450  	blkNum, n, err := commonledgerutil.DecodeOrderPreservingVarUint64(txIDKey[firstIndexBlkNum:])
   451  	require.NoError(t, err)
   452  	require.Equal(t, expectedBlkNum, blkNum)
   453  
   454  	firstIndexTxNum := firstIndexBlkNum + n
   455  	txNum, n, err := commonledgerutil.DecodeOrderPreservingVarUint64(txIDKey[firstIndexTxNum:])
   456  	require.NoError(t, err)
   457  	require.Equal(t, expectedTxNum, txNum)
   458  	require.Len(t, txIDKey, firstIndexTxNum+n)
   459  }