github.com/dim4egster/coreth@v0.10.2/plugin/evm/syncervm_test.go

// (c) 2021-2022, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package evm

import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/dim4egster/qmallgo/chains/atomic"
	"github.com/dim4egster/qmallgo/database/manager"
	"github.com/dim4egster/qmallgo/ids"
	"github.com/dim4egster/qmallgo/snow"
	"github.com/dim4egster/qmallgo/snow/choices"
	commonEng "github.com/dim4egster/qmallgo/snow/engine/common"
	"github.com/dim4egster/qmallgo/utils/crypto"
	"github.com/dim4egster/qmallgo/utils/units"

	"github.com/dim4egster/coreth/accounts/keystore"
	"github.com/dim4egster/coreth/consensus/dummy"
	"github.com/dim4egster/coreth/constants"
	"github.com/dim4egster/coreth/core"
	"github.com/dim4egster/coreth/core/rawdb"
	"github.com/dim4egster/coreth/core/types"
	"github.com/dim4egster/coreth/ethdb"
	"github.com/dim4egster/coreth/metrics"
	"github.com/dim4egster/coreth/params"
	statesyncclient "github.com/dim4egster/coreth/sync/client"
	"github.com/dim4egster/coreth/sync/statesync"
	"github.com/dim4egster/coreth/trie"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
)

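// TestSkipStateSync verifies that a state summary is not accepted for syncing
// when the syncer is fewer than [stateSyncMinBlocks] blocks behind the summary.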
func TestSkipStateSync(t *testing.T) {
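	// Seed the PRNG so the randomly generated test state is deterministic.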
	rand.Seed(1)
	test := syncTest{
		syncableInterval:   256,
		stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync
		shouldSync:         false,
	}
	vmSetup := createSyncServerAndClientVMs(t, test)
	defer vmSetup.Teardown(t)

	testSyncerVM(t, vmSetup, test)
}

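// TestStateSyncFromScratch verifies that a fresh VM performs a full state sync
// when the summary it receives is at least [stateSyncMinBlocks] blocks ahead of
// its local state.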
func TestStateSyncFromScratch(t *testing.T) {
	rand.Seed(1)
	test := syncTest{
		syncableInterval:   256,
		stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync
		shouldSync:         true,
	}
	vmSetup := createSyncServerAndClientVMs(t, test)
	defer vmSetup.Teardown(t)

	testSyncerVM(t, vmSetup, test)
}

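// TestStateSyncToggleEnabledToDisabled interrupts a state sync partway through,
// restarts the VM from the same database with state sync disabled to verify it
// can still verify and accept blocks, then restarts it once more with state
// sync re-enabled and completes the sync.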
func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
	rand.Seed(1)
	// Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice.
	metrics.Enabled = false
	defer func() {
		metrics.Enabled = true
	}()

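	// lock guards reqCount, which counts the responses delivered to the syncer
	// VM so the interceptor below can fail requests past a threshold.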
	var lock sync.Mutex
	reqCount := 0
	test := syncTest{
		syncableInterval:   256,
		stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync
		shouldSync:         true,
		responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) {
			lock.Lock()
			defer lock.Unlock()

			reqCount++
			// Fail all requests after number 50 to interrupt the sync
			if reqCount > 50 {
				if err := syncerVM.AppRequestFailed(nodeID, requestID); err != nil {
					panic(err)
				}
				cancel := syncerVM.StateSyncClient.(*stateSyncerClient).cancel
				if cancel != nil {
					cancel()
				} else {
					t.Fatal("state sync client not populated correctly")
				}
			} else {
				syncerVM.AppResponse(nodeID, requestID, response)
			}
		},
		expectedErr: context.Canceled,
	}
	vmSetup := createSyncServerAndClientVMs(t, test)
	defer vmSetup.Teardown(t)

	// Perform sync resulting in early termination.
	testSyncerVM(t, vmSetup, test)

	test.shouldSync = true
	test.responseIntercept = nil
	test.expectedErr = nil

	syncDisabledVM := &VM{}
	appSender := &commonEng.SenderTest{T: t}
	appSender.SendAppGossipF = func([]byte) error { return nil }
	appSender.SendAppRequestF = func(nodeSet ids.NodeIDSet, requestID uint32, request []byte) error {
		nodeID, hasItem := nodeSet.Pop()
		if !hasItem {
			t.Fatal("expected nodeSet to contain at least 1 nodeID")
		}
		go vmSetup.serverVM.AppRequest(nodeID, requestID, time.Now().Add(1*time.Second), request)
		return nil
	}
	// Initialize the VM with a nil config, which leaves state sync disabled.
	if err := syncDisabledVM.Initialize(
		vmSetup.syncerVM.ctx,
		vmSetup.syncerDBManager,
		[]byte(genesisJSONLatest),
		nil,
		nil,
		vmSetup.syncerVM.toEngine,
		[]*commonEng.Fx{},
		appSender,
	); err != nil {
		t.Fatal(err)
	}

	defer func() {
		if err := syncDisabledVM.Shutdown(); err != nil {
			t.Fatal(err)
		}
	}()

	if height := syncDisabledVM.LastAcceptedBlockInternal().Height(); height != 0 {
		t.Fatalf("Unexpected last accepted height: %d", height)
	}

	enabled, err := syncDisabledVM.StateSyncEnabled()
	assert.NoError(t, err)
	assert.False(t, enabled, "sync should be disabled")

	// Process blocks at heights 1 through 9 from the serverVM
	for i := uint64(1); i < 10; i++ {
		ethBlock := vmSetup.serverVM.blockChain.GetBlockByNumber(i)
		if ethBlock == nil {
			t.Fatalf("VM Server did not have a block available at height %d", i)
		}
		b, err := rlp.EncodeToBytes(ethBlock)
		if err != nil {
			t.Fatal(err)
		}
		blk, err := syncDisabledVM.ParseBlock(b)
		if err != nil {
			t.Fatal(err)
		}
		if err := blk.Verify(); err != nil {
			t.Fatal(err)
		}
		if err := blk.Accept(); err != nil {
			t.Fatal(err)
		}
	}
	// Verify the snapshot disk layer matches the last block root
	lastRoot := syncDisabledVM.blockChain.CurrentBlock().Root()
	if err := syncDisabledVM.blockChain.Snapshots().Verify(lastRoot); err != nil {
		t.Fatal(err)
	}
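	// Wait for the async acceptor queue to finish processing the accepted
	// blocks before re-initializing a VM from the same database.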
	syncDisabledVM.blockChain.DrainAcceptorQueue()

	// Create a new VM from the same database with state sync enabled.
	syncReEnabledVM := &VM{}
	// Enable state sync in configJSON
	configJSON := fmt.Sprintf(
		"{\"state-sync-enabled\":true, \"state-sync-min-blocks\":%d}",
		test.stateSyncMinBlocks,
	)
	if err := syncReEnabledVM.Initialize(
		vmSetup.syncerVM.ctx,
		vmSetup.syncerDBManager,
		[]byte(genesisJSONLatest),
		nil,
		[]byte(configJSON),
		vmSetup.syncerVM.toEngine,
		[]*commonEng.Fx{},
		appSender,
	); err != nil {
		t.Fatal(err)
	}

	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncReEnabledVM]
	vmSetup.serverAppSender.SendAppResponseF = func(nodeID ids.NodeID, requestID uint32, response []byte) error {
		if test.responseIntercept == nil {
			go syncReEnabledVM.AppResponse(nodeID, requestID, response)
		} else {
			go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response)
		}

		return nil
	}

	// connect peer to [syncReEnabledVM]
	assert.NoError(t, syncReEnabledVM.Connected(vmSetup.serverVM.ctx.NodeID, statesyncclient.StateSyncVersion))

	enabled, err = syncReEnabledVM.StateSyncEnabled()
	assert.NoError(t, err)
	assert.True(t, enabled, "sync should be enabled")

	vmSetup.syncerVM = syncReEnabledVM
	testSyncerVM(t, vmSetup, test)
}

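// createSyncServerAndClientVMs creates a server VM with generated state to
// sync from (blocks, atomic transactions, and funded accounts) and a fresh
// syncer VM wired to it through mocked AppSender implementations.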
func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup {
	var (
		serverVM, syncerVM *VM
	)
	// If the test fails, shut down the VMs if they have been instantiated.
	defer func() {
		// If the test has not failed, do nothing here: the caller is
		// responsible for shutting the VMs down via Teardown.
		if !t.Failed() {
			return
		}

		// If the test already failed, shut down the VMs if they were instantiated,
		// since the caller will not get the chance to shut them down.
		if serverVM != nil {
			log.Info("Shutting down server VM")
			if err := serverVM.Shutdown(); err != nil {
				t.Fatal(err)
			}
		}
		if syncerVM != nil {
			log.Info("Shutting down syncerVM")
			if err := syncerVM.Shutdown(); err != nil {
				t.Fatal(err)
			}
		}
	}()

	// configure [serverVM]
	importAmount := 2000000 * units.Avax // 2M avax
	_, serverVM, _, serverAtomicMemory, serverAppSender := GenesisVMWithUTXOs(
		t,
		true,
		"",
		"",
		"",
		map[ids.ShortID]uint64{
			testShortIDAddrs[0]: importAmount,
		},
	)

	var (
		importTx, exportTx *Tx
		err                error
	)
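	// Build [parentsToGet] blocks on the server VM: an import tx in the first
	// block, an export tx in the second, and simple transfers after that, so
	// both atomic and EVM state exist to be synced.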
	generateAndAcceptBlocks(t, serverVM, parentsToGet, func(i int, gen *core.BlockGen) {
		switch i {
		case 0:
			// spend the UTXOs from shared memory
			importTx, err = serverVM.newImportTx(serverVM.ctx.XChainID, testEthAddrs[0], initialBaseFee, []*crypto.PrivateKeySECP256K1R{testKeys[0]})
			if err != nil {
				t.Fatal(err)
			}
			if err := serverVM.issueTx(importTx, true /*=local*/); err != nil {
				t.Fatal(err)
			}
		case 1:
			// export some of the imported UTXOs to test exportTx is properly synced
			exportTx, err = serverVM.newExportTx(
				serverVM.ctx.AVAXAssetID,
				importAmount/2,
				serverVM.ctx.XChainID,
				testShortIDAddrs[0],
				initialBaseFee,
				[]*crypto.PrivateKeySECP256K1R{testKeys[0]},
			)
			if err != nil {
				t.Fatal(err)
			}
			if err := serverVM.issueTx(exportTx, true /*=local*/); err != nil {
				t.Fatal(err)
			}
		default: // Generate simple transfer transactions.
			pk := testKeys[0].ToECDSA()
			tx := types.NewTransaction(gen.TxNonce(testEthAddrs[0]), testEthAddrs[1], common.Big1, params.TxGas, initialBaseFee, nil)
			signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), pk)
			if err != nil {
				t.Fatal(err)
			}
			gen.AddTx(signedTx)
		}
	})

	// override serverAtomicTrie's commitInterval so the call to [serverAtomicTrie.Index]
	// creates a commit at the height [syncableInterval]. This is necessary to support
	// fetching a state summary.
	serverAtomicTrie := serverVM.atomicTrie.(*atomicTrie)
	serverAtomicTrie.commitInterval = test.syncableInterval
	assert.NoError(t, serverAtomicTrie.commit(test.syncableInterval, serverAtomicTrie.LastAcceptedRoot()))
	assert.NoError(t, serverVM.db.Commit())

	serverSharedMemories := newSharedMemories(serverAtomicMemory, serverVM.ctx.ChainID, serverVM.ctx.XChainID)
	serverSharedMemories.assertOpsApplied(t, importTx.mustAtomicOps())
	serverSharedMemories.assertOpsApplied(t, exportTx.mustAtomicOps())

	// make some accounts
	trieDB := trie.NewDatabase(serverVM.chaindb)
	root, accounts := statesync.FillAccountsWithOverlappingStorage(t, trieDB, types.EmptyRootHash, 1000, 16)

	// patch serverVM's lastAcceptedBlock to have the new root
	// and update the vm's state so the trie with accounts will
	// be returned by StateSyncGetLastSummary
	lastAccepted := serverVM.blockChain.LastAcceptedBlock()
	patchedBlock := patchBlock(lastAccepted, root, serverVM.chaindb)
	blockBytes, err := rlp.EncodeToBytes(patchedBlock)
	if err != nil {
		t.Fatal(err)
	}
	internalBlock, err := serverVM.parseBlock(blockBytes)
	if err != nil {
		t.Fatal(err)
	}
	internalBlock.(*Block).SetStatus(choices.Accepted)
	assert.NoError(t, serverVM.State.SetLastAcceptedBlock(internalBlock))

	// patch syncableInterval for test
	serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval

	// initialize [syncerVM] with blank genesis state
	stateSyncEnabledJSON := fmt.Sprintf("{\"state-sync-enabled\":true, \"state-sync-min-blocks\": %d}", test.stateSyncMinBlocks)
	syncerEngineChan, syncerVM, syncerDBManager, syncerAtomicMemory, syncerAppSender := GenesisVMWithUTXOs(
		t,
		false,
		"",
		stateSyncEnabledJSON,
		"",
		map[ids.ShortID]uint64{
			testShortIDAddrs[0]: importAmount,
		},
	)
	if err := syncerVM.SetState(snow.StateSyncing); err != nil {
		t.Fatal(err)
	}
	enabled, err := syncerVM.StateSyncEnabled()
	assert.NoError(t, err)
	assert.True(t, enabled)

	// override [syncerVM]'s commit interval so the atomic trie works correctly.
	syncerVM.atomicTrie.(*atomicTrie).commitInterval = test.syncableInterval

	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
	serverAppSender.SendAppResponseF = func(nodeID ids.NodeID, requestID uint32, response []byte) error {
		if test.responseIntercept == nil {
			go syncerVM.AppResponse(nodeID, requestID, response)
		} else {
			go test.responseIntercept(syncerVM, nodeID, requestID, response)
		}

		return nil
	}

	// connect peer to [syncerVM]
	assert.NoError(t, syncerVM.Connected(serverVM.ctx.NodeID, statesyncclient.StateSyncVersion))

	// override [syncerVM]'s SendAppRequest function to trigger AppRequest on [serverVM]
	syncerAppSender.SendAppRequestF = func(nodeSet ids.NodeIDSet, requestID uint32, request []byte) error {
		nodeID, hasItem := nodeSet.Pop()
		if !hasItem {
			t.Fatal("expected nodeSet to contain at least 1 nodeID")
		}
		go serverVM.AppRequest(nodeID, requestID, time.Now().Add(1*time.Second), request)
		return nil
	}

	return &syncVMSetup{
		serverVM:        serverVM,
		serverAppSender: serverAppSender,
		includedAtomicTxs: []*Tx{
			importTx,
			exportTx,
		},
		fundedAccounts:     accounts,
		syncerVM:           syncerVM,
		syncerDBManager:    syncerDBManager,
		syncerEngineChan:   syncerEngineChan,
		syncerAtomicMemory: syncerAtomicMemory,
	}
}

// syncVMSetup contains the required setup for a client VM to perform state sync
// off of a server VM.
type syncVMSetup struct {
	serverVM        *VM
	serverAppSender *commonEng.SenderTest

	includedAtomicTxs []*Tx
	fundedAccounts    map[*keystore.Key]*types.StateAccount

	syncerVM           *VM
	syncerDBManager    manager.Manager
	syncerEngineChan   <-chan commonEng.Message
	syncerAtomicMemory *atomic.Memory
}

// Teardown shuts down both VMs and asserts that both exit without error.
// Note: assumes both serverVM and syncerVM have been initialized.
func (s *syncVMSetup) Teardown(t *testing.T) {
	assert.NoError(t, s.serverVM.Shutdown())
	assert.NoError(t, s.syncerVM.Shutdown())
}

// syncTest contains the parameters used to configure a state sync test and its
// expected output.
type syncTest struct {
	responseIntercept  func(vm *VM, nodeID ids.NodeID, requestID uint32, response []byte)
	stateSyncMinBlocks uint64
	syncableInterval   uint64
	shouldSync         bool
	expectedErr        error
}

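// testSyncerVM performs state sync on [vmSetup.syncerVM] against
// [vmSetup.serverVM] according to [test], then verifies the synced state and
// confirms the VM can bootstrap and process blocks afterwards.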
func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) {
	t.Helper()
	var (
		serverVM           = vmSetup.serverVM
		includedAtomicTxs  = vmSetup.includedAtomicTxs
		fundedAccounts     = vmSetup.fundedAccounts
		syncerVM           = vmSetup.syncerVM
		syncerEngineChan   = vmSetup.syncerEngineChan
		syncerAtomicMemory = vmSetup.syncerAtomicMemory
	)

	// get last summary and test related methods
	summary, err := serverVM.GetLastStateSummary()
	if err != nil {
		t.Fatal("error getting state sync last summary", "err", err)
	}
	parsedSummary, err := syncerVM.ParseStateSummary(summary.Bytes())
	if err != nil {
		t.Fatal("error parsing state summary", "err", err)
	}
	retrievedSummary, err := serverVM.GetStateSummary(parsedSummary.Height())
	if err != nil {
		t.Fatal("error getting state summary by height", "err", err)
	}
	assert.Equal(t, summary, retrievedSummary)

	shouldSync, err := parsedSummary.Accept()
	if err != nil {
		t.Fatal("unexpected error accepting state summary", "err", err)
	}
	if shouldSync != test.shouldSync {
		t.Fatal("unexpected value returned from accept", "expected", test.shouldSync, "got", shouldSync)
	}
	if !shouldSync {
		return
	}
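	// wait for the engine message signaling that state sync has finished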
	msg := <-syncerEngineChan
	assert.Equal(t, commonEng.StateSyncDone, msg)

	// If the test is expected to error, assert the correct error is returned and finish the test.
	err = syncerVM.StateSyncClient.Error()
	if test.expectedErr != nil {
		assert.ErrorIs(t, err, test.expectedErr)
		return
	}
	if err != nil {
		t.Fatal("state sync failed", err)
	}

	// set [syncerVM] to bootstrapping and verify the last accepted block has been updated correctly
	// and that we can bootstrap and process some blocks.
	if err := syncerVM.SetState(snow.Bootstrapping); err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, serverVM.LastAcceptedBlock().Height(), syncerVM.LastAcceptedBlock().Height(), "block height mismatch between syncer and server")
	assert.Equal(t, serverVM.LastAcceptedBlock().ID(), syncerVM.LastAcceptedBlock().ID(), "blockID mismatch between syncer and server")
	assert.True(t, syncerVM.blockChain.HasState(syncerVM.blockChain.LastAcceptedBlock().Root()), "unavailable state for last accepted block")

	blocksToBuild := 10
	txsPerBlock := 10
	toAddress := testEthAddrs[2] // arbitrary choice
	generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) {
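		// issue [txsPerBlock] simple transfers, each from a distinct funded account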
		i := 0
		for k := range fundedAccounts {
			tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, initialBaseFee, nil)
			signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), k.PrivateKey)
			if err != nil {
				t.Fatal(err)
			}
			gen.AddTx(signedTx)
			i++
			if i >= txsPerBlock {
				break
			}
		}
	})

	// check we can transition to [NormalOp] state and continue to process blocks.
	assert.NoError(t, syncerVM.SetState(snow.NormalOp))
	assert.True(t, syncerVM.bootstrapped)

	// check atomic memory was synced properly
	syncerSharedMemories := newSharedMemories(syncerAtomicMemory, syncerVM.ctx.ChainID, syncerVM.ctx.XChainID)

	for _, tx := range includedAtomicTxs {
		syncerSharedMemories.assertOpsApplied(t, tx.mustAtomicOps())
	}

	// Generate blocks after we have entered normal consensus as well
	generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) {
		i := 0
		for k := range fundedAccounts {
			tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, initialBaseFee, nil)
			signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainID), k.PrivateKey)
			if err != nil {
				t.Fatal(err)
			}
			gen.AddTx(signedTx)
			i++
			if i >= txsPerBlock {
				break
			}
		}
	})
}

// patchBlock returns a copy of [blk] with [root] and updates [db] to
// include the new block as canonical for [blk]'s height.
// This breaks the verifiability of the chain, since after this call
// [blk] does not necessarily define a state transition from its parent
// state to the new state root.
func patchBlock(blk *types.Block, root common.Hash, db ethdb.Database) *types.Block {
	header := blk.Header()
	header.Root = root
	receipts := rawdb.ReadRawReceipts(db, blk.Hash(), blk.NumberU64())
	newBlk := types.NewBlock(
		header, blk.Transactions(), blk.Uncles(), receipts, trie.NewStackTrie(nil), blk.ExtData(), true,
	)
	rawdb.WriteBlock(db, newBlk)
	rawdb.WriteCanonicalHash(db, newBlk.Hash(), newBlk.NumberU64())
	return newBlk
}

// generateAndAcceptBlocks uses [core.GenerateChain] to generate blocks, then
// calls Verify and Accept on each generated block.
// TODO: consider using this helper function in vm_test.go and elsewhere in this package to clean up tests
func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, *core.BlockGen)) {
	t.Helper()

	// acceptExternalBlock defines a function to parse, verify, and accept a block once it has been
	// generated by GenerateChain
	acceptExternalBlock := func(block *types.Block) {
		bytes, err := rlp.EncodeToBytes(block)
		if err != nil {
			t.Fatal(err)
		}
		vmBlock, err := vm.ParseBlock(bytes)
		if err != nil {
			t.Fatal(err)
		}
		if err := vmBlock.Verify(); err != nil {
			t.Fatal(err)
		}
		if err := vmBlock.Accept(); err != nil {
			t.Fatal(err)
		}
	}
	_, _, err := core.GenerateChain(
		vm.chainConfig,
		vm.blockChain.LastAcceptedBlock(),
		dummy.NewDummyEngine(vm.createConsensusCallbacks()),
		vm.chaindb,
		numBlocks,
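		// gap, in seconds, between the timestamps of consecutive generated blocks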
		10,
		func(i int, g *core.BlockGen) {
			g.SetOnBlockGenerated(acceptExternalBlock)
			g.SetCoinbase(constants.BlackholeAddr) // necessary for syntactic validation of the block
			gen(i, g)
		},
	)
	if err != nil {
		t.Fatal(err)
	}
	vm.blockChain.DrainAcceptorQueue()
}