github.com/MetalBlockchain/subnet-evm@v0.4.9/plugin/evm/syncervm_test.go

// (c) 2021-2022, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package evm

import (
	"context"
	"fmt"
	"math/big"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/MetalBlockchain/metalgo/database/manager"
	"github.com/MetalBlockchain/metalgo/ids"
	"github.com/MetalBlockchain/metalgo/snow"
	"github.com/MetalBlockchain/metalgo/snow/choices"
	commonEng "github.com/MetalBlockchain/metalgo/snow/engine/common"
	"github.com/MetalBlockchain/metalgo/snow/engine/snowman/block"
	"github.com/MetalBlockchain/metalgo/utils/set"

	"github.com/MetalBlockchain/subnet-evm/accounts/keystore"
	"github.com/MetalBlockchain/subnet-evm/consensus/dummy"
	"github.com/MetalBlockchain/subnet-evm/constants"
	"github.com/MetalBlockchain/subnet-evm/core"
	"github.com/MetalBlockchain/subnet-evm/core/rawdb"
	"github.com/MetalBlockchain/subnet-evm/core/types"
	"github.com/MetalBlockchain/subnet-evm/ethdb"
	"github.com/MetalBlockchain/subnet-evm/metrics"
	"github.com/MetalBlockchain/subnet-evm/params"
	statesyncclient "github.com/MetalBlockchain/subnet-evm/sync/client"
	"github.com/MetalBlockchain/subnet-evm/sync/statesync"
	"github.com/MetalBlockchain/subnet-evm/trie"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
)
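
// Overview: these tests build a server VM whose chain's last accepted block is
// patched to reference a large, generated state trie, then have a syncer VM
// state-sync from the server's last state summary and resume normal block
// processing. Identifiers such as [parentsToGet], [testEthAddrs], [testKeys],
// [testMinGasPrice], and [genesisJSONSubnetEVM] are test constants/helpers
// defined elsewhere in this package.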

func TestSkipStateSync(t *testing.T) {
	rand.Seed(1)
	test := syncTest{
		syncableInterval:   256,
		stateSyncMinBlocks: 300, // must be greater than [syncableInterval] to skip sync
		syncMode:           block.StateSyncSkipped,
	}
	vmSetup := createSyncServerAndClientVMs(t, test)
	defer vmSetup.Teardown(t)

	testSyncerVM(t, vmSetup, test)
}

func TestStateSyncFromScratch(t *testing.T) {
	rand.Seed(1)
	test := syncTest{
		syncableInterval:   256,
		stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync
		syncMode:           block.StateSyncStatic,
	}
	vmSetup := createSyncServerAndClientVMs(t, test)
	defer vmSetup.Teardown(t)

	testSyncerVM(t, vmSetup, test)
}

func TestStateSyncToggleEnabledToDisabled(t *testing.T) {
	rand.Seed(1)
	// Hack: registering metrics uses global variables, so we need to disable metrics here so that we can initialize the VM twice.
	metrics.Enabled = false
	defer func() {
		metrics.Enabled = true
	}()

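	// This test proceeds in three phases: (1) start a state sync and interrupt it
	// after 50 responses, (2) reinitialize the VM from the same database with state
	// sync disabled and process blocks normally, then (3) reinitialize once more
	// with state sync enabled and let the sync complete.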
	var lock sync.Mutex
	reqCount := 0
	test := syncTest{
		syncableInterval:   256,
		stateSyncMinBlocks: 50, // must be less than [syncableInterval] to perform sync
		syncMode:           block.StateSyncStatic,
		responseIntercept: func(syncerVM *VM, nodeID ids.NodeID, requestID uint32, response []byte) {
			lock.Lock()
			defer lock.Unlock()

			reqCount++
			// Fail all requests after number 50 to interrupt the sync
			if reqCount > 50 {
				if err := syncerVM.AppRequestFailed(context.Background(), nodeID, requestID); err != nil {
					panic(err)
				}
				cancel := syncerVM.StateSyncClient.(*stateSyncerClient).cancel
				if cancel != nil {
					cancel()
				} else {
					panic("state sync client not populated correctly") // avoid t.Fatal here: this intercept runs on a non-test goroutine
				}
			} else {
				syncerVM.AppResponse(context.Background(), nodeID, requestID, response)
			}
		},
		expectedErr: context.Canceled,
	}
	vmSetup := createSyncServerAndClientVMs(t, test)
	defer vmSetup.Teardown(t)

	// Perform sync resulting in early termination.
	testSyncerVM(t, vmSetup, test)

	test.syncMode = block.StateSyncStatic
	test.responseIntercept = nil
	test.expectedErr = nil

	syncDisabledVM := &VM{}
	appSender := &commonEng.SenderTest{T: t}
	appSender.SendAppGossipF = func(context.Context, []byte) error { return nil }
	appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error {
		nodeID, hasItem := nodeSet.Pop()
		if !hasItem {
			t.Fatal("expected nodeSet to contain at least 1 nodeID")
		}
		go vmSetup.serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request)
		return nil
	}
	// Initialize [syncDisabledVM] from the same database without providing a config,
	// so state sync is disabled. (Metrics were disabled above to allow initializing the VM again.)
	if err := syncDisabledVM.Initialize(
		context.Background(),
		vmSetup.syncerVM.ctx,
		vmSetup.syncerDBManager,
		[]byte(genesisJSONSubnetEVM),
		nil,
		nil,
		vmSetup.syncerVM.toEngine,
		[]*commonEng.Fx{},
		appSender,
	); err != nil {
		t.Fatal(err)
	}

	defer func() {
		if err := syncDisabledVM.Shutdown(context.Background()); err != nil {
			t.Fatal(err)
		}
	}()

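	// Since the interrupted sync never committed a block, the reinitialized VM
	// should still be at the genesis height.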
	if height := syncDisabledVM.LastAcceptedBlockInternal().Height(); height != 0 {
		t.Fatalf("Unexpected last accepted height: %d", height)
	}

	enabled, err := syncDisabledVM.StateSyncEnabled(context.Background())
	assert.NoError(t, err)
	assert.False(t, enabled, "sync should be disabled")

	// Process the first blocks (heights 1 through 9) from the serverVM
	for i := uint64(1); i < 10; i++ {
		ethBlock := vmSetup.serverVM.blockChain.GetBlockByNumber(i)
		if ethBlock == nil {
			t.Fatalf("VM Server did not have a block available at height %d", i)
		}
		b, err := rlp.EncodeToBytes(ethBlock)
		if err != nil {
			t.Fatal(err)
		}
		blk, err := syncDisabledVM.ParseBlock(context.Background(), b)
		if err != nil {
			t.Fatal(err)
		}
		if err := blk.Verify(context.Background()); err != nil {
			t.Fatal(err)
		}
		if err := blk.Accept(context.Background()); err != nil {
			t.Fatal(err)
		}
	}
	// Verify the snapshot disk layer matches the last block root
	lastRoot := syncDisabledVM.blockChain.CurrentBlock().Root()
	if err := syncDisabledVM.blockChain.Snapshots().Verify(lastRoot); err != nil {
		t.Fatal(err)
	}
	syncDisabledVM.blockChain.DrainAcceptorQueue()

	// Create a new VM from the same database with state sync enabled.
	syncReEnabledVM := &VM{}
	// Enable state sync in configJSON
	configJSON := fmt.Sprintf(
		"{\"state-sync-enabled\":true, \"state-sync-min-blocks\":%d}",
		test.stateSyncMinBlocks,
	)
	if err := syncReEnabledVM.Initialize(
		context.Background(),
		vmSetup.syncerVM.ctx,
		vmSetup.syncerDBManager,
		[]byte(genesisJSONSubnetEVM),
		nil,
		[]byte(configJSON),
		vmSetup.syncerVM.toEngine,
		[]*commonEng.Fx{},
		appSender,
	); err != nil {
		t.Fatal(err)
	}

	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncReEnabledVM]
	vmSetup.serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
		if test.responseIntercept == nil {
			go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response)
		} else {
			go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response)
		}

		return nil
	}

	// connect peer to [syncReEnabledVM]
	assert.NoError(t, syncReEnabledVM.Connected(
		context.Background(),
		vmSetup.serverVM.ctx.NodeID,
		statesyncclient.StateSyncVersion,
	))

	enabled, err = syncReEnabledVM.StateSyncEnabled(context.Background())
	assert.NoError(t, err)
	assert.True(t, enabled, "sync should be enabled")

	vmSetup.syncerVM = syncReEnabledVM
	testSyncerVM(t, vmSetup, test)
}

func createSyncServerAndClientVMs(t *testing.T, test syncTest) *syncVMSetup {
	var (
		serverVM, syncerVM *VM
	)
	// If the test fails during setup, shut down any VMs that have already been
	// instantiated, since the caller will never get the chance to call Teardown.
	defer func() {
		// If the test has not failed, the caller is responsible for shutting the VMs down.
		if !t.Failed() {
			return
		}

		// If the test already failed, shut down the VMs if they were instantiated.
		if serverVM != nil {
			log.Info("Shutting down server VM")
			if err := serverVM.Shutdown(context.Background()); err != nil {
				t.Fatal(err)
			}
		}
		if syncerVM != nil {
			log.Info("Shutting down syncerVM")
			if err := syncerVM.Shutdown(context.Background()); err != nil {
				t.Fatal(err)
			}
		}
	}()

	// configure [serverVM]
	_, serverVM, _, serverAppSender := GenesisVM(t, true, genesisJSONSubnetEVM, "", "")
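	// Build [parentsToGet] blocks on the server, each carrying a simple value
	// transfer. [parentsToGet] is a package-level test constant defined elsewhere
	// in this package; presumably it is at least [syncableInterval] so that a
	// syncable summary height exists.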
	generateAndAcceptBlocks(t, serverVM, parentsToGet, func(i int, gen *core.BlockGen) {
		tx := types.NewTransaction(gen.TxNonce(testEthAddrs[0]), testEthAddrs[1], common.Big1, params.TxGas, big.NewInt(testMinGasPrice), nil)
		signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), testKeys[0])
		if err != nil {
			t.Fatal(err)
		}
		gen.AddTx(signedTx)
	})

	// make some accounts
	trieDB := trie.NewDatabase(serverVM.chaindb)
	root, accounts := statesync.FillAccountsWithOverlappingStorage(t, trieDB, types.EmptyRootHash, 1000, 16)

	// patch serverVM's lastAcceptedBlock to have the new root
	// and update the vm's state so the trie with accounts will
	// be returned by StateSyncGetLastSummary
	lastAccepted := serverVM.blockChain.LastAcceptedBlock()
	patchedBlock := patchBlock(lastAccepted, root, serverVM.chaindb)
	blockBytes, err := rlp.EncodeToBytes(patchedBlock)
	if err != nil {
		t.Fatal(err)
	}
	internalBlock, err := serverVM.parseBlock(context.Background(), blockBytes)
	if err != nil {
		t.Fatal(err)
	}
	internalBlock.(*Block).SetStatus(choices.Accepted)
	assert.NoError(t, serverVM.State.SetLastAcceptedBlock(internalBlock))

	// patch the server's [syncableInterval] so its state summaries line up with the interval used by this test
	serverVM.StateSyncServer.(*stateSyncServer).syncableInterval = test.syncableInterval

	// initialize [syncerVM] with blank genesis state
	stateSyncEnabledJSON := fmt.Sprintf("{\"state-sync-enabled\":true, \"state-sync-min-blocks\": %d}", test.stateSyncMinBlocks)
	syncerEngineChan, syncerVM, syncerDBManager, syncerAppSender := GenesisVM(t, false, genesisJSONSubnetEVM, stateSyncEnabledJSON, "")
	if err := syncerVM.SetState(context.Background(), snow.StateSyncing); err != nil {
		t.Fatal(err)
	}
	enabled, err := syncerVM.StateSyncEnabled(context.Background())
	assert.NoError(t, err)
	assert.True(t, enabled)

	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
	serverAppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
		if test.responseIntercept == nil {
			go syncerVM.AppResponse(ctx, nodeID, requestID, response)
		} else {
			go test.responseIntercept(syncerVM, nodeID, requestID, response)
		}

		return nil
	}
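	// Responses (and the requests below) are delivered on separate goroutines to
	// mimic the asynchronous behavior of the app-level network.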

	// connect peer to [syncerVM]
	assert.NoError(t, syncerVM.Connected(
		context.Background(),
		serverVM.ctx.NodeID,
		statesyncclient.StateSyncVersion,
	))

	// override [syncerVM]'s SendAppRequest function to trigger AppRequest on [serverVM]
	syncerAppSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error {
		nodeID, hasItem := nodeSet.Pop()
		if !hasItem {
			t.Fatal("expected nodeSet to contain at least 1 nodeID")
		}
		go serverVM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request)
		return nil
	}

	return &syncVMSetup{
		serverVM:         serverVM,
		serverAppSender:  serverAppSender,
		fundedAccounts:   accounts,
		syncerVM:         syncerVM,
		syncerDBManager:  syncerDBManager,
		syncerEngineChan: syncerEngineChan,
	}
}

// syncVMSetup contains the required setup for a client VM to perform state sync
// off of a server VM.
type syncVMSetup struct {
	serverVM        *VM
	serverAppSender *commonEng.SenderTest

	fundedAccounts map[*keystore.Key]*types.StateAccount

	syncerVM         *VM
	syncerDBManager  manager.Manager
	syncerEngineChan <-chan commonEng.Message
}

// Teardown shuts down both VMs and asserts that both exit without error.
// Note: assumes both serverVM and syncerVM have been initialized.
func (s *syncVMSetup) Teardown(t *testing.T) {
	assert.NoError(t, s.serverVM.Shutdown(context.Background()))
	assert.NoError(t, s.syncerVM.Shutdown(context.Background()))
}

// syncTest contains the parameters for a state sync test along with the expected outcome.
type syncTest struct {
	responseIntercept  func(vm *VM, nodeID ids.NodeID, requestID uint32, response []byte)
	stateSyncMinBlocks uint64
	syncableInterval   uint64
	syncMode           block.StateSyncMode
	expectedErr        error
}

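// testSyncerVM drives the sync flow described by [test] against the VMs in
// [vmSetup]: it retrieves and accepts the server's last state summary, waits for
// the sync to finish (or fail with [test.expectedErr]), and then verifies the
// syncer can bootstrap and keep building blocks on top of the synced state.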
func testSyncerVM(t *testing.T, vmSetup *syncVMSetup, test syncTest) {
	t.Helper()
	var (
		serverVM         = vmSetup.serverVM
		fundedAccounts   = vmSetup.fundedAccounts
		syncerVM         = vmSetup.syncerVM
		syncerEngineChan = vmSetup.syncerEngineChan
	)

	// get last summary and test related methods
	summary, err := serverVM.GetLastStateSummary(context.Background())
	if err != nil {
		t.Fatal("error getting state sync last summary", "err", err)
	}
	parsedSummary, err := syncerVM.ParseStateSummary(context.Background(), summary.Bytes())
	if err != nil {
		t.Fatal("error parsing state sync last summary", "err", err)
	}
	retrievedSummary, err := serverVM.GetStateSummary(context.Background(), parsedSummary.Height())
	if err != nil {
		t.Fatal("error getting state summary at the parsed summary height", "err", err)
	}
	assert.Equal(t, summary, retrievedSummary)

	syncMode, err := parsedSummary.Accept(context.Background())
	if err != nil {
		t.Fatal("unexpected error accepting state summary", "err", err)
	}
	if syncMode != test.syncMode {
		t.Fatal("unexpected value returned from accept", "expected", test.syncMode, "got", syncMode)
	}
	if syncMode == block.StateSyncSkipped {
		return
	}
	msg := <-syncerEngineChan
	assert.Equal(t, commonEng.StateSyncDone, msg)
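	// StateSyncDone is delivered to the engine whether or not the sync succeeded;
	// the actual result is surfaced via StateSyncClient.Error() below.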

	// If the test is expected to error, assert the correct error is returned and finish the test.
	err = syncerVM.StateSyncClient.Error()
	if test.expectedErr != nil {
		assert.ErrorIs(t, err, test.expectedErr)
		assertSyncPerformedHeights(t, syncerVM.chaindb, map[uint64]struct{}{})
		return
	}
	if err != nil {
		t.Fatal("state sync failed", err)
	}

	// set [syncerVM] to bootstrapping and verify the last accepted block has been updated correctly
	// and that we can bootstrap and process some blocks.
	if err := syncerVM.SetState(context.Background(), snow.Bootstrapping); err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, serverVM.LastAcceptedBlock().Height(), syncerVM.LastAcceptedBlock().Height(), "block height mismatch between syncer and server")
	assert.Equal(t, serverVM.LastAcceptedBlock().ID(), syncerVM.LastAcceptedBlock().ID(), "blockID mismatch between syncer and server")
	assert.True(t, syncerVM.blockChain.HasState(syncerVM.blockChain.LastAcceptedBlock().Root()), "unavailable state for last accepted block")
	assertSyncPerformedHeights(t, syncerVM.chaindb, map[uint64]struct{}{retrievedSummary.Height(): {}})

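	// Build and accept additional blocks on the syncer, spending from the accounts
	// created on the server, to confirm it can process blocks on top of the synced state.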
	blocksToBuild := 10
	txsPerBlock := 10
	toAddress := testEthAddrs[1] // arbitrary choice
	generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) {
		i := 0
		for k := range fundedAccounts {
			tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil)
			signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey)
			if err != nil {
				t.Fatal(err)
			}
			gen.AddTx(signedTx)
			i++
			if i >= txsPerBlock {
				break
			}
		}
	})

	// check we can transition to [NormalOp] state and continue to process blocks.
	assert.NoError(t, syncerVM.SetState(context.Background(), snow.NormalOp))
	assert.True(t, syncerVM.bootstrapped)

	// Generate blocks after we have entered normal consensus as well
	generateAndAcceptBlocks(t, syncerVM, blocksToBuild, func(_ int, gen *core.BlockGen) {
		i := 0
		for k := range fundedAccounts {
			tx := types.NewTransaction(gen.TxNonce(k.Address), toAddress, big.NewInt(1), 21000, big.NewInt(testMinGasPrice), nil)
			signedTx, err := types.SignTx(tx, types.NewEIP155Signer(serverVM.chainConfig.ChainID), k.PrivateKey)
			if err != nil {
				t.Fatal(err)
			}
			gen.AddTx(signedTx)
			i++
			if i >= txsPerBlock {
				break
			}
		}
	})
}

// patchBlock returns a copy of [blk] with [root] and updates [db] to
// include the new block as canonical for [blk]'s height.
// This breaks the verifiability of the chain, since after this call
// [blk] does not necessarily define a valid state transition from its
// parent state to the new state root.
func patchBlock(blk *types.Block, root common.Hash, db ethdb.Database) *types.Block {
	header := blk.Header()
	header.Root = root
	receipts := rawdb.ReadRawReceipts(db, blk.Hash(), blk.NumberU64())
	newBlk := types.NewBlock(
		header, blk.Transactions(), blk.Uncles(), receipts, trie.NewStackTrie(nil),
	)
	rawdb.WriteBlock(db, newBlk)
	rawdb.WriteCanonicalHash(db, newBlk.Hash(), newBlk.NumberU64())
	return newBlk
}

// generateAndAcceptBlocks uses [core.GenerateChain] to generate blocks, then
// calls Verify and Accept on each generated block
// TODO: consider using this helper function in vm_test.go and elsewhere in this package to clean up tests
func generateAndAcceptBlocks(t *testing.T, vm *VM, numBlocks int, gen func(int, *core.BlockGen)) {
	t.Helper()

	// acceptExternalBlock defines a function to parse, verify, and accept a block once it has been
	// generated by GenerateChain
	acceptExternalBlock := func(block *types.Block) {
		bytes, err := rlp.EncodeToBytes(block)
		if err != nil {
			t.Fatal(err)
		}
		vmBlock, err := vm.ParseBlock(context.Background(), bytes)
		if err != nil {
			t.Fatal(err)
		}
		if err := vmBlock.Verify(context.Background()); err != nil {
			t.Fatal(err)
		}
		if err := vmBlock.Accept(context.Background()); err != nil {
			t.Fatal(err)
		}
	}
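	// Note: the literal 10 passed below is GenerateChain's gap argument; in this
	// fork it appears to control the spacing (in seconds) between block timestamps.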
	_, _, err := core.GenerateChain(
		vm.chainConfig,
		vm.blockChain.LastAcceptedBlock(),
		dummy.NewETHFaker(),
		vm.chaindb,
		numBlocks,
		10,
		func(i int, g *core.BlockGen) {
			g.SetOnBlockGenerated(acceptExternalBlock)
			g.SetCoinbase(constants.BlackholeAddr) // necessary for syntactic validation of the block
			gen(i, g)
		},
	)
	if err != nil {
		t.Fatal(err)
	}
	vm.blockChain.DrainAcceptorQueue()
}

// assertSyncPerformedHeights iterates over all heights the VM has synced to and
// verifies that they match [expected].
func assertSyncPerformedHeights(t *testing.T, db ethdb.Iteratee, expected map[uint64]struct{}) {
	it := rawdb.NewSyncPerformedIterator(db)
	defer it.Release()

	found := make(map[uint64]struct{}, len(expected))
	for it.Next() {
		found[rawdb.UnpackSyncPerformedKey(it.Key())] = struct{}{}
	}
	require.NoError(t, it.Error())
	require.Equal(t, expected, found)
}