github.com/prysmaticlabs/prysm@v1.4.4/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go (about)

     1  package initialsync
     2  
     3  import (
     4  	"context"
     5  	"fmt"
     6  	"sync"
     7  	"sync/atomic"
     8  	"testing"
     9  	"time"
    10  
    11  	"github.com/kevinms/leakybucket-go"
    12  	"github.com/libp2p/go-libp2p-core/network"
    13  	"github.com/libp2p/go-libp2p-core/peer"
    14  	types "github.com/prysmaticlabs/eth2-types"
    15  	mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    16  	"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    17  	dbtest "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    18  	p2pm "github.com/prysmaticlabs/prysm/beacon-chain/p2p"
    19  	p2pt "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
    20  	"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
    21  	p2ppb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
    22  	eth "github.com/prysmaticlabs/prysm/proto/eth/v1alpha1"
    23  	"github.com/prysmaticlabs/prysm/proto/eth/v1alpha1/wrapper"
    24  	"github.com/prysmaticlabs/prysm/shared/bytesutil"
    25  	"github.com/prysmaticlabs/prysm/shared/params"
    26  	"github.com/prysmaticlabs/prysm/shared/testutil"
    27  	"github.com/prysmaticlabs/prysm/shared/testutil/assert"
    28  	"github.com/prysmaticlabs/prysm/shared/testutil/require"
    29  )
    29  
    30  // TestBlocksFetcher_nonSkippedSlotAfter checks that the fetcher locates the next
    31  // non-empty slot after a given slot, including an isolated non-skipped slot, and
    32  // that it errors out when no peers with a higher target epoch are available.
    33  func TestBlocksFetcher_nonSkippedSlotAfter(t *testing.T) {
    34  	// peersGen produces `size` identical peers, each claiming blocks in several
    35  	// disjoint slot ranges plus the isolated slot 55000.
    36  	peersGen := func(size int) []*peerData {
    37  		blocks := append(makeSequence(1, 64), makeSequence(500, 640)...)
    38  		blocks = append(blocks, makeSequence(51200, 51264)...)
    39  		blocks = append(blocks, 55000)
    40  		blocks = append(blocks, makeSequence(57000, 57256)...)
    41  		var peersData []*peerData
    42  		for i := 0; i < size; i++ {
    43  			peersData = append(peersData, &peerData{
    44  				blocks:         blocks,
    45  				finalizedEpoch: 1800,
    46  				headSlot:       57000,
    47  			})
    48  		}
    49  		return peersData
    50  	}
    51  	chainConfig := struct {
    52  		peers []*peerData
    53  	}{
    54  		peers: peersGen(5),
    55  	}
    56  
    57  	mc, p2p, _ := initializeTestServices(t, []types.Slot{}, chainConfig.peers)
    58  	ctx, cancel := context.WithCancel(context.Background())
    59  	defer cancel()
    60  
    61  	fetcher := newBlocksFetcher(
    62  		ctx,
    63  		&blocksFetcherConfig{
    64  			chain: mc,
    65  			p2p:   p2p,
    66  		},
    67  	)
    68  	fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, false)
    69  	// Map of seek slot -> expected next non-skipped slot.
    70  	seekSlots := map[types.Slot]types.Slot{
    71  		0:     1,
    72  		10:    11,
    73  		31:    32,
    74  		32:    33,
    75  		63:    64,
    76  		64:    500,
    77  		160:   500,
    78  		352:   500,
    79  		480:   500,
    80  		512:   513,
    81  		639:   640,
    82  		640:   51200,
    83  		6640:  51200,
    84  		51200: 51201,
    85  	}
    86  	for seekSlot, expectedSlot := range seekSlots {
    87  		t.Run(fmt.Sprintf("range: %d (%d-%d)", expectedSlot-seekSlot, seekSlot, expectedSlot), func(t *testing.T) {
    88  			slot, err := fetcher.nonSkippedSlotAfter(ctx, seekSlot)
    89  			assert.NoError(t, err)
    90  			assert.Equal(t, expectedSlot, slot, "Unexpected slot")
    91  		})
    92  	}
    93  
    94  	t.Run("test isolated non-skipped slot", func(t *testing.T) {
    95  		seekSlot := types.Slot(51264)
    96  		expectedSlot := types.Slot(55000)
    97  
    98  		var wg sync.WaitGroup
    99  		wg.Add(1)
   100  
   101  		// nonSkippedSlotAfter is probabilistic, so keep retrying until the
   102  		// isolated slot is found. Maintain the iteration counter atomically:
   103  		// on timeout the main goroutine reads it while the worker may still
   104  		// be incrementing it (the previous plain int raced under -race).
   105  		var iterations int64
   106  		go func() {
   107  			for {
   108  				atomic.AddInt64(&iterations, 1)
   109  				slot, err := fetcher.nonSkippedSlotAfter(ctx, seekSlot)
   110  				if err != nil {
   111  					// Most likely context cancellation on test teardown. Do not
   112  					// touch `t` here: the subtest may have already completed and
   113  					// logging to a finished test panics. A genuine failure still
   114  					// surfaces as the WaitTimeout error below, since wg.Done is
   115  					// never called.
   116  					return
   117  				}
   118  				if slot == expectedSlot {
   119  					wg.Done()
   120  					return
   121  				}
   122  			}
   123  		}()
   124  		if testutil.WaitTimeout(&wg, 5*time.Second) {
   125  			t.Errorf("Isolated non-skipped slot not found in %d iterations: %v", atomic.LoadInt64(&iterations), expectedSlot)
   126  		} else {
   127  			log.Debugf("Isolated non-skipped slot found in %d iterations", atomic.LoadInt64(&iterations))
   128  		}
   129  	})
   130  
   131  	t.Run("no peers with higher target epoch available", func(t *testing.T) {
   132  		peers := []*peerData{
   133  			{finalizedEpoch: 3, headSlot: 160},
   134  			{finalizedEpoch: 3, headSlot: 160},
   135  			{finalizedEpoch: 3, headSlot: 160},
   136  			{finalizedEpoch: 8, headSlot: 320},
   137  			{finalizedEpoch: 8, headSlot: 320},
   138  			{finalizedEpoch: 10, headSlot: 320},
   139  			{finalizedEpoch: 10, headSlot: 640},
   140  		}
   141  		p2p := p2pt.NewTestP2P(t)
   142  		connectPeers(t, p2p, peers, p2p.Peers())
   143  		fetcher := newBlocksFetcher(
   144  			ctx,
   145  			&blocksFetcherConfig{
   146  				chain: mc,
   147  				p2p:   p2p,
   148  			},
   149  		)
   150  		mc.FinalizedCheckPoint = &eth.Checkpoint{
   151  			Epoch: 10,
   152  		}
   153  		require.NoError(t, mc.State.SetSlot(12*params.BeaconConfig().SlotsPerEpoch))
   154  
   155  		// In finalized mode, the requested slot lies beyond the best finalized slot.
   156  		fetcher.mode = modeStopOnFinalizedEpoch
   157  		slot, err := fetcher.nonSkippedSlotAfter(ctx, 160)
   158  		assert.ErrorContains(t, errSlotIsTooHigh.Error(), err)
   159  		assert.Equal(t, types.Slot(0), slot)
   160  
   161  		// Same in non-constrained mode, once our head moves past every peer's head.
   162  		fetcher.mode = modeNonConstrained
   163  		require.NoError(t, mc.State.SetSlot(20*params.BeaconConfig().SlotsPerEpoch))
   164  		slot, err = fetcher.nonSkippedSlotAfter(ctx, 160)
   165  		assert.ErrorContains(t, errSlotIsTooHigh.Error(), err)
   166  		assert.Equal(t, types.Slot(0), slot)
   167  	})
   168  }
   150  
   151  // TestBlocksFetcher_findFork ensures that, when sync is stuck, the fetcher can
   152  // backtrack from its current head and discover an alternative chain branch
   153  // offered by a connected peer.
   154  func TestBlocksFetcher_findFork(t *testing.T) {
   155  	// Chain graph:
   156  	// A - B - C - D - E
   157  	//      \
   158  	//       - C'- D'- E'- F'- G'
   159  	// Allow fetcher to proceed till E, then connect peer having alternative branch.
   160  	// Test that G' slot can be reached i.e. fetcher can track back and explore alternative paths.
   161  	beaconDB := dbtest.SetupDB(t)
   162  	p2p := p2pt.NewTestP2P(t)
   163  
   164  	// Chain contains blocks from 8 epochs (from 0 to 7, 256 is the start slot of epoch8).
   165  	chain1 := extendBlockSequence(t, []*eth.SignedBeaconBlock{}, 250)
   166  	finalizedSlot := types.Slot(63)
   167  	finalizedEpoch := helpers.SlotToEpoch(finalizedSlot)
   168  
   169  	genesisBlock := chain1[0]
   170  	require.NoError(t, beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(genesisBlock)))
   171  	genesisRoot, err := genesisBlock.Block.HashTreeRoot()
   172  	require.NoError(t, err)
   173  
   174  	st, err := testutil.NewBeaconState()
   175  	require.NoError(t, err)
   176  	mc := &mock.ChainService{
   177  		State: st,
   178  		Root:  genesisRoot[:],
   179  		DB:    beaconDB,
   180  		FinalizedCheckPoint: &eth.Checkpoint{
   181  			Epoch: finalizedEpoch,
   182  			Root:  []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
   183  		},
   184  	}
   185  
   186  	ctx, cancel := context.WithCancel(context.Background())
   187  	defer cancel()
   188  	fetcher := newBlocksFetcher(
   189  		ctx,
   190  		&blocksFetcherConfig{
   191  			chain: mc,
   192  			p2p:   p2p,
   193  			db:    beaconDB,
   194  		},
   195  	)
   196  	fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, false)
   197  
   198  	// Consume all chain1 blocks from many peers (alternative fork will be featured by a single peer,
   199  	// and should still be enough to explore alternative paths).
   200  	peers := make([]peer.ID, 0)
   201  	for i := 0; i < 5; i++ {
   202  		peers = append(peers, connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers()))
   203  	}
   204  
   205  	// Fetch and persist all chain1 blocks batch by batch, round-robining over peers.
   206  	blockBatchLimit := uint64(flags.Get().BlockBatchLimit) * 2
   207  	pidInd := 0
   208  	for i := uint64(1); i < uint64(len(chain1)); i += blockBatchLimit {
   209  		req := &p2ppb.BeaconBlocksByRangeRequest{
   210  			StartSlot: types.Slot(i),
   211  			Step:      1,
   212  			Count:     blockBatchLimit,
   213  		}
   214  		blocks, err := fetcher.requestBlocks(ctx, req, peers[pidInd%len(peers)])
   215  		require.NoError(t, err)
   216  		for _, blk := range blocks {
   217  			require.NoError(t, beaconDB.SaveBlock(ctx, blk))
   218  			require.NoError(t, st.SetSlot(blk.Block().Slot()))
   219  		}
   220  		pidInd++
   221  	}
   222  
   223  	// Assert that all the blocks from chain1 are known.
   224  	for _, blk := range chain1 {
   225  		blkRoot, err := blk.Block.HashTreeRoot()
   226  		require.NoError(t, err)
   227  		require.Equal(t, true, beaconDB.HasBlock(ctx, blkRoot) || mc.HasInitSyncBlock(blkRoot))
   228  	}
   229  	assert.Equal(t, types.Slot(250), mc.HeadSlot())
   230  
   231  	// Assert no blocks on further requests, disallowing to progress.
   232  	req := &p2ppb.BeaconBlocksByRangeRequest{
   233  		StartSlot: 251,
   234  		Step:      1,
   235  		Count:     blockBatchLimit,
   236  	}
   237  	blocks, err := fetcher.requestBlocks(ctx, req, peers[pidInd%len(peers)])
   238  	require.NoError(t, err)
   239  	assert.Equal(t, 0, len(blocks))
   240  
   241  	// If no peers with unexplored paths exist, error should be returned.
   242  	fork, err := fetcher.findFork(ctx, 251)
   243  	require.ErrorContains(t, errNoPeersAvailable.Error(), err)
   244  	require.Equal(t, (*forkData)(nil), fork)
   245  
   246  	// Add peer that has blocks after 250, but those blocks are orphaned i.e. they do not have common
   247  	// ancestor with what we already have. So, error is expected.
   248  	chain1a := extendBlockSequence(t, []*eth.SignedBeaconBlock{}, 265)
   249  	connectPeerHavingBlocks(t, p2p, chain1a, finalizedSlot, p2p.Peers())
   250  	fork, err = fetcher.findFork(ctx, 251)
   251  	require.ErrorContains(t, errNoPeersWithAltBlocks.Error(), err)
   252  	require.Equal(t, (*forkData)(nil), fork)
   253  
   254  	// Add peer which has blocks after 250. It is not on another fork, but algorithm
   255  	// is smart enough to link back to common ancestor, w/o discriminating between forks. This is
   256  	// by design: fork exploration is undertaken when FSMs are stuck, so any progress is good.
   257  	chain1b := extendBlockSequence(t, chain1, 64)
   258  	curForkMoreBlocksPeer := connectPeerHavingBlocks(t, p2p, chain1b, finalizedSlot, p2p.Peers())
   259  	fork, err = fetcher.findFork(ctx, 251)
   260  	require.NoError(t, err)
   261  	require.Equal(t, 64, len(fork.blocks))
   262  	require.Equal(t, curForkMoreBlocksPeer, fork.peer)
   263  	// Save all chain1b blocks (so that they do not interfere with alternative fork).
   264  	for _, blk := range chain1b {
   265  		require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
   266  		require.NoError(t, st.SetSlot(blk.Block.Slot))
   267  	}
   268  
   269  	// Build an alternative branch (chain2) diverging from chain1 at forkSlot.
   270  	forkSlot := types.Slot(129)
   271  	chain2 := extendBlockSequence(t, chain1[:forkSlot], 165)
   272  	// Assert that forked blocks from chain2 are unknown.
   273  	assert.Equal(t, 294, len(chain2))
   274  	for _, blk := range chain2[forkSlot:] {
   275  		blkRoot, err := blk.Block.HashTreeRoot()
   276  		require.NoError(t, err)
   277  		require.Equal(t, false, beaconDB.HasBlock(ctx, blkRoot) || mc.HasInitSyncBlock(blkRoot))
   278  	}
   279  
   280  	// Search for alternative paths (add single peer having alternative path).
   281  	alternativePeer := connectPeerHavingBlocks(t, p2p, chain2, finalizedSlot, p2p.Peers())
   282  	fork, err = fetcher.findFork(ctx, 251)
   283  	require.NoError(t, err)
   284  	assert.Equal(t, alternativePeer, fork.peer)
   285  	assert.Equal(t, 65, len(fork.blocks))
   286  	ind := forkSlot
   287  	for _, blk := range fork.blocks {
   288  		require.Equal(t, blk.Block().Slot(), chain2[ind].Block.Slot)
   289  		ind++
   290  	}
   291  
   292  	// Process returned blocks and then attempt to extend chain (ensuring that parent block exists).
   293  	for _, blk := range fork.blocks {
   294  		require.NoError(t, beaconDB.SaveBlock(ctx, blk))
   295  		require.NoError(t, st.SetSlot(blk.Block().Slot()))
   296  	}
   297  	assert.Equal(t, forkSlot.Add(uint64(len(fork.blocks)-1)), mc.HeadSlot())
   298  	for i := forkSlot.Add(uint64(len(fork.blocks))); i < types.Slot(len(chain2)); i++ {
   299  		blk := chain2[i]
   300  		require.Equal(t, blk.Block.Slot, i, "incorrect block selected for slot %d", i)
   301  		// Only save if parent block exists.
   302  		parentRoot := bytesutil.ToBytes32(blk.Block.ParentRoot)
   303  		if beaconDB.HasBlock(ctx, parentRoot) || mc.HasInitSyncBlock(parentRoot) {
   304  			require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
   305  			require.NoError(t, st.SetSlot(blk.Block.Slot))
   306  		}
   307  	}
   308  
   309  	// Assert that all the blocks from chain2 are known.
   310  	for _, blk := range chain2 {
   311  		blkRoot, err := blk.Block.HashTreeRoot()
   312  		require.NoError(t, err)
   313  		require.Equal(t, true, beaconDB.HasBlock(ctx, blkRoot) || mc.HasInitSyncBlock(blkRoot), "slot %d", blk.Block.Slot)
   314  	}
   315  }
   310  
   311  // TestBlocksFetcher_findForkWithPeer covers the possible outcomes of probing a
   312  // single peer for an alternative branch: request slot too low, missing peer
   313  // status, peer with an empty chain, no divergence, a fork that requires
   314  // backtracking, an unrelated chain with no common ancestor, and a fork found
   315  // without backtracking.
   316  func TestBlocksFetcher_findForkWithPeer(t *testing.T) {
   317  	db := dbtest.SetupDB(t)
   318  	host := p2pt.NewTestP2P(t)
   319  
   320  	knownBlocks := extendBlockSequence(t, []*eth.SignedBeaconBlock{}, 128)
   321  	genesis := knownBlocks[0]
   322  	require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
   323  	genesisRoot, err := genesis.Block.HashTreeRoot()
   324  	require.NoError(t, err)
   325  
   326  	st, err := testutil.NewBeaconState()
   327  	require.NoError(t, err)
   328  	chain := &mock.ChainService{
   329  		State: st,
   330  		Root:  genesisRoot[:],
   331  		DB:    db,
   332  	}
   333  
   334  	ctx, cancel := context.WithCancel(context.Background())
   335  	defer cancel()
   336  	fetcher := newBlocksFetcher(
   337  		ctx,
   338  		&blocksFetcherConfig{
   339  			chain: chain,
   340  			p2p:   host,
   341  			db:    db,
   342  		},
   343  	)
   344  	fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, false)
   345  
   346  	// Persist the entire known chain, so only genuinely alternative blocks
   347  	// count as diverging in the subtests below.
   348  	for _, blk := range knownBlocks {
   349  		require.NoError(t, db.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(blk)))
   350  		require.NoError(t, st.SetSlot(blk.Block.Slot))
   351  	}
   352  
   353  	t.Run("slot is too early", func(t *testing.T) {
   354  		remote := p2pt.NewTestP2P(t)
   354  		_, err := fetcher.findForkWithPeer(ctx, remote.PeerID(), 0)
   355  		assert.ErrorContains(t, "slot is too low to backtrack", err)
   356  	})
   357  
   358  	t.Run("no peer status", func(t *testing.T) {
   359  		remote := p2pt.NewTestP2P(t)
   360  		_, err := fetcher.findForkWithPeer(ctx, remote.PeerID(), 64)
   361  		assert.ErrorContains(t, "cannot obtain peer's status", err)
   362  	})
   363  
   364  	t.Run("no non-skipped blocks found", func(t *testing.T) {
   365  		remote := p2pt.NewTestP2P(t)
   366  		host.Connect(remote)
   367  		defer func() {
   368  			assert.NoError(t, host.Disconnect(remote.PeerID()))
   369  		}()
   370  		// Peer advertises a completely empty chain.
   371  		host.Peers().SetChainState(remote.PeerID(), &p2ppb.Status{
   372  			HeadRoot: nil,
   373  			HeadSlot: 0,
   374  		})
   375  		_, err := fetcher.findForkWithPeer(ctx, remote.PeerID(), 64)
   376  		assert.ErrorContains(t, "cannot locate non-empty slot for a peer", err)
   377  	})
   378  
   379  	t.Run("no diverging blocks", func(t *testing.T) {
   380  		// Peer is on exactly the chain we already have.
   381  		pid := connectPeerHavingBlocks(t, host, knownBlocks, 64, host.Peers())
   382  		defer func() {
   383  			assert.NoError(t, host.Disconnect(pid))
   384  		}()
   385  		_, err := fetcher.findForkWithPeer(ctx, pid, 64)
   386  		assert.ErrorContains(t, "no alternative blocks exist within scanned range", err)
   387  	})
   388  
   389  	t.Run("first block is diverging - backtrack successfully", func(t *testing.T) {
   390  		// Fork point (slot 24) lies before the probed slot, so backtracking is required.
   391  		forkedSlot := types.Slot(24)
   392  		altBlocks := extendBlockSequence(t, knownBlocks[:forkedSlot], 128)
   393  		pid := connectPeerHavingBlocks(t, host, altBlocks, 128, host.Peers())
   394  		defer func() {
   395  			assert.NoError(t, host.Disconnect(pid))
   396  		}()
   397  		fork, err := fetcher.findForkWithPeer(ctx, pid, 64)
   398  		require.NoError(t, err)
   399  		require.Equal(t, 10, len(fork.blocks))
   400  		assert.Equal(t, forkedSlot, fork.blocks[0].Block().Slot(), "Expected slot %d to be ancestor", forkedSlot)
   401  	})
   402  
   403  	t.Run("first block is diverging - no common ancestor", func(t *testing.T) {
   404  		// Completely unrelated chain: no shared ancestor can be found.
   405  		altBlocks := extendBlockSequence(t, []*eth.SignedBeaconBlock{}, 128)
   406  		pid := connectPeerHavingBlocks(t, host, altBlocks, 128, host.Peers())
   407  		defer func() {
   408  			assert.NoError(t, host.Disconnect(pid))
   409  		}()
   410  		_, err := fetcher.findForkWithPeer(ctx, pid, 64)
   411  		require.ErrorContains(t, "failed to find common ancestor", err)
   412  	})
   413  
   414  	t.Run("mid block is diverging - no backtrack is necessary", func(t *testing.T) {
   415  		// Fork point (slot 60) falls inside the scanned range, so the fork is
   416  		// discovered without any backtracking.
   417  		forkedSlot := types.Slot(60)
   418  		altBlocks := extendBlockSequence(t, knownBlocks[:forkedSlot], 128)
   419  		pid := connectPeerHavingBlocks(t, host, altBlocks, 128, host.Peers())
   420  		defer func() {
   421  			assert.NoError(t, host.Disconnect(pid))
   422  		}()
   423  		fork, err := fetcher.findForkWithPeer(ctx, pid, 64)
   424  		require.NoError(t, err)
   425  		require.Equal(t, 64, len(fork.blocks))
   426  		assert.Equal(t, types.Slot(33), fork.blocks[0].Block().Slot())
   427  	})
   428  }
   417  
   418  // TestBlocksFetcher_findAncestor verifies error handling when searching for a
   419  // common ancestor via a peer that either does not support the blocks-by-root
   420  // protocol or returns no blocks at all.
   421  func TestBlocksFetcher_findAncestor(t *testing.T) {
   422  	db := dbtest.SetupDB(t)
   423  	host := p2pt.NewTestP2P(t)
   424  
   425  	knownBlocks := extendBlockSequence(t, []*eth.SignedBeaconBlock{}, 128)
   426  	finalizedSlot := types.Slot(63)
   427  	finalizedEpoch := helpers.SlotToEpoch(finalizedSlot)
   428  
   429  	genesis := knownBlocks[0]
   430  	require.NoError(t, db.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(genesis)))
   431  	genesisRoot, err := genesis.Block.HashTreeRoot()
   432  	require.NoError(t, err)
   433  
   434  	st, err := testutil.NewBeaconState()
   435  	require.NoError(t, err)
   436  	chain := &mock.ChainService{
   437  		State: st,
   438  		Root:  genesisRoot[:],
   439  		DB:    db,
   440  		FinalizedCheckPoint: &eth.Checkpoint{
   441  			Epoch: finalizedEpoch,
   442  			Root:  []byte(fmt.Sprintf("finalized_root %d", finalizedEpoch)),
   443  		},
   444  	}
   445  
   446  	ctx, cancel := context.WithCancel(context.Background())
   447  	defer cancel()
   448  	fetcher := newBlocksFetcher(
   449  		ctx,
   450  		&blocksFetcherConfig{
   451  			chain: chain,
   452  			p2p:   host,
   453  			db:    db,
   454  		},
   455  	)
   456  	fetcher.rateLimiter = leakybucket.NewCollector(6400, 6400, false)
   457  	topic := fmt.Sprintf("%s/ssz_snappy", p2pm.RPCBlocksByRootTopicV1)
   458  
   459  	t.Run("error on request", func(t *testing.T) {
   460  		// Peer is connected, but registers no handler for the protocol.
   461  		remote := p2pt.NewTestP2P(t)
   462  		host.Connect(remote)
   463  
   464  		_, err := fetcher.findAncestor(ctx, remote.PeerID(), wrapper.WrappedPhase0SignedBeaconBlock(knownBlocks[4]))
   465  		assert.ErrorContains(t, "protocol not supported", err)
   466  	})
   467  
   468  	t.Run("no blocks", func(t *testing.T) {
   469  		// Peer supports the protocol, but closes the stream without sending blocks.
   470  		remote := p2pt.NewTestP2P(t)
   471  		host.Connect(remote)
   472  
   473  		remote.SetStreamHandler(topic, func(stream network.Stream) {
   474  			assert.NoError(t, stream.Close())
   475  		})
   476  
   477  		fork, err := fetcher.findAncestor(ctx, remote.PeerID(), wrapper.WrappedPhase0SignedBeaconBlock(knownBlocks[4]))
   478  		assert.ErrorContains(t, "no common ancestor found", err)
   479  		assert.Equal(t, (*forkData)(nil), fork)
   480  	})
   481  }
   477  
   478  func TestBlocksFetcher_currentHeadAndTargetEpochs(t *testing.T) {
   479  	tests := []struct {
   480  		name               string
   481  		syncMode           syncMode
   482  		peers              []*peerData
   483  		ourFinalizedEpoch  types.Epoch
   484  		ourHeadSlot        types.Slot
   485  		expectedHeadEpoch  types.Epoch
   486  		targetEpoch        types.Epoch
   487  		targetEpochSupport int
   488  	}{
   489  		{
   490  			name:               "ignore lower epoch peers in best finalized",
   491  			syncMode:           modeStopOnFinalizedEpoch,
   492  			ourHeadSlot:        5 * params.BeaconConfig().SlotsPerEpoch,
   493  			expectedHeadEpoch:  4,
   494  			ourFinalizedEpoch:  4,
   495  			targetEpoch:        10,
   496  			targetEpochSupport: 3,
   497  			peers: []*peerData{
   498  				{finalizedEpoch: 3, headSlot: 160},
   499  				{finalizedEpoch: 3, headSlot: 160},
   500  				{finalizedEpoch: 3, headSlot: 160},
   501  				{finalizedEpoch: 3, headSlot: 160},
   502  				{finalizedEpoch: 3, headSlot: 160},
   503  				{finalizedEpoch: 8, headSlot: 320},
   504  				{finalizedEpoch: 8, headSlot: 320},
   505  				{finalizedEpoch: 10, headSlot: 320},
   506  				{finalizedEpoch: 10, headSlot: 320},
   507  				{finalizedEpoch: 10, headSlot: 320},
   508  			},
   509  		},
   510  		{
   511  			name:               "resolve ties in best finalized",
   512  			syncMode:           modeStopOnFinalizedEpoch,
   513  			ourHeadSlot:        5 * params.BeaconConfig().SlotsPerEpoch,
   514  			expectedHeadEpoch:  4,
   515  			ourFinalizedEpoch:  4,
   516  			targetEpoch:        10,
   517  			targetEpochSupport: 3,
   518  			peers: []*peerData{
   519  				{finalizedEpoch: 3, headSlot: 160},
   520  				{finalizedEpoch: 3, headSlot: 160},
   521  				{finalizedEpoch: 3, headSlot: 160},
   522  				{finalizedEpoch: 3, headSlot: 160},
   523  				{finalizedEpoch: 3, headSlot: 160},
   524  				{finalizedEpoch: 8, headSlot: 320},
   525  				{finalizedEpoch: 8, headSlot: 320},
   526  				{finalizedEpoch: 8, headSlot: 320},
   527  				{finalizedEpoch: 10, headSlot: 320},
   528  				{finalizedEpoch: 10, headSlot: 320},
   529  				{finalizedEpoch: 10, headSlot: 320},
   530  			},
   531  		},
   532  		{
   533  			name:               "best non-finalized",
   534  			syncMode:           modeNonConstrained,
   535  			ourHeadSlot:        5 * params.BeaconConfig().SlotsPerEpoch,
   536  			expectedHeadEpoch:  5,
   537  			ourFinalizedEpoch:  4,
   538  			targetEpoch:        20,
   539  			targetEpochSupport: 1,
   540  			peers: []*peerData{
   541  				{finalizedEpoch: 3, headSlot: 160},
   542  				{finalizedEpoch: 3, headSlot: 160},
   543  				{finalizedEpoch: 3, headSlot: 160},
   544  				{finalizedEpoch: 3, headSlot: 160},
   545  				{finalizedEpoch: 3, headSlot: 160},
   546  				{finalizedEpoch: 8, headSlot: 320},
   547  				{finalizedEpoch: 8, headSlot: 320},
   548  				{finalizedEpoch: 10, headSlot: 320},
   549  				{finalizedEpoch: 10, headSlot: 320},
   550  				{finalizedEpoch: 10, headSlot: 320},
   551  				{finalizedEpoch: 15, headSlot: 640},
   552  			},
   553  		},
   554  	}
   555  
   556  	for _, tt := range tests {
   557  		t.Run(tt.name, func(t *testing.T) {
   558  			mc, p2p, _ := initializeTestServices(t, []types.Slot{}, tt.peers)
   559  			ctx, cancel := context.WithCancel(context.Background())
   560  			defer cancel()
   561  			fetcher := newBlocksFetcher(
   562  				ctx,
   563  				&blocksFetcherConfig{
   564  					chain: mc,
   565  					p2p:   p2p,
   566  				},
   567  			)
   568  			mc.FinalizedCheckPoint = &eth.Checkpoint{
   569  				Epoch: tt.ourFinalizedEpoch,
   570  			}
   571  			require.NoError(t, mc.State.SetSlot(tt.ourHeadSlot))
   572  			fetcher.mode = tt.syncMode
   573  
   574  			// Head and target epochs calculation.
   575  			headEpoch, targetEpoch, peers := fetcher.calculateHeadAndTargetEpochs()
   576  			assert.Equal(t, tt.expectedHeadEpoch, headEpoch, "Unexpected head epoch")
   577  			assert.Equal(t, tt.targetEpoch, targetEpoch, "Unexpected target epoch")
   578  			assert.Equal(t, tt.targetEpochSupport, len(peers), "Unexpected number of peers supporting target epoch")
   579  
   580  			// Best finalized and non-finalized slots.
   581  			finalizedSlot := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(tt.targetEpoch))
   582  			if tt.syncMode == modeStopOnFinalizedEpoch {
   583  				assert.Equal(t, finalizedSlot, fetcher.bestFinalizedSlot(), "Unexpected finalized slot")
   584  			} else {
   585  				assert.Equal(t, finalizedSlot, fetcher.bestNonFinalizedSlot(), "Unexpected non-finalized slot")
   586  			}
   587  		})
   588  	}
   589  }